diff options
Diffstat (limited to 'services')
80 files changed, 3610 insertions, 1376 deletions
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk index fee2347..c359be5 100644 --- a/services/audioflinger/Android.mk +++ b/services/audioflinger/Android.mk @@ -44,12 +44,13 @@ LOCAL_SRC_FILES:= \ SpdifStreamOut.cpp \ Effects.cpp \ AudioMixer.cpp.arm \ - PatchPanel.cpp - -LOCAL_SRC_FILES += StateQueue.cpp + BufferProviders.cpp \ + PatchPanel.cpp \ + StateQueue.cpp LOCAL_C_INCLUDES := \ $(TOPDIR)frameworks/av/services/audiopolicy \ + $(TOPDIR)external/sonic \ $(call include-path-for, audio-effects) \ $(call include-path-for, audio-utils) @@ -68,7 +69,8 @@ LOCAL_SHARED_LIBRARIES := \ libhardware_legacy \ libeffects \ libpowermanager \ - libserviceutility + libserviceutility \ + libsonic LOCAL_STATIC_LIBRARIES := \ libscheduling_policy \ diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index f3206cb..5002099 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -45,6 +45,8 @@ #include "AudioFlinger.h" #include "ServiceUtilities.h" +#include <media/AudioResamplerPublic.h> + #include <media/EffectsFactoryApi.h> #include <audio_effects/effect_visualizer.h> #include <audio_effects/effect_ns.h> @@ -1140,19 +1142,46 @@ size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t form if (ret != NO_ERROR) { return 0; } + if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) { + return 0; + } AutoMutex lock(mHardwareLock); mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE; - audio_config_t config; - memset(&config, 0, sizeof(config)); - config.sample_rate = sampleRate; - config.channel_mask = channelMask; - config.format = format; + audio_config_t config, proposed; + memset(&proposed, 0, sizeof(proposed)); + proposed.sample_rate = sampleRate; + proposed.channel_mask = channelMask; + proposed.format = format; audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice(); - size_t size = dev->get_input_buffer_size(dev, &config); + 
size_t frames; + for (;;) { + // Note: config is currently a const parameter for get_input_buffer_size() + // but we use a copy from proposed in case config changes from the call. + config = proposed; + frames = dev->get_input_buffer_size(dev, &config); + if (frames != 0) { + break; // hal success, config is the result + } + // change one parameter of the configuration each iteration to a more "common" value + // to see if the device will support it. + if (proposed.format != AUDIO_FORMAT_PCM_16_BIT) { + proposed.format = AUDIO_FORMAT_PCM_16_BIT; + } else if (proposed.sample_rate != 44100) { // 44.1 is claimed as must in CDD as well as + proposed.sample_rate = 44100; // legacy AudioRecord.java. TODO: Query hw? + } else { + ALOGW("getInputBufferSize failed with minimum buffer size sampleRate %u, " + "format %#x, channelMask 0x%X", + sampleRate, format, channelMask); + break; // retries failed, break out of loop with frames == 0. + } + } mHardwareStatus = AUDIO_HW_IDLE; - return size; + if (frames > 0 && config.sample_rate != sampleRate) { + frames = destinationFramesPossible(frames, sampleRate, config.sample_rate); + } + return frames; // may be converted to bytes at the Java level. 
} uint32_t AudioFlinger::getInputFramesLost(audio_io_handle_t ioHandle) const @@ -1419,9 +1448,8 @@ sp<IAudioRecord> AudioFlinger::openRecord( goto Exit; } - // we don't yet support anything other than 16-bit PCM - if (!(audio_is_valid_format(format) && - audio_is_linear_pcm(format) && format == AUDIO_FORMAT_PCM_16_BIT)) { + // we don't yet support anything other than linear PCM + if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) { ALOGE("openRecord() invalid format %#x", format); lStatus = BAD_VALUE; goto Exit; @@ -2002,11 +2030,11 @@ sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t m status, address.string()); // If the input could not be opened with the requested parameters and we can handle the - // conversion internally, try to open again with the proposed parameters. The AudioFlinger can - // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs. + // conversion internally, try to open again with the proposed parameters. if (status == BAD_VALUE && - config->format == halconfig.format && halconfig.format == AUDIO_FORMAT_PCM_16_BIT && - (halconfig.sample_rate <= 2 * config->sample_rate) && + audio_is_linear_pcm(config->format) && + audio_is_linear_pcm(halconfig.format) && + (halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) && (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) && (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) { // FIXME describe the change proposed by HAL (save old values so we can log them here) diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp index 09d86ea..3191598 100644 --- a/services/audioflinger/AudioHwDevice.cpp +++ b/services/audioflinger/AudioHwDevice.cpp @@ -44,7 +44,7 @@ status_t AudioHwDevice::openOutputStream( AudioStreamOut *outputStream = new AudioStreamOut(this, flags); // Try to open the HAL first using the current format. 
- ALOGV("AudioHwDevice::openOutputStream(), try " + ALOGV("openOutputStream(), try " " sampleRate %d, Format %#x, " "channelMask %#x", config->sample_rate, @@ -59,7 +59,7 @@ status_t AudioHwDevice::openOutputStream( // FIXME Look at any modification to the config. // The HAL might modify the config to suggest a wrapped format. // Log this so we can see what the HALs are doing. - ALOGI("AudioHwDevice::openOutputStream(), HAL returned" + ALOGI("openOutputStream(), HAL returned" " sampleRate %d, Format %#x, " "channelMask %#x, status %d", config->sample_rate, @@ -72,16 +72,19 @@ status_t AudioHwDevice::openOutputStream( && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0); - // FIXME - Add isEncodingSupported() query to SPDIF wrapper then - // call it from here. if (wrapperNeeded) { - outputStream = new SpdifStreamOut(this, flags); - status = outputStream->open(handle, devices, &originalConfig, address); - if (status != NO_ERROR) { - ALOGE("ERROR - AudioHwDevice::openOutputStream(), SPDIF open returned %d", - status); - delete outputStream; - outputStream = NULL; + if (SPDIFEncoder::isFormatSupported(originalConfig.format)) { + outputStream = new SpdifStreamOut(this, flags, originalConfig.format); + status = outputStream->open(handle, devices, &originalConfig, address); + if (status != NO_ERROR) { + ALOGE("ERROR - openOutputStream(), SPDIF open returned %d", + status); + delete outputStream; + outputStream = NULL; + } + } else { + ALOGE("ERROR - openOutputStream(), SPDIFEncoder does not support format 0x%08x", + originalConfig.format); } } } diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp index dddca02..c2c791f 100644 --- a/services/audioflinger/AudioMixer.cpp +++ b/services/audioflinger/AudioMixer.cpp @@ -38,9 +38,7 @@ #include <audio_utils/format.h> #include <common_time/local_clock.h> #include <common_time/cc_helper.h> - -#include <media/EffectsFactoryApi.h> -#include 
<audio_effects/effect_downmix.h> +#include <media/AudioResamplerPublic.h> #include "AudioMixerOps.h" #include "AudioMixer.h" @@ -91,323 +89,6 @@ T min(const T& a, const T& b) return a < b ? a : b; } -AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize, - size_t outputFrameSize, size_t bufferFrameCount) : - mInputFrameSize(inputFrameSize), - mOutputFrameSize(outputFrameSize), - mLocalBufferFrameCount(bufferFrameCount), - mLocalBufferData(NULL), - mConsumed(0) -{ - ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this, - inputFrameSize, outputFrameSize, bufferFrameCount); - LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0, - "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)", - inputFrameSize, outputFrameSize); - if (mLocalBufferFrameCount) { - (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize); - } - mBuffer.frameCount = 0; -} - -AudioMixer::CopyBufferProvider::~CopyBufferProvider() -{ - ALOGV("~CopyBufferProvider(%p)", this); - if (mBuffer.frameCount != 0) { - mTrackBufferProvider->releaseBuffer(&mBuffer); - } - free(mLocalBufferData); -} - -status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, - int64_t pts) -{ - //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)", - // this, pBuffer, pBuffer->frameCount, pts); - if (mLocalBufferFrameCount == 0) { - status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts); - if (res == OK) { - copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount); - } - return res; - } - if (mBuffer.frameCount == 0) { - mBuffer.frameCount = pBuffer->frameCount; - status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts); - // At one time an upstream buffer provider had - // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014. - // - // By API spec, if res != OK, then mBuffer.frameCount == 0. - // but there may be improper implementations. 
- ALOG_ASSERT(res == OK || mBuffer.frameCount == 0); - if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe. - pBuffer->raw = NULL; - pBuffer->frameCount = 0; - return res; - } - mConsumed = 0; - } - ALOG_ASSERT(mConsumed < mBuffer.frameCount); - size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed); - count = min(count, pBuffer->frameCount); - pBuffer->raw = mLocalBufferData; - pBuffer->frameCount = count; - copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, - pBuffer->frameCount); - return OK; -} - -void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) -{ - //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))", - // this, pBuffer, pBuffer->frameCount); - if (mLocalBufferFrameCount == 0) { - mTrackBufferProvider->releaseBuffer(pBuffer); - return; - } - // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount"); - mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content - if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) { - mTrackBufferProvider->releaseBuffer(&mBuffer); - ALOG_ASSERT(mBuffer.frameCount == 0); - } - pBuffer->raw = NULL; - pBuffer->frameCount = 0; -} - -void AudioMixer::CopyBufferProvider::reset() -{ - if (mBuffer.frameCount != 0) { - mTrackBufferProvider->releaseBuffer(&mBuffer); - } - mConsumed = 0; -} - -AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider( - audio_channel_mask_t inputChannelMask, - audio_channel_mask_t outputChannelMask, audio_format_t format, - uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) : - CopyBufferProvider( - audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask), - audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask), - bufferFrameCount) // set bufferFrameCount to 0 to do in-place -{ - ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)", - this, 
inputChannelMask, outputChannelMask, format, - sampleRate, sessionId); - if (!sIsMultichannelCapable - || EffectCreate(&sDwnmFxDesc.uuid, - sessionId, - SESSION_ID_INVALID_AND_IGNORED, - &mDownmixHandle) != 0) { - ALOGE("DownmixerBufferProvider() error creating downmixer effect"); - mDownmixHandle = NULL; - return; - } - // channel input configuration will be overridden per-track - mDownmixConfig.inputCfg.channels = inputChannelMask; // FIXME: Should be bits - mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits - mDownmixConfig.inputCfg.format = format; - mDownmixConfig.outputCfg.format = format; - mDownmixConfig.inputCfg.samplingRate = sampleRate; - mDownmixConfig.outputCfg.samplingRate = sampleRate; - mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; - mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; - // input and output buffer provider, and frame count will not be used as the downmix effect - // process() function is called directly (see DownmixerBufferProvider::getNextBuffer()) - mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | - EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE; - mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask; - - int cmdStatus; - uint32_t replySize = sizeof(int); - - // Configure downmixer - status_t status = (*mDownmixHandle)->command(mDownmixHandle, - EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/, - &mDownmixConfig /*pCmdData*/, - &replySize, &cmdStatus /*pReplyData*/); - if (status != 0 || cmdStatus != 0) { - ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer", - status, cmdStatus); - EffectRelease(mDownmixHandle); - mDownmixHandle = NULL; - return; - } - - // Enable downmixer - replySize = sizeof(int); - status = (*mDownmixHandle)->command(mDownmixHandle, - EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/, - &replySize, &cmdStatus /*pReplyData*/); - if (status != 0 
|| cmdStatus != 0) { - ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer", - status, cmdStatus); - EffectRelease(mDownmixHandle); - mDownmixHandle = NULL; - return; - } - - // Set downmix type - // parameter size rounded for padding on 32bit boundary - const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int); - const int downmixParamSize = - sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t); - effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize); - param->psize = sizeof(downmix_params_t); - const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE; - memcpy(param->data, &downmixParam, param->psize); - const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD; - param->vsize = sizeof(downmix_type_t); - memcpy(param->data + psizePadded, &downmixType, param->vsize); - replySize = sizeof(int); - status = (*mDownmixHandle)->command(mDownmixHandle, - EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */, - param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/); - free(param); - if (status != 0 || cmdStatus != 0) { - ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type", - status, cmdStatus); - EffectRelease(mDownmixHandle); - mDownmixHandle = NULL; - return; - } - ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType); -} - -AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider() -{ - ALOGV("~DownmixerBufferProvider (%p)", this); - EffectRelease(mDownmixHandle); - mDownmixHandle = NULL; -} - -void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames) -{ - mDownmixConfig.inputCfg.buffer.frameCount = frames; - mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src); - mDownmixConfig.outputCfg.buffer.frameCount = frames; - mDownmixConfig.outputCfg.buffer.raw = dst; - // may be in-place if src == dst. 
- status_t res = (*mDownmixHandle)->process(mDownmixHandle, - &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer); - ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res); -} - -/* call once in a pthread_once handler. */ -/*static*/ status_t AudioMixer::DownmixerBufferProvider::init() -{ - // find multichannel downmix effect if we have to play multichannel content - uint32_t numEffects = 0; - int ret = EffectQueryNumberEffects(&numEffects); - if (ret != 0) { - ALOGE("AudioMixer() error %d querying number of effects", ret); - return NO_INIT; - } - ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); - - for (uint32_t i = 0 ; i < numEffects ; i++) { - if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) { - ALOGV("effect %d is called %s", i, sDwnmFxDesc.name); - if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { - ALOGI("found effect \"%s\" from %s", - sDwnmFxDesc.name, sDwnmFxDesc.implementor); - sIsMultichannelCapable = true; - break; - } - } - } - ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); - return NO_INIT; -} - -/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false; -/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc; - -AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask, - audio_channel_mask_t outputChannelMask, audio_format_t format, - size_t bufferFrameCount) : - CopyBufferProvider( - audio_bytes_per_sample(format) - * audio_channel_count_from_out_mask(inputChannelMask), - audio_bytes_per_sample(format) - * audio_channel_count_from_out_mask(outputChannelMask), - bufferFrameCount), - mFormat(format), - mSampleSize(audio_bytes_per_sample(format)), - mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)), - mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask)) -{ - ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu", - this, format, inputChannelMask, 
outputChannelMask, - mInputChannels, mOutputChannels); - - const audio_channel_representation_t inputRepresentation = - audio_channel_mask_get_representation(inputChannelMask); - const audio_channel_representation_t outputRepresentation = - audio_channel_mask_get_representation(outputChannelMask); - const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask); - const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask); - - switch (inputRepresentation) { - case AUDIO_CHANNEL_REPRESENTATION_POSITION: - switch (outputRepresentation) { - case AUDIO_CHANNEL_REPRESENTATION_POSITION: - memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry), - outputBits, inputBits); - return; - case AUDIO_CHANNEL_REPRESENTATION_INDEX: - // TODO: output channel index mask not currently allowed - // fall through - default: - break; - } - break; - case AUDIO_CHANNEL_REPRESENTATION_INDEX: - switch (outputRepresentation) { - case AUDIO_CHANNEL_REPRESENTATION_POSITION: - memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry), - outputBits, inputBits); - return; - case AUDIO_CHANNEL_REPRESENTATION_INDEX: - // TODO: output channel index mask not currently allowed - // fall through - default: - break; - } - break; - default: - break; - } - LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x", - inputChannelMask, outputChannelMask); -} - -void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames) -{ - memcpy_by_index_array(dst, mOutputChannels, - src, mInputChannels, mIdxAry, mSampleSize, frames); -} - -AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels, - audio_format_t inputFormat, audio_format_t outputFormat, - size_t bufferFrameCount) : - CopyBufferProvider( - channels * audio_bytes_per_sample(inputFormat), - channels * audio_bytes_per_sample(outputFormat), - bufferFrameCount), - mChannels(channels), - mInputFormat(inputFormat), - mOutputFormat(outputFormat) -{ - 
ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat); -} - -void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames) -{ - memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels); -} - // ---------------------------------------------------------------------------- // Ensure mConfiguredNames bitmask is initialized properly on all architectures. @@ -442,6 +123,7 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr t->resampler = NULL; t->downmixerBufferProvider = NULL; t->mReformatBufferProvider = NULL; + t->mTimestretchBufferProvider = NULL; t++; } @@ -454,6 +136,7 @@ AudioMixer::~AudioMixer() delete t->resampler; delete t->downmixerBufferProvider; delete t->mReformatBufferProvider; + delete t->mTimestretchBufferProvider; t++; } delete [] mState.outputTemp; @@ -532,6 +215,7 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, t->mReformatBufferProvider = NULL; t->downmixerBufferProvider = NULL; t->mPostDownmixReformatBufferProvider = NULL; + t->mTimestretchBufferProvider = NULL; t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT; t->mFormat = format; t->mMixerInFormat = selectMixerInFormat(format); @@ -539,6 +223,8 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits( AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO); t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask); + t->mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL; + t->mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL; // Check the downmixing (or upmixing) requirements. 
status_t status = t->prepareForDownmix(); if (status != OK) { @@ -731,6 +417,10 @@ void AudioMixer::track_t::reconfigureBufferProviders() mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider); bufferProvider = mPostDownmixReformatBufferProvider; } + if (mTimestretchBufferProvider) { + mTimestretchBufferProvider->setBufferProvider(bufferProvider); + bufferProvider = mTimestretchBufferProvider; + } } void AudioMixer::deleteTrackName(int name) @@ -751,7 +441,9 @@ void AudioMixer::deleteTrackName(int name) mState.tracks[name].unprepareForDownmix(); // delete the reformatter mState.tracks[name].unprepareForReformat(); - + // delete the timestretch provider + delete track.mTimestretchBufferProvider; + track.mTimestretchBufferProvider = NULL; mTrackNames &= ~(1<<name); } @@ -973,6 +665,26 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) } } break; + case TIMESTRETCH: + switch (param) { + case PLAYBACK_RATE: { + const float speed = reinterpret_cast<float*>(value)[0]; + const float pitch = reinterpret_cast<float*>(value)[1]; + ALOG_ASSERT(AUDIO_TIMESTRETCH_SPEED_MIN <= speed + && speed <= AUDIO_TIMESTRETCH_SPEED_MAX, + "bad speed %f", speed); + ALOG_ASSERT(AUDIO_TIMESTRETCH_PITCH_MIN <= pitch + && pitch <= AUDIO_TIMESTRETCH_PITCH_MAX, + "bad pitch %f", pitch); + if (track.setPlaybackRate(speed, pitch)) { + ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, %f %f", speed, pitch); + // invalidateState(1 << name); + } + } break; + default: + LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param); + } + break; default: LOG_ALWAYS_FATAL("setParameter: bad target %d", target); @@ -1018,6 +730,28 @@ bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSam return false; } +bool AudioMixer::track_t::setPlaybackRate(float speed, float pitch) +{ + if (speed == mSpeed && pitch == mPitch) { + return false; + } + mSpeed = speed; + mPitch = pitch; + if (mTimestretchBufferProvider == NULL) { + // TODO: Remove 
MONO_HACK. Resampler sees #channels after the downmixer + // but if none exists, it is the channel count (1 for mono). + const int timestretchChannelCount = downmixerBufferProvider != NULL + ? mMixerChannelCount : channelCount; + mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount, + mMixerInFormat, sampleRate, speed, pitch); + reconfigureBufferProviders(); + } else { + reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider) + ->setPlaybackRate(speed, pitch); + } + return true; +} + /* Checks to see if the volume ramp has completed and clears the increment * variables appropriately. * @@ -1096,6 +830,8 @@ void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider mState.tracks[name].downmixerBufferProvider->reset(); } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) { mState.tracks[name].mPostDownmixReformatBufferProvider->reset(); + } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) { + mState.tracks[name].mTimestretchBufferProvider->reset(); } mState.tracks[name].mInputBufferProvider = bufferProvider; diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h index 381036b..e27a0d1 100644 --- a/services/audioflinger/AudioMixer.h +++ b/services/audioflinger/AudioMixer.h @@ -29,6 +29,7 @@ #include <utils/threads.h> #include "AudioResampler.h" +#include "BufferProviders.h" // FIXME This is actually unity gain, which might not be max in future, expressed in U.12 #define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT @@ -72,6 +73,7 @@ public: RESAMPLE = 0x3001, RAMP_VOLUME = 0x3002, // ramp to new volume VOLUME = 0x3003, // don't ramp + TIMESTRETCH = 0x3004, // set Parameter names // for target TRACK @@ -99,6 +101,9 @@ public: VOLUME0 = 0x4200, VOLUME1 = 0x4201, AUXLEVEL = 0x4210, + // for target TIMESTRETCH + PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name; + // parameter 'value' is a pointer to the new playback rate. 
}; @@ -159,7 +164,6 @@ private: struct state_t; struct track_t; - class CopyBufferProvider; typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux); @@ -214,6 +218,9 @@ private: /* Buffer providers are constructed to translate the track input data as needed. * + * TODO: perhaps make a single PlaybackConverterProvider class to move + * all pre-mixer track buffer conversions outside the AudioMixer class. + * * 1) mInputBufferProvider: The AudioTrack buffer provider. * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to * match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer @@ -223,13 +230,14 @@ private: * the number of channels required by the mixer sink. * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from * the downmixer requirements to the mixer engine input requirements. + * 5) mTimestretchBufferProvider: Adds timestretching for playback rate */ AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider. - CopyBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting. - CopyBufferProvider* downmixerBufferProvider; // wrapper for channel conversion. - CopyBufferProvider* mPostDownmixReformatBufferProvider; + PassthruBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting. + PassthruBufferProvider* downmixerBufferProvider; // wrapper for channel conversion. 
+ PassthruBufferProvider* mPostDownmixReformatBufferProvider; + PassthruBufferProvider* mTimestretchBufferProvider; - // 16-byte boundary int32_t sessionId; audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT) @@ -251,6 +259,9 @@ private: audio_channel_mask_t mMixerChannelMask; uint32_t mMixerChannelCount; + float mSpeed; + float mPitch; + bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; } bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate); bool doesResample() const { return resampler != NULL; } @@ -263,6 +274,7 @@ private: void unprepareForDownmix(); status_t prepareForReformat(); void unprepareForReformat(); + bool setPlaybackRate(float speed, float pitch); void reconfigureBufferProviders(); }; @@ -282,112 +294,6 @@ private: track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32))); }; - // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider, - // and ReformatBufferProvider. - // It handles a private buffer for use in converting format or channel masks from the - // input data to a form acceptable by the mixer. - // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the - // processing pipeline. - class CopyBufferProvider : public AudioBufferProvider { - public: - // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes). - // If bufferFrameCount is 0, no private buffer is created and in-place modification of - // the upstream buffer provider's buffers is performed by copyFrames(). - CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize, - size_t bufferFrameCount); - virtual ~CopyBufferProvider(); - - // Overrides AudioBufferProvider methods - virtual status_t getNextBuffer(Buffer* buffer, int64_t pts); - virtual void releaseBuffer(Buffer* buffer); - - // Other public methods - - // call this to release the buffer to the upstream provider. 
- // treat it as an audio discontinuity for future samples. - virtual void reset(); - - // this function should be supplied by the derived class. It converts - // #frames in the *src pointer to the *dst pointer. It is public because - // some providers will allow this to work on arbitrary buffers outside - // of the internal buffers. - virtual void copyFrames(void *dst, const void *src, size_t frames) = 0; - - // set the upstream buffer provider. Consider calling "reset" before this function. - void setBufferProvider(AudioBufferProvider *p) { - mTrackBufferProvider = p; - } - - protected: - AudioBufferProvider* mTrackBufferProvider; - const size_t mInputFrameSize; - const size_t mOutputFrameSize; - private: - AudioBufferProvider::Buffer mBuffer; - const size_t mLocalBufferFrameCount; - void* mLocalBufferData; - size_t mConsumed; - }; - - // DownmixerBufferProvider wraps a track AudioBufferProvider to provide - // position dependent downmixing by an Audio Effect. - class DownmixerBufferProvider : public CopyBufferProvider { - public: - DownmixerBufferProvider(audio_channel_mask_t inputChannelMask, - audio_channel_mask_t outputChannelMask, audio_format_t format, - uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount); - virtual ~DownmixerBufferProvider(); - virtual void copyFrames(void *dst, const void *src, size_t frames); - bool isValid() const { return mDownmixHandle != NULL; } - - static status_t init(); - static bool isMultichannelCapable() { return sIsMultichannelCapable; } - - protected: - effect_handle_t mDownmixHandle; - effect_config_t mDownmixConfig; - - // effect descriptor for the downmixer used by the mixer - static effect_descriptor_t sDwnmFxDesc; - // indicates whether a downmix effect has been found and is usable by this mixer - static bool sIsMultichannelCapable; - // FIXME: should we allow effects outside of the framework? - // We need to here. A special ioId that must be <= -2 so it does not map to a session. 
- static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2; - }; - - // RemixBufferProvider wraps a track AudioBufferProvider to perform an - // upmix or downmix to the proper channel count and mask. - class RemixBufferProvider : public CopyBufferProvider { - public: - RemixBufferProvider(audio_channel_mask_t inputChannelMask, - audio_channel_mask_t outputChannelMask, audio_format_t format, - size_t bufferFrameCount); - virtual void copyFrames(void *dst, const void *src, size_t frames); - - protected: - const audio_format_t mFormat; - const size_t mSampleSize; - const size_t mInputChannels; - const size_t mOutputChannels; - int8_t mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices - }; - - // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data - // to an acceptable mixer input format type. - class ReformatBufferProvider : public CopyBufferProvider { - public: - ReformatBufferProvider(int32_t channels, - audio_format_t inputFormat, audio_format_t outputFormat, - size_t bufferFrameCount); - virtual void copyFrames(void *dst, const void *src, size_t frames); - - protected: - const int32_t mChannels; - const audio_format_t mInputFormat; - const audio_format_t mOutputFormat; - }; - // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc. 
uint32_t mTrackNames; diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp index 46e3d6c..e49b7b1 100644 --- a/services/audioflinger/AudioResampler.cpp +++ b/services/audioflinger/AudioResampler.cpp @@ -41,7 +41,7 @@ public: AudioResamplerOrder1(int inChannelCount, int32_t sampleRate) : AudioResampler(inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) { } - virtual void resample(int32_t* out, size_t outFrameCount, + virtual size_t resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); private: // number of bits used in interpolation multiply - 15 bits avoids overflow @@ -51,9 +51,9 @@ private: static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits; void init() {} - void resampleMono16(int32_t* out, size_t outFrameCount, + size_t resampleMono16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); - void resampleStereo16(int32_t* out, size_t outFrameCount, + size_t resampleStereo16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); #ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1 void AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx, @@ -329,7 +329,7 @@ void AudioResampler::reset() { // ---------------------------------------------------------------------------- -void AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount, +size_t AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { // should never happen, but we overflow if it does @@ -338,15 +338,16 @@ void AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount, // select the appropriate resampler switch (mChannelCount) { case 1: - resampleMono16(out, outFrameCount, provider); - break; + return resampleMono16(out, outFrameCount, provider); case 2: - resampleStereo16(out, outFrameCount, provider); - break; + return resampleStereo16(out, outFrameCount, provider); + default: + LOG_ALWAYS_FATAL("invalid 
channel count: %d", mChannelCount); + return 0; } } -void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, +size_t AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { int32_t vl = mVolume[0]; @@ -442,9 +443,10 @@ resampleStereo16_exit: // save state mInputIndex = inputIndex; mPhaseFraction = phaseFraction; + return outputIndex / 2 /* channels for stereo */; } -void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, +size_t AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { int32_t vl = mVolume[0]; @@ -538,6 +540,7 @@ resampleMono16_exit: // save state mInputIndex = inputIndex; mPhaseFraction = phaseFraction; + return outputIndex; } #ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1 diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h index 863614a..a8e3e6f 100644 --- a/services/audioflinger/AudioResampler.h +++ b/services/audioflinger/AudioResampler.h @@ -67,12 +67,18 @@ public: // Resample int16_t samples from provider and accumulate into 'out'. // A mono provider delivers a sequence of samples. // A stereo provider delivers a sequence of interleaved pairs of samples. - // Multi-channel providers are not supported. + // // In either case, 'out' holds interleaved pairs of fixed-point Q4.27. // That is, for a mono provider, there is an implicit up-channeling. // Since this method accumulates, the caller is responsible for clearing 'out' initially. - // FIXME assumes provider is always successful; it should return the actual frame count. - virtual void resample(int32_t* out, size_t outFrameCount, + // + // For a float resampler, 'out' holds interleaved pairs of float samples. + // + // Multichannel interleaved frames for n > 2 is supported for quality DYN_LOW_QUALITY, + // DYN_MED_QUALITY, and DYN_HIGH_QUALITY. 
+ // + // Returns the number of frames resampled into the out buffer. + virtual size_t resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) = 0; virtual void reset(); diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp index d3cbd1c..172c2a5 100644 --- a/services/audioflinger/AudioResamplerCubic.cpp +++ b/services/audioflinger/AudioResamplerCubic.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#define LOG_TAG "AudioSRC" +#define LOG_TAG "AudioResamplerCubic" #include <stdint.h> #include <string.h> @@ -32,7 +32,7 @@ void AudioResamplerCubic::init() { memset(&right, 0, sizeof(state)); } -void AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount, +size_t AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { // should never happen, but we overflow if it does @@ -41,15 +41,16 @@ void AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount, // select the appropriate resampler switch (mChannelCount) { case 1: - resampleMono16(out, outFrameCount, provider); - break; + return resampleMono16(out, outFrameCount, provider); case 2: - resampleStereo16(out, outFrameCount, provider); - break; + return resampleStereo16(out, outFrameCount, provider); + default: + LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount); + return 0; } } -void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, +size_t AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { int32_t vl = mVolume[0]; @@ -67,7 +68,7 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); if (mBuffer.raw == NULL) { - return; + return 0; } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); } @@ -115,9 +116,10 @@ save_state: // ALOGW("Done: index=%d, fraction=%u", 
inputIndex, phaseFraction); mInputIndex = inputIndex; mPhaseFraction = phaseFraction; + return outputIndex / 2 /* channels for stereo */; } -void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, +size_t AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { int32_t vl = mVolume[0]; @@ -135,7 +137,7 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); if (mBuffer.raw == NULL) { - return; + return 0; } // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } @@ -182,6 +184,7 @@ save_state: // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction); mInputIndex = inputIndex; mPhaseFraction = phaseFraction; + return outputIndex; } // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h index 1ddc5f9..4b45b0b 100644 --- a/services/audioflinger/AudioResamplerCubic.h +++ b/services/audioflinger/AudioResamplerCubic.h @@ -31,7 +31,7 @@ public: AudioResamplerCubic(int inChannelCount, int32_t sampleRate) : AudioResampler(inChannelCount, sampleRate, MED_QUALITY) { } - virtual void resample(int32_t* out, size_t outFrameCount, + virtual size_t resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); private: // number of bits used in interpolation multiply - 14 bits avoids overflow @@ -43,9 +43,9 @@ private: int32_t a, b, c, y0, y1, y2, y3; } state; void init(); - void resampleMono16(int32_t* out, size_t outFrameCount, + size_t resampleMono16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); - void resampleStereo16(int32_t* out, size_t outFrameCount, + size_t resampleStereo16(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); static inline int32_t interp(state* p, int32_t x) { return (((((p->a * 
x >> 14) + p->b) * x >> 14) + p->c) * x >> 14) + p->y1; diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp index c21d4ca..6481b85 100644 --- a/services/audioflinger/AudioResamplerDyn.cpp +++ b/services/audioflinger/AudioResamplerDyn.cpp @@ -477,15 +477,15 @@ void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate) } template<typename TC, typename TI, typename TO> -void AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount, +size_t AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { - (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider); + return (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider); } template<typename TC, typename TI, typename TO> template<int CHANNELS, bool LOCKED, int STRIDE> -void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, +size_t AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider) { // TODO Mono -> Mono is not supported. OUTPUT_CHANNELS reflects minimum of stereo out. 
@@ -610,6 +610,7 @@ resample_exit: ALOG_ASSERT(mBuffer.frameCount == 0); // there must be no frames in the buffer mInBuffer.setImpulse(impulse); mPhaseFraction = phaseFraction; + return outputIndex / OUTPUT_CHANNELS; } /* instantiate templates used by AudioResampler::create */ diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h index 238b163..3b1c381 100644 --- a/services/audioflinger/AudioResamplerDyn.h +++ b/services/audioflinger/AudioResamplerDyn.h @@ -52,7 +52,7 @@ public: virtual void setVolume(float left, float right); - virtual void resample(int32_t* out, size_t outFrameCount, + virtual size_t resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); private: @@ -111,10 +111,10 @@ private: int inSampleRate, int outSampleRate, double tbwCheat); template<int CHANNELS, bool LOCKED, int STRIDE> - void resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider); + size_t resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider); // define a pointer to member function type for resample - typedef void (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out, + typedef size_t (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out, size_t outFrameCount, AudioBufferProvider* provider); // data - the contiguous storage and layout of these is important. 
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp index ba9a356..41730ee 100644 --- a/services/audioflinger/AudioResamplerSinc.cpp +++ b/services/audioflinger/AudioResamplerSinc.cpp @@ -256,7 +256,7 @@ void AudioResamplerSinc::setVolume(float left, float right) { mVolumeSIMD[1] = u4_28_from_float(clampFloatVol(right)); } -void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, +size_t AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { // FIXME store current state (up or down sample) and only load the coefs when the state @@ -272,17 +272,18 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, // select the appropriate resampler switch (mChannelCount) { case 1: - resample<1>(out, outFrameCount, provider); - break; + return resample<1>(out, outFrameCount, provider); case 2: - resample<2>(out, outFrameCount, provider); - break; + return resample<2>(out, outFrameCount, provider); + default: + LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount); + return 0; } } template<int CHANNELS> -void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, +size_t AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider) { const Constants& c(*mConstants); @@ -357,6 +358,7 @@ resample_exit: mImpulse = impulse; mInputIndex = inputIndex; mPhaseFraction = phaseFraction; + return outputIndex / CHANNELS; } template<int CHANNELS> diff --git a/services/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h index 6d8e85d..0fbeac8 100644 --- a/services/audioflinger/AudioResamplerSinc.h +++ b/services/audioflinger/AudioResamplerSinc.h @@ -39,7 +39,7 @@ public: virtual ~AudioResamplerSinc(); - virtual void resample(int32_t* out, size_t outFrameCount, + virtual size_t resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); private: void init(); @@ -47,7 +47,7 @@ 
private: virtual void setVolume(float left, float right); template<int CHANNELS> - void resample(int32_t* out, size_t outFrameCount, + size_t resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); template<int CHANNELS> diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp new file mode 100644 index 0000000..dcae5e7 --- /dev/null +++ b/services/audioflinger/BufferProviders.cpp @@ -0,0 +1,540 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "BufferProvider" +//#define LOG_NDEBUG 0 + +#include <audio_effects/effect_downmix.h> +#include <audio_utils/primitives.h> +#include <audio_utils/format.h> +#include <media/AudioResamplerPublic.h> +#include <media/EffectsFactoryApi.h> + +#include <utils/Log.h> + +#include "Configuration.h" +#include "BufferProviders.h" + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0])) +#endif + +namespace android { + +// ---------------------------------------------------------------------------- + +template <typename T> +static inline T min(const T& a, const T& b) +{ + return a < b ? 
a : b; +} + +CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize, + size_t outputFrameSize, size_t bufferFrameCount) : + mInputFrameSize(inputFrameSize), + mOutputFrameSize(outputFrameSize), + mLocalBufferFrameCount(bufferFrameCount), + mLocalBufferData(NULL), + mConsumed(0) +{ + ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this, + inputFrameSize, outputFrameSize, bufferFrameCount); + LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0, + "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)", + inputFrameSize, outputFrameSize); + if (mLocalBufferFrameCount) { + (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize); + } + mBuffer.frameCount = 0; +} + +CopyBufferProvider::~CopyBufferProvider() +{ + ALOGV("~CopyBufferProvider(%p)", this); + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + free(mLocalBufferData); +} + +status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, + int64_t pts) +{ + //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)", + // this, pBuffer, pBuffer->frameCount, pts); + if (mLocalBufferFrameCount == 0) { + status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts); + if (res == OK) { + copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount); + } + return res; + } + if (mBuffer.frameCount == 0) { + mBuffer.frameCount = pBuffer->frameCount; + status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts); + // At one time an upstream buffer provider had + // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014. + // + // By API spec, if res != OK, then mBuffer.frameCount == 0. + // but there may be improper implementations. + ALOG_ASSERT(res == OK || mBuffer.frameCount == 0); + if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe. 
+ pBuffer->raw = NULL; + pBuffer->frameCount = 0; + return res; + } + mConsumed = 0; + } + ALOG_ASSERT(mConsumed < mBuffer.frameCount); + size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed); + count = min(count, pBuffer->frameCount); + pBuffer->raw = mLocalBufferData; + pBuffer->frameCount = count; + copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, + pBuffer->frameCount); + return OK; +} + +void CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) +{ + //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))", + // this, pBuffer, pBuffer->frameCount); + if (mLocalBufferFrameCount == 0) { + mTrackBufferProvider->releaseBuffer(pBuffer); + return; + } + // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount"); + mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content + if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + ALOG_ASSERT(mBuffer.frameCount == 0); + } + pBuffer->raw = NULL; + pBuffer->frameCount = 0; +} + +void CopyBufferProvider::reset() +{ + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + mConsumed = 0; +} + +DownmixerBufferProvider::DownmixerBufferProvider( + audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) : + CopyBufferProvider( + audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask), + audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask), + bufferFrameCount) // set bufferFrameCount to 0 to do in-place +{ + ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)", + this, inputChannelMask, outputChannelMask, format, + sampleRate, sessionId); + if (!sIsMultichannelCapable + || EffectCreate(&sDwnmFxDesc.uuid, + sessionId, + SESSION_ID_INVALID_AND_IGNORED, + 
&mDownmixHandle) != 0) { + ALOGE("DownmixerBufferProvider() error creating downmixer effect"); + mDownmixHandle = NULL; + return; + } + // channel input configuration will be overridden per-track + mDownmixConfig.inputCfg.channels = inputChannelMask; // FIXME: Should be bits + mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits + mDownmixConfig.inputCfg.format = format; + mDownmixConfig.outputCfg.format = format; + mDownmixConfig.inputCfg.samplingRate = sampleRate; + mDownmixConfig.outputCfg.samplingRate = sampleRate; + mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; + mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; + // input and output buffer provider, and frame count will not be used as the downmix effect + // process() function is called directly (see DownmixerBufferProvider::getNextBuffer()) + mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | + EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE; + mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask; + + int cmdStatus; + uint32_t replySize = sizeof(int); + + // Configure downmixer + status_t status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/, + &mDownmixConfig /*pCmdData*/, + &replySize, &cmdStatus /*pReplyData*/); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + + // Enable downmixer + replySize = sizeof(int); + status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/, + &replySize, &cmdStatus /*pReplyData*/); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + 
return; + } + + // Set downmix type + // parameter size rounded for padding on 32bit boundary + const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int); + const int downmixParamSize = + sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t); + effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize); + param->psize = sizeof(downmix_params_t); + const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE; + memcpy(param->data, &downmixParam, param->psize); + const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD; + param->vsize = sizeof(downmix_type_t); + memcpy(param->data + psizePadded, &downmixType, param->vsize); + replySize = sizeof(int); + status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */, + param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/); + free(param); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType); +} + +DownmixerBufferProvider::~DownmixerBufferProvider() +{ + ALOGV("~DownmixerBufferProvider (%p)", this); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; +} + +void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + mDownmixConfig.inputCfg.buffer.frameCount = frames; + mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src); + mDownmixConfig.outputCfg.buffer.frameCount = frames; + mDownmixConfig.outputCfg.buffer.raw = dst; + // may be in-place if src == dst. + status_t res = (*mDownmixHandle)->process(mDownmixHandle, + &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer); + ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res); +} + +/* call once in a pthread_once handler. 
*/ +/*static*/ status_t DownmixerBufferProvider::init() +{ + // find multichannel downmix effect if we have to play multichannel content + uint32_t numEffects = 0; + int ret = EffectQueryNumberEffects(&numEffects); + if (ret != 0) { + ALOGE("AudioMixer() error %d querying number of effects", ret); + return NO_INIT; + } + ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); + + for (uint32_t i = 0 ; i < numEffects ; i++) { + if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) { + ALOGV("effect %d is called %s", i, sDwnmFxDesc.name); + if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { + ALOGI("found effect \"%s\" from %s", + sDwnmFxDesc.name, sDwnmFxDesc.implementor); + sIsMultichannelCapable = true; + break; + } + } + } + ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); + return NO_INIT; +} + +/*static*/ bool DownmixerBufferProvider::sIsMultichannelCapable = false; +/*static*/ effect_descriptor_t DownmixerBufferProvider::sDwnmFxDesc; + +RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + size_t bufferFrameCount) : + CopyBufferProvider( + audio_bytes_per_sample(format) + * audio_channel_count_from_out_mask(inputChannelMask), + audio_bytes_per_sample(format) + * audio_channel_count_from_out_mask(outputChannelMask), + bufferFrameCount), + mFormat(format), + mSampleSize(audio_bytes_per_sample(format)), + mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)), + mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask)) +{ + ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu", + this, format, inputChannelMask, outputChannelMask, + mInputChannels, mOutputChannels); + + const audio_channel_representation_t inputRepresentation = + audio_channel_mask_get_representation(inputChannelMask); + const audio_channel_representation_t outputRepresentation = + 
audio_channel_mask_get_representation(outputChannelMask); + const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask); + const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask); + + switch (inputRepresentation) { + case AUDIO_CHANNEL_REPRESENTATION_POSITION: + switch (outputRepresentation) { + case AUDIO_CHANNEL_REPRESENTATION_POSITION: + memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry), + outputBits, inputBits); + return; + case AUDIO_CHANNEL_REPRESENTATION_INDEX: + // TODO: output channel index mask not currently allowed + // fall through + default: + break; + } + break; + case AUDIO_CHANNEL_REPRESENTATION_INDEX: + switch (outputRepresentation) { + case AUDIO_CHANNEL_REPRESENTATION_POSITION: + memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry), + outputBits, inputBits); + return; + case AUDIO_CHANNEL_REPRESENTATION_INDEX: + // TODO: output channel index mask not currently allowed + // fall through + default: + break; + } + break; + default: + break; + } + LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x", + inputChannelMask, outputChannelMask); +} + +void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + memcpy_by_index_array(dst, mOutputChannels, + src, mInputChannels, mIdxAry, mSampleSize, frames); +} + +ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount, + audio_format_t inputFormat, audio_format_t outputFormat, + size_t bufferFrameCount) : + CopyBufferProvider( + channelCount * audio_bytes_per_sample(inputFormat), + channelCount * audio_bytes_per_sample(outputFormat), + bufferFrameCount), + mChannelCount(channelCount), + mInputFormat(inputFormat), + mOutputFormat(outputFormat) +{ + ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)", + this, channelCount, inputFormat, outputFormat); +} + +void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + memcpy_by_audio_format(dst, mOutputFormat, src, 
mInputFormat, frames * mChannelCount); +} + +TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount, + audio_format_t format, uint32_t sampleRate, float speed, float pitch) : + mChannelCount(channelCount), + mFormat(format), + mSampleRate(sampleRate), + mFrameSize(channelCount * audio_bytes_per_sample(format)), + mSpeed(speed), + mPitch(pitch), + mLocalBufferFrameCount(0), + mLocalBufferData(NULL), + mRemaining(0), + mSonicStream(sonicCreateStream(sampleRate, mChannelCount)) +{ + ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f)", + this, channelCount, format, sampleRate, speed, pitch); + mBuffer.frameCount = 0; + + LOG_ALWAYS_FATAL_IF(mSonicStream == NULL, + "TimestretchBufferProvider can't allocate Sonic stream"); + sonicSetSpeed(mSonicStream, speed); +} + +TimestretchBufferProvider::~TimestretchBufferProvider() +{ + ALOGV("~TimestretchBufferProvider(%p)", this); + sonicDestroyStream(mSonicStream); + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + free(mLocalBufferData); +} + +status_t TimestretchBufferProvider::getNextBuffer( + AudioBufferProvider::Buffer *pBuffer, int64_t pts) +{ + ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)", + this, pBuffer, pBuffer->frameCount, pts); + + // BYPASS + //return mTrackBufferProvider->getNextBuffer(pBuffer, pts); + + // check if previously processed data is sufficient. + if (pBuffer->frameCount <= mRemaining) { + ALOGV("previous sufficient"); + pBuffer->raw = mLocalBufferData; + return OK; + } + + // do we need to resize our buffer? 
+ if (pBuffer->frameCount > mLocalBufferFrameCount) { + void *newmem; + if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) { + if (mRemaining != 0) { + memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize); + } + free(mLocalBufferData); + mLocalBufferData = newmem; + mLocalBufferFrameCount = pBuffer->frameCount; + } + } + + // need to fetch more data + const size_t outputDesired = pBuffer->frameCount - mRemaining; + mBuffer.frameCount = mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL + ? outputDesired : outputDesired * mSpeed + 1; + + status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts); + + ALOG_ASSERT(res == OK || mBuffer.frameCount == 0); + if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe. + ALOGD("buffer error"); + if (mRemaining == 0) { + pBuffer->raw = NULL; + pBuffer->frameCount = 0; + return res; + } else { // return partial count + pBuffer->raw = mLocalBufferData; + pBuffer->frameCount = mRemaining; + return OK; + } + } + + // time-stretch the data + size_t dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired); + size_t srcAvailable = mBuffer.frameCount; + processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable, + mBuffer.raw, &srcAvailable); + + // release all data consumed + mBuffer.frameCount = srcAvailable; + mTrackBufferProvider->releaseBuffer(&mBuffer); + + // update buffer vars with the actual data processed and return with buffer + mRemaining += dstAvailable; + + pBuffer->raw = mLocalBufferData; + pBuffer->frameCount = mRemaining; + + return OK; +} + +void TimestretchBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) +{ + ALOGV("TimestretchBufferProvider(%p)::releaseBuffer(%p (%zu))", + this, pBuffer, pBuffer->frameCount); + + // BYPASS + //return mTrackBufferProvider->releaseBuffer(pBuffer); + + // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount"); + if (pBuffer->frameCount < mRemaining) { + 
memcpy(mLocalBufferData, + (uint8_t*)mLocalBufferData + pBuffer->frameCount * mFrameSize, + (mRemaining - pBuffer->frameCount) * mFrameSize); + mRemaining -= pBuffer->frameCount; + } else if (pBuffer->frameCount == mRemaining) { + mRemaining = 0; + } else { + LOG_ALWAYS_FATAL("Releasing more frames(%zu) than available(%zu)", + pBuffer->frameCount, mRemaining); + } + + pBuffer->raw = NULL; + pBuffer->frameCount = 0; +} + +void TimestretchBufferProvider::reset() +{ + mRemaining = 0; +} + +status_t TimestretchBufferProvider::setPlaybackRate(float speed, float pitch) +{ + mSpeed = speed; + mPitch = pitch; + + sonicSetSpeed(mSonicStream, speed); + //TODO: pitch is ignored for now + return OK; +} + +void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames, + const void *srcBuffer, size_t *srcFrames) +{ + ALOGV("processFrames(%zu %zu) remaining(%zu)", *dstFrames, *srcFrames, mRemaining); + // Note dstFrames is the required number of frames. + + // Ensure consumption from src is as expected. 
+ const size_t targetSrc = *dstFrames * mSpeed; + if (*srcFrames < targetSrc) { // limit dst frames to that possible + *dstFrames = *srcFrames / mSpeed; + } else if (*srcFrames > targetSrc + 1) { + *srcFrames = targetSrc + 1; + } + + switch (mFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + if (sonicWriteFloatToStream(mSonicStream, (float*)srcBuffer, *srcFrames) != 1) { + ALOGE("sonicWriteFloatToStream cannot realloc"); + *srcFrames = 0; // cannot consume all of srcBuffer + } + *dstFrames = sonicReadFloatFromStream(mSonicStream, (float*)dstBuffer, *dstFrames); + break; + case AUDIO_FORMAT_PCM_16_BIT: + if (sonicWriteShortToStream(mSonicStream, (short*)srcBuffer, *srcFrames) != 1) { + ALOGE("sonicWriteShortToStream cannot realloc"); + *srcFrames = 0; // cannot consume all of srcBuffer + } + *dstFrames = sonicReadShortFromStream(mSonicStream, (short*)dstBuffer, *dstFrames); + break; + default: + // could also be caught on construction + LOG_ALWAYS_FATAL("invalid format %#x for TimestretchBufferProvider", mFormat); + } +} + +// ---------------------------------------------------------------------------- +} // namespace android diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h new file mode 100644 index 0000000..42030c0 --- /dev/null +++ b/services/audioflinger/BufferProviders.h @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_BUFFER_PROVIDERS_H +#define ANDROID_BUFFER_PROVIDERS_H + +#include <stdint.h> +#include <sys/types.h> + +#include <hardware/audio_effect.h> +#include <media/AudioBufferProvider.h> +#include <system/audio.h> +#include <sonic.h> + +namespace android { + +// ---------------------------------------------------------------------------- + +class PassthruBufferProvider : public AudioBufferProvider { +public: + PassthruBufferProvider() : mTrackBufferProvider(NULL) { } + + virtual ~PassthruBufferProvider() { } + + // call this to release the buffer to the upstream provider. + // treat it as an audio discontinuity for future samples. + virtual void reset() { } + + // set the upstream buffer provider. Consider calling "reset" before this function. + virtual void setBufferProvider(AudioBufferProvider *p) { + mTrackBufferProvider = p; + } + +protected: + AudioBufferProvider *mTrackBufferProvider; +}; + +// Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider, +// and ReformatBufferProvider. +// It handles a private buffer for use in converting format or channel masks from the +// input data to a form acceptable by the mixer. +// TODO: Make a ResamplerBufferProvider when integers are entirely removed from the +// processing pipeline. +class CopyBufferProvider : public PassthruBufferProvider { +public: + // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes). + // If bufferFrameCount is 0, no private buffer is created and in-place modification of + // the upstream buffer provider's buffers is performed by copyFrames(). 
+ CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize, + size_t bufferFrameCount); + virtual ~CopyBufferProvider(); + + // Overrides AudioBufferProvider methods + virtual status_t getNextBuffer(Buffer *buffer, int64_t pts); + virtual void releaseBuffer(Buffer *buffer); + + // Overrides PassthruBufferProvider + virtual void reset(); + + // this function should be supplied by the derived class. It converts + // #frames in the *src pointer to the *dst pointer. It is public because + // some providers will allow this to work on arbitrary buffers outside + // of the internal buffers. + virtual void copyFrames(void *dst, const void *src, size_t frames) = 0; + +protected: + const size_t mInputFrameSize; + const size_t mOutputFrameSize; +private: + AudioBufferProvider::Buffer mBuffer; + const size_t mLocalBufferFrameCount; + void *mLocalBufferData; + size_t mConsumed; +}; + +// DownmixerBufferProvider derives from CopyBufferProvider to provide +// position dependent downmixing by an Audio Effect. +class DownmixerBufferProvider : public CopyBufferProvider { +public: + DownmixerBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount); + virtual ~DownmixerBufferProvider(); + //Overrides + virtual void copyFrames(void *dst, const void *src, size_t frames); + + bool isValid() const { return mDownmixHandle != NULL; } + static status_t init(); + static bool isMultichannelCapable() { return sIsMultichannelCapable; } + +protected: + effect_handle_t mDownmixHandle; + effect_config_t mDownmixConfig; + + // effect descriptor for the downmixer used by the mixer + static effect_descriptor_t sDwnmFxDesc; + // indicates whether a downmix effect has been found and is usable by this mixer + static bool sIsMultichannelCapable; + // FIXME: should we allow effects outside of the framework? + // We need to here. 
A special ioId that must be <= -2 so it does not map to a session. + static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2; +}; + +// RemixBufferProvider derives from CopyBufferProvider to perform an +// upmix or downmix to the proper channel count and mask. +class RemixBufferProvider : public CopyBufferProvider { +public: + RemixBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + size_t bufferFrameCount); + //Overrides + virtual void copyFrames(void *dst, const void *src, size_t frames); + +protected: + const audio_format_t mFormat; + const size_t mSampleSize; + const size_t mInputChannels; + const size_t mOutputChannels; + int8_t mIdxAry[sizeof(uint32_t) * 8]; // 32 bits => channel indices +}; + +// ReformatBufferProvider derives from CopyBufferProvider to convert the input data +// to an acceptable mixer input format type. +class ReformatBufferProvider : public CopyBufferProvider { +public: + ReformatBufferProvider(int32_t channelCount, + audio_format_t inputFormat, audio_format_t outputFormat, + size_t bufferFrameCount); + virtual void copyFrames(void *dst, const void *src, size_t frames); + +protected: + const uint32_t mChannelCount; + const audio_format_t mInputFormat; + const audio_format_t mOutputFormat; +}; + +// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching +class TimestretchBufferProvider : public PassthruBufferProvider { +public: + TimestretchBufferProvider(int32_t channelCount, + audio_format_t format, uint32_t sampleRate, float speed, float pitch); + virtual ~TimestretchBufferProvider(); + + // Overrides AudioBufferProvider methods + virtual status_t getNextBuffer(Buffer* buffer, int64_t pts); + virtual void releaseBuffer(Buffer* buffer); + + // Overrides PassthruBufferProvider + virtual void reset(); + + virtual status_t setPlaybackRate(float speed, float pitch); + + // processes frames + // dstBuffer is where to place the data + // dstFrames 
[in/out] is the desired frames (return with actual placed in buffer) + // srcBuffer is the source data + // srcFrames [in/out] is the available source frames (return with consumed) + virtual void processFrames(void *dstBuffer, size_t *dstFrames, + const void *srcBuffer, size_t *srcFrames); + +protected: + const uint32_t mChannelCount; + const audio_format_t mFormat; + const uint32_t mSampleRate; // const for now (TODO change this) + const size_t mFrameSize; + float mSpeed; + float mPitch; + +private: + AudioBufferProvider::Buffer mBuffer; + size_t mLocalBufferFrameCount; + void *mLocalBufferData; + size_t mRemaining; + sonicStream mSonicStream; +}; + +// ---------------------------------------------------------------------------- +} // namespace android + +#endif // ANDROID_BUFFER_PROVIDERS_H diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp index efbdcff..834947f 100644 --- a/services/audioflinger/PatchPanel.cpp +++ b/services/audioflinger/PatchPanel.cpp @@ -200,26 +200,17 @@ status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *pa status = BAD_VALUE; goto exit; } - // limit to connections between devices and input streams for HAL before 3.0 - if (patch->sinks[i].ext.mix.hw_module == srcModule && - (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) && - (patch->sinks[i].type != AUDIO_PORT_TYPE_MIX)) { - ALOGW("createAudioPatch() invalid sink type %d for device source", - patch->sinks[i].type); - status = BAD_VALUE; - goto exit; - } } - if (patch->sinks[0].ext.device.hw_module != srcModule) { - // limit to device to device connection if not on same hw module - if (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) { - ALOGW("createAudioPatch() invalid sink type for cross hw module"); - status = INVALID_OPERATION; - goto exit; - } - // special case num sources == 2 -=> reuse an exiting output mix to connect to the - // sink + // manage patches requiring a software bridge + // - Device to device AND + 
// - source HW module != destination HW module OR + // - audio HAL version < 3.0 + // - special patch request with 2 sources (reuse one existing output mix) + if ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) && + ((patch->sinks[0].ext.device.hw_module != srcModule) || + (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) || + (patch->num_sources == 2))) { if (patch->num_sources == 2) { if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX || patch->sinks[0].ext.device.hw_module != @@ -304,6 +295,11 @@ status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *pa &halHandle); } } else { + if (patch->sinks[0].type != AUDIO_PORT_TYPE_MIX) { + status = INVALID_OPERATION; + goto exit; + } + sp<ThreadBase> thread = audioflinger->checkRecordThread_l( patch->sinks[0].ext.mix.handle); if (thread == 0) { @@ -472,6 +468,7 @@ status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch, // this track is given the same buffer as the PatchRecord buffer patch->mPatchTrack = new PlaybackThread::PatchTrack( patch->mPlaybackThread.get(), + audioPatch->sources[1].ext.mix.usecase.stream, sampleRate, outChannelMask, format, @@ -578,8 +575,8 @@ status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle break; } - if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE && - patch->sinks[0].ext.device.hw_module != srcModule) { + if (removedPatch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE || + removedPatch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) { clearPatchConnections(removedPatch); break; } @@ -693,5 +690,4 @@ status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_co return NO_ERROR; } - } // namespace android diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h index 45df6a9..c51021b 100644 --- a/services/audioflinger/PlaybackTracks.h +++ b/services/audioflinger/PlaybackTracks.h @@ -298,6 +298,7 @@ class PatchTrack : public Track, public 
PatchProxyBufferProvider { public: PatchTrack(PlaybackThread *playbackThread, + audio_stream_type_t streamType, uint32_t sampleRate, audio_channel_mask_t channelMask, audio_format_t format, diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h index 204a9d6..25d6d95 100644 --- a/services/audioflinger/RecordTracks.h +++ b/services/audioflinger/RecordTracks.h @@ -34,6 +34,7 @@ public: IAudioFlinger::track_flags_t flags, track_type type); virtual ~RecordTrack(); + virtual status_t initCheck() const; virtual status_t start(AudioSystem::sync_event_t event, int triggerSession); virtual void stop(); @@ -66,21 +67,6 @@ private: bool mOverflow; // overflow on most recent attempt to fill client buffer - // updated by RecordThread::readInputParameters_l() - AudioResampler *mResampler; - - // interleaved stereo pairs of fixed-point Q4.27 - int32_t *mRsmpOutBuffer; - // current allocated frame count for the above, which may be larger than needed - size_t mRsmpOutFrameCount; - - size_t mRsmpInUnrel; // unreleased frames remaining from - // most recent getNextBuffer - // for debug only - - // rolling counter that is never cleared - int32_t mRsmpInFront; // next available frame - AudioBufferProvider::Buffer mSink; // references client's buffer sink in shared memory // sync event triggering actual audio capture. 
Frames read before this event will @@ -93,7 +79,10 @@ private: ssize_t mFramesToDrop; // used by resampler to find source frames - ResamplerBufferProvider *mResamplerBufferProvider; + ResamplerBufferProvider *mResamplerBufferProvider; + + // used by the record thread to convert frames to proper destination format + RecordBufferConverter *mRecordBufferConverter; }; // playback track, used by PatchPanel diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp index fae19a1..8246fef 100644 --- a/services/audioflinger/ServiceUtilities.cpp +++ b/services/audioflinger/ServiceUtilities.cpp @@ -50,13 +50,6 @@ bool captureHotwordAllowed() { return ok; } -bool captureFmTunerAllowed() { - static const String16 sCaptureFmTunerAllowed("android.permission.ACCESS_FM_RADIO"); - bool ok = checkCallingPermission(sCaptureFmTunerAllowed); - if (!ok) ALOGE("android.permission.ACCESS_FM_RADIO"); - return ok; -} - bool settingsAllowed() { if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true; static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS"); diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h index ce18a90..df6f6f4 100644 --- a/services/audioflinger/ServiceUtilities.h +++ b/services/audioflinger/ServiceUtilities.h @@ -23,7 +23,6 @@ extern pid_t getpid_cached; bool recordingAllowed(); bool captureAudioOutputAllowed(); bool captureHotwordAllowed(); -bool captureFmTunerAllowed(); bool settingsAllowed(); bool modifyAudioRoutingAllowed(); bool dumpAllowed(); diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp index d23588e..45b541a 100644 --- a/services/audioflinger/SpdifStreamOut.cpp +++ b/services/audioflinger/SpdifStreamOut.cpp @@ -32,10 +32,12 @@ namespace android { * If the AudioFlinger is processing encoded data and the HAL expects * PCM then we need to wrap the data in an SPDIF wrapper. 
*/ -SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags) +SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev, + audio_output_flags_t flags, + audio_format_t format) : AudioStreamOut(dev,flags) , mRateMultiplier(1) - , mSpdifEncoder(this) + , mSpdifEncoder(this, format) , mRenderPositionHal(0) , mPreviousHalPosition32(0) { @@ -49,15 +51,15 @@ status_t SpdifStreamOut::open( { struct audio_config customConfig = *config; - customConfig.format = AUDIO_FORMAT_PCM_16_BIT; - customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO; - // Some data bursts run at a higher sample rate. + // TODO Move this into the audio_utils as a static method. switch(config->format) { case AUDIO_FORMAT_E_AC3: mRateMultiplier = 4; break; case AUDIO_FORMAT_AC3: + case AUDIO_FORMAT_DTS: + case AUDIO_FORMAT_DTS_HD: mRateMultiplier = 1; break; default: @@ -67,6 +69,9 @@ status_t SpdifStreamOut::open( } customConfig.sample_rate = config->sample_rate * mRateMultiplier; + customConfig.format = AUDIO_FORMAT_PCM_16_BIT; + customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO; + // Always print this because otherwise it could be very confusing if the // HAL and AudioFlinger are using different formats. // Print before open() because HAL may modify customConfig. 
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h index cb82ac7..d81c064 100644 --- a/services/audioflinger/SpdifStreamOut.h +++ b/services/audioflinger/SpdifStreamOut.h @@ -38,7 +38,8 @@ namespace android { class SpdifStreamOut : public AudioStreamOut { public: - SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags); + SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags, + audio_format_t format); virtual ~SpdifStreamOut() { } @@ -77,8 +78,9 @@ private: class MySPDIFEncoder : public SPDIFEncoder { public: - MySPDIFEncoder(SpdifStreamOut *spdifStreamOut) - : mSpdifStreamOut(spdifStreamOut) + MySPDIFEncoder(SpdifStreamOut *spdifStreamOut, audio_format_t format) + : SPDIFEncoder(format) + , mSpdifStreamOut(spdifStreamOut) { } diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp index 4efb3d7..b30fd20 100644 --- a/services/audioflinger/Threads.cpp +++ b/services/audioflinger/Threads.cpp @@ -86,7 +86,13 @@ #define ALOGVV(a...) do { } while(0) #endif +// TODO: Move these macro/inlines to a header file. #define max(a, b) ((a) > (b) ? (a) : (b)) +template <typename T> +static inline T min(const T& a, const T& b) +{ + return a < b ? a : b; +} namespace android { @@ -1602,13 +1608,19 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac // If you change this calculation, also review the start threshold which is related. if (!(*flags & IAudioFlinger::TRACK_FAST) && audio_is_linear_pcm(format) && sharedBuffer == 0) { + // this must match AudioTrack.cpp calculateMinFrameCount(). + // TODO: Move to a common library uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream); uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate); if (minBufCount < 2) { minBufCount = 2; } + // For normal mixing tracks, if speed is > 1.0f (normal), AudioTrack + // or the client should compute and pass in a larger buffer request. 
size_t minFrameCount = - minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate); + minBufCount * sourceFramesNeededWithTimestretch( + sampleRate, mNormalFrameCount, + mSampleRate, AUDIO_TIMESTRETCH_SPEED_NORMAL /*speed*/); if (frameCount < minFrameCount) { // including frameCount == 0 frameCount = minFrameCount; } @@ -3586,21 +3598,17 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed // during last round size_t desiredFrames; - uint32_t sr = track->sampleRate(); - if (sr == mSampleRate) { - desiredFrames = mNormalFrameCount; - } else { - desiredFrames = sourceFramesNeeded(sr, mNormalFrameCount, mSampleRate); - // add frames already consumed but not yet released by the resampler - // because mAudioTrackServerProxy->framesReady() will include these frames - desiredFrames += mAudioMixer->getUnreleasedFrames(track->name()); -#if 0 - // the minimum track buffer size is normally twice the number of frames necessary - // to fill one buffer and the resampler should not leave more than one buffer worth - // of unreleased frames after each pass, but just in case... - ALOG_ASSERT(desiredFrames <= cblk->frameCount_); -#endif - } + const uint32_t sampleRate = track->mAudioTrackServerProxy->getSampleRate(); + float speed, pitch; + track->mAudioTrackServerProxy->getPlaybackRate(&speed, &pitch); + + desiredFrames = sourceFramesNeededWithTimestretch( + sampleRate, mNormalFrameCount, mSampleRate, speed); + // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed. 
+ // add frames already consumed but not yet released by the resampler + // because mAudioTrackServerProxy->framesReady() will include these frames + desiredFrames += mAudioMixer->getUnreleasedFrames(track->name()); + uint32_t minFrames = 1; if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() && (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) { @@ -3763,6 +3771,17 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac AudioMixer::RESAMPLE, AudioMixer::SAMPLE_RATE, (void *)(uintptr_t)reqSampleRate); + + // set the playback rate as an float array {speed, pitch} + float playbackRate[2]; + track->mAudioTrackServerProxy->getPlaybackRate( + &playbackRate[0] /*speed*/, &playbackRate[1] /*pitch*/); + mAudioMixer->setParameter( + name, + AudioMixer::TIMESTRETCH, + AudioMixer::PLAYBACK_RATE, + playbackRate); + /* * Select the appropriate output buffer for the track. * @@ -5290,7 +5309,6 @@ failed: ; // FIXME mNormalSource } - AudioFlinger::RecordThread::~RecordThread() { if (mFastCapture != 0) { @@ -5594,6 +5612,9 @@ reacquire_wakelock: continue; } + // TODO: This code probably should be moved to RecordTrack. + // TODO: Update the activeTrack buffer converter in case of reconfigure. + enum { OVERRUN_UNKNOWN, OVERRUN_TRUE, @@ -5608,131 +5629,28 @@ reacquire_wakelock: size_t framesOut = activeTrack->mSink.frameCount; LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0)); - int32_t front = activeTrack->mRsmpInFront; - ssize_t filled = rear - front; + // check available frames and handle overrun conditions + // if the record track isn't draining fast enough. 
+ bool hasOverrun; size_t framesIn; - - if (filled < 0) { - // should not happen, but treat like a massive overrun and re-sync - framesIn = 0; - activeTrack->mRsmpInFront = rear; - overrun = OVERRUN_TRUE; - } else if ((size_t) filled <= mRsmpInFrames) { - framesIn = (size_t) filled; - } else { - // client is not keeping up with server, but give it latest data - framesIn = mRsmpInFrames; - activeTrack->mRsmpInFront = front = rear - framesIn; + activeTrack->mResamplerBufferProvider->sync(&framesIn, &hasOverrun); + if (hasOverrun) { overrun = OVERRUN_TRUE; } - if (framesOut == 0 || framesIn == 0) { break; } - if (activeTrack->mResampler == NULL) { - // no resampling - if (framesIn > framesOut) { - framesIn = framesOut; - } else { - framesOut = framesIn; - } - int8_t *dst = activeTrack->mSink.i8; - while (framesIn > 0) { - front &= mRsmpInFramesP2 - 1; - size_t part1 = mRsmpInFramesP2 - front; - if (part1 > framesIn) { - part1 = framesIn; - } - int8_t *src = (int8_t *)mRsmpInBuffer + (front * mFrameSize); - if (mChannelCount == activeTrack->mChannelCount) { - memcpy(dst, src, part1 * mFrameSize); - } else if (mChannelCount == 1) { - upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t *)src, - part1); - } else { - downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, - (const int16_t *)src, part1); - } - dst += part1 * activeTrack->mFrameSize; - front += part1; - framesIn -= part1; - } - activeTrack->mRsmpInFront += framesOut; - - } else { - // resampling - // FIXME framesInNeeded should really be part of resampler API, and should - // depend on the SRC ratio - // to keep mRsmpInBuffer full so resampler always has sufficient input - size_t framesInNeeded; - // FIXME only re-calculate when it changes, and optimize for common ratios - // Do not precompute in/out because floating point is not associative - // e.g. a*b/c != a*(b/c). 
- const double in(mSampleRate); - const double out(activeTrack->mSampleRate); - framesInNeeded = ceil(framesOut * in / out) + 1; - ALOGV("need %u frames in to produce %u out given in/out ratio of %.4g", - framesInNeeded, framesOut, in / out); - // Although we theoretically have framesIn in circular buffer, some of those are - // unreleased frames, and thus must be discounted for purpose of budgeting. - size_t unreleased = activeTrack->mRsmpInUnrel; - framesIn = framesIn > unreleased ? framesIn - unreleased : 0; - if (framesIn < framesInNeeded) { - ALOGV("not enough to resample: have %u frames in but need %u in to " - "produce %u out given in/out ratio of %.4g", - framesIn, framesInNeeded, framesOut, in / out); - size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * out / in) : 0; - LOG_ALWAYS_FATAL_IF(newFramesOut >= framesOut); - if (newFramesOut == 0) { - break; - } - framesInNeeded = ceil(newFramesOut * in / out) + 1; - ALOGV("now need %u frames in to produce %u out given out/in ratio of %.4g", - framesInNeeded, newFramesOut, out / in); - LOG_ALWAYS_FATAL_IF(framesIn < framesInNeeded); - ALOGV("success 2: have %u frames in and need %u in to produce %u out " - "given in/out ratio of %.4g", - framesIn, framesInNeeded, newFramesOut, in / out); - framesOut = newFramesOut; - } else { - ALOGV("success 1: have %u in and need %u in to produce %u out " - "given in/out ratio of %.4g", - framesIn, framesInNeeded, framesOut, in / out); - } - - // reallocate mRsmpOutBuffer as needed; we will grow but never shrink - if (activeTrack->mRsmpOutFrameCount < framesOut) { - // FIXME why does each track need it's own mRsmpOutBuffer? can't they share? 
- delete[] activeTrack->mRsmpOutBuffer; - // resampler always outputs stereo - activeTrack->mRsmpOutBuffer = new int32_t[framesOut * FCC_2]; - activeTrack->mRsmpOutFrameCount = framesOut; - } - - // resampler accumulates, but we only have one source track - memset(activeTrack->mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); - activeTrack->mResampler->resample(activeTrack->mRsmpOutBuffer, framesOut, - // FIXME how about having activeTrack implement this interface itself? - activeTrack->mResamplerBufferProvider - /*this*/ /* AudioBufferProvider* */); - // ditherAndClamp() works as long as all buffers returned by - // activeTrack->getNextBuffer() are 32 bit aligned which should be always true. - if (activeTrack->mChannelCount == 1) { - // temporarily type pun mRsmpOutBuffer from Q4.27 to int16_t - ditherAndClamp(activeTrack->mRsmpOutBuffer, activeTrack->mRsmpOutBuffer, - framesOut); - // the resampler always outputs stereo samples: - // do post stereo to mono conversion - downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16, - (const int16_t *)activeTrack->mRsmpOutBuffer, framesOut); - } else { - ditherAndClamp((int32_t *)activeTrack->mSink.raw, - activeTrack->mRsmpOutBuffer, framesOut); - } - // now done with mRsmpOutBuffer - - } + // Don't allow framesOut to be larger than what is possible with resampling + // from framesIn. + // This isn't strictly necessary but helps limit buffer resizing in + // RecordBufferConverter. TODO: remove when no longer needed. 
+ framesOut = min(framesOut, + destinationFramesPossible( + framesIn, mSampleRate, activeTrack->mSampleRate)); + // process frames from the RecordThread buffer provider to the RecordTrack buffer + framesOut = activeTrack->mRecordBufferConverter->convert( + activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut); if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) { overrun = OVERRUN_FALSE; @@ -6041,12 +5959,9 @@ status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrac // was initialized to some value closer to the thread's mRsmpInFront, then the track could // see previously buffered data before it called start(), but with greater risk of overrun. - recordTrack->mRsmpInFront = mRsmpInRear; - recordTrack->mRsmpInUnrel = 0; - // FIXME why reset? - if (recordTrack->mResampler != NULL) { - recordTrack->mResampler->reset(); - } + recordTrack->mResamplerBufferProvider->reset(); + // clear any converter state as new data will be discontinuous + recordTrack->mRecordBufferConverter->reset(); recordTrack->mState = TrackBase::STARTING_2; // signal thread to start mWaitWorkCV.broadcast(); @@ -6222,12 +6137,52 @@ void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args write(fd, result.string(), result.size()); } + +void AudioFlinger::RecordThread::ResamplerBufferProvider::reset() +{ + sp<ThreadBase> threadBase = mRecordTrack->mThread.promote(); + RecordThread *recordThread = (RecordThread *) threadBase.get(); + mRsmpInFront = recordThread->mRsmpInRear; + mRsmpInUnrel = 0; +} + +void AudioFlinger::RecordThread::ResamplerBufferProvider::sync( + size_t *framesAvailable, bool *hasOverrun) +{ + sp<ThreadBase> threadBase = mRecordTrack->mThread.promote(); + RecordThread *recordThread = (RecordThread *) threadBase.get(); + const int32_t rear = recordThread->mRsmpInRear; + const int32_t front = mRsmpInFront; + const ssize_t filled = rear - front; + + size_t framesIn; + bool overrun = false; + if (filled < 0) { + // 
should not happen, but treat like a massive overrun and re-sync + framesIn = 0; + mRsmpInFront = rear; + overrun = true; + } else if ((size_t) filled <= recordThread->mRsmpInFrames) { + framesIn = (size_t) filled; + } else { + // client is not keeping up with server, but give it latest data + framesIn = recordThread->mRsmpInFrames; + mRsmpInFront = /* front = */ rear - framesIn; + overrun = true; + } + if (framesAvailable != NULL) { + *framesAvailable = framesIn; + } + if (hasOverrun != NULL) { + *hasOverrun = overrun; + } +} + // AudioBufferProvider interface status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer( AudioBufferProvider::Buffer* buffer, int64_t pts __unused) { - RecordTrack *activeTrack = mRecordTrack; - sp<ThreadBase> threadBase = activeTrack->mThread.promote(); + sp<ThreadBase> threadBase = mRecordTrack->mThread.promote(); if (threadBase == 0) { buffer->frameCount = 0; buffer->raw = NULL; @@ -6235,7 +6190,7 @@ status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer( } RecordThread *recordThread = (RecordThread *) threadBase.get(); int32_t rear = recordThread->mRsmpInRear; - int32_t front = activeTrack->mRsmpInFront; + int32_t front = mRsmpInFront; ssize_t filled = rear - front; // FIXME should not be P2 (don't want to increase latency) // FIXME if client not keeping up, discard @@ -6252,17 +6207,16 @@ status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer( part1 = ask; } if (part1 == 0) { - // Higher-level should keep mRsmpInBuffer full, and not call resampler if empty - LOG_ALWAYS_FATAL("RecordThread::getNextBuffer() starved"); + // out of data is fine since the resampler will return a short-count. 
buffer->raw = NULL; buffer->frameCount = 0; - activeTrack->mRsmpInUnrel = 0; + mRsmpInUnrel = 0; return NOT_ENOUGH_DATA; } buffer->raw = recordThread->mRsmpInBuffer + front * recordThread->mChannelCount; buffer->frameCount = part1; - activeTrack->mRsmpInUnrel = part1; + mRsmpInUnrel = part1; return NO_ERROR; } @@ -6270,18 +6224,197 @@ status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer( void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer( AudioBufferProvider::Buffer* buffer) { - RecordTrack *activeTrack = mRecordTrack; size_t stepCount = buffer->frameCount; if (stepCount == 0) { return; } - ALOG_ASSERT(stepCount <= activeTrack->mRsmpInUnrel); - activeTrack->mRsmpInUnrel -= stepCount; - activeTrack->mRsmpInFront += stepCount; + ALOG_ASSERT(stepCount <= mRsmpInUnrel); + mRsmpInUnrel -= stepCount; + mRsmpInFront += stepCount; buffer->raw = NULL; buffer->frameCount = 0; } +AudioFlinger::RecordThread::RecordBufferConverter::RecordBufferConverter( + audio_channel_mask_t srcChannelMask, audio_format_t srcFormat, + uint32_t srcSampleRate, + audio_channel_mask_t dstChannelMask, audio_format_t dstFormat, + uint32_t dstSampleRate) : + mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars + // mSrcFormat + // mSrcSampleRate + // mDstChannelMask + // mDstFormat + // mDstSampleRate + // mSrcChannelCount + // mDstChannelCount + // mDstFrameSize + mBuf(NULL), mBufFrames(0), mBufFrameSize(0), + mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0) +{ + (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate, + dstChannelMask, dstFormat, dstSampleRate); +} + +AudioFlinger::RecordThread::RecordBufferConverter::~RecordBufferConverter() { + free(mBuf); + delete mResampler; + free(mRsmpOutBuffer); +} + +size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst, + AudioBufferProvider *provider, size_t frames) +{ + if (mSrcSampleRate == mDstSampleRate) { + ALOGVV("NO RESAMPLING 
sampleRate:%u mSrcFormat:%#x mDstFormat:%#x", + mSrcSampleRate, mSrcFormat, mDstFormat); + + AudioBufferProvider::Buffer buffer; + for (size_t i = frames; i > 0; ) { + buffer.frameCount = i; + status_t status = provider->getNextBuffer(&buffer, 0); + if (status != OK || buffer.frameCount == 0) { + frames -= i; // cannot fill request. + break; + } + // convert to destination buffer + convert(dst, buffer.raw, buffer.frameCount); + + dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize; + i -= buffer.frameCount; + provider->releaseBuffer(&buffer); + } + } else { + ALOGVV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x", + mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat); + + // reallocate mRsmpOutBuffer as needed; we will grow but never shrink + if (mRsmpOutFrameCount < frames) { + // FIXME why does each track need it's own mRsmpOutBuffer? can't they share? + free(mRsmpOutBuffer); + // resampler always outputs stereo (FOR NOW) + (void)posix_memalign(&mRsmpOutBuffer, 32, frames * FCC_2 * sizeof(int32_t) /*Q4.27*/); + mRsmpOutFrameCount = frames; + } + // resampler accumulates, but we only have one source track + memset(mRsmpOutBuffer, 0, frames * FCC_2 * sizeof(int32_t)); + frames = mResampler->resample((int32_t*)mRsmpOutBuffer, frames, provider); + + // convert to destination buffer + convert(dst, mRsmpOutBuffer, frames); + } + return frames; +} + +status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters( + audio_channel_mask_t srcChannelMask, audio_format_t srcFormat, + uint32_t srcSampleRate, + audio_channel_mask_t dstChannelMask, audio_format_t dstFormat, + uint32_t dstSampleRate) +{ + // quick evaluation if there is any change. 
+ if (mSrcFormat == srcFormat + && mSrcChannelMask == srcChannelMask + && mSrcSampleRate == srcSampleRate + && mDstFormat == dstFormat + && mDstChannelMask == dstChannelMask + && mDstSampleRate == dstSampleRate) { + return NO_ERROR; + } + + const bool valid = + audio_is_input_channel(srcChannelMask) + && audio_is_input_channel(dstChannelMask) + && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat) + && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat) + && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) + ; // no upsampling checks for now + if (!valid) { + return BAD_VALUE; + } + + mSrcFormat = srcFormat; + mSrcChannelMask = srcChannelMask; + mSrcSampleRate = srcSampleRate; + mDstFormat = dstFormat; + mDstChannelMask = dstChannelMask; + mDstSampleRate = dstSampleRate; + + // compute derived parameters + mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask); + mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask); + mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat); + + // do we need a format buffer? + if (mSrcFormat != mDstFormat && mDstChannelCount != mSrcChannelCount) { + mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat); + } else { + mBufFrameSize = 0; + } + mBufFrames = 0; // force the buffer to be resized. + + // do we need to resample? + if (mSrcSampleRate != mDstSampleRate) { + if (mResampler != NULL) { + delete mResampler; + } + mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT, + mSrcChannelCount, mDstSampleRate); // may seem confusing... 
+ mResampler->setSampleRate(mSrcSampleRate); + mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT); + } + return NO_ERROR; +} + +void AudioFlinger::RecordThread::RecordBufferConverter::convert( + void *dst, /*const*/ void *src, size_t frames) +{ + // check if a memcpy will do + if (mResampler == NULL + && mSrcChannelCount == mDstChannelCount + && mSrcFormat == mDstFormat) { + memcpy(dst, src, + frames * mDstChannelCount * audio_bytes_per_sample(mDstFormat)); + return; + } + // reallocate buffer if needed + if (mBufFrameSize != 0 && mBufFrames < frames) { + free(mBuf); + mBufFrames = frames; + (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize); + } + // do processing + if (mResampler != NULL) { + // src channel count is always >= 2. + void *dstBuf = mBuf != NULL ? mBuf : dst; + // ditherAndClamp() works as long as all buffers returned by + // activeTrack->getNextBuffer() are 32 bit aligned which should be always true. + if (mDstChannelCount == 1) { + // the resampler always outputs stereo samples. + // FIXME: this rewrites back into src + ditherAndClamp((int32_t *)src, (const int32_t *)src, frames); + downmix_to_mono_i16_from_stereo_i16((int16_t *)dstBuf, + (const int16_t *)src, frames); + } else { + ditherAndClamp((int32_t *)dstBuf, (const int32_t *)src, frames); + } + } else if (mSrcChannelCount != mDstChannelCount) { + void *dstBuf = mBuf != NULL ? mBuf : dst; + if (mSrcChannelCount == 1) { + upmix_to_stereo_i16_from_mono_i16((int16_t *)dstBuf, (const int16_t *)src, + frames); + } else { + downmix_to_mono_i16_from_stereo_i16((int16_t *)dstBuf, + (const int16_t *)src, frames); + } + } + if (mSrcFormat != mDstFormat) { + void *srcBuf = mBuf != NULL ? 
mBuf : src; + memcpy_by_audio_format(dst, mDstFormat, srcBuf, mSrcFormat, + frames * mDstChannelCount); + } +} + bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair, status_t& status) { @@ -6303,7 +6436,7 @@ bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValueP reconfig = true; } if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { - if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { + if (!audio_is_linear_pcm((audio_format_t) value)) { status = BAD_VALUE; } else { reqFormat = (audio_format_t) value; @@ -6377,10 +6510,10 @@ bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValueP } if (reconfig) { if (status == BAD_VALUE && - reqFormat == mInput->stream->common.get_format(&mInput->stream->common) && - reqFormat == AUDIO_FORMAT_PCM_16_BIT && + audio_is_linear_pcm(mInput->stream->common.get_format(&mInput->stream->common)) && + audio_is_linear_pcm(reqFormat) && (mInput->stream->common.get_sample_rate(&mInput->stream->common) - <= (2 * samplingRate)) && + <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate)) && audio_channel_count_from_in_mask( mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 && (channelMask == AUDIO_CHANNEL_IN_MONO || @@ -6451,6 +6584,8 @@ void AudioFlinger::RecordThread::readInputParameters_l() // The value is somewhat arbitrary, and could probably be even larger. // A larger value should allow more old data to be read after a track calls start(), // without increasing latency. + // + // Note this is independent of the maximum downsampling ratio permitted for capture. 
mRsmpInFrames = mFrameCount * 7; mRsmpInFramesP2 = roundup(mRsmpInFrames); delete[] mRsmpInBuffer; diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h index d600ea9..27bc56b 100644 --- a/services/audioflinger/Threads.h +++ b/services/audioflinger/Threads.h @@ -1036,17 +1036,127 @@ class RecordThread : public ThreadBase public: class RecordTrack; + + /* The ResamplerBufferProvider is used to retrieve recorded input data from the + * RecordThread. It maintains local state on the relative position of the read + * position of the RecordTrack compared with the RecordThread. + */ class ResamplerBufferProvider : public AudioBufferProvider - // derives from AudioBufferProvider interface for use by resampler { public: - ResamplerBufferProvider(RecordTrack* recordTrack) : mRecordTrack(recordTrack) { } + ResamplerBufferProvider(RecordTrack* recordTrack) : + mRecordTrack(recordTrack), + mRsmpInUnrel(0), mRsmpInFront(0) { } virtual ~ResamplerBufferProvider() { } + + // called to set the ResamplerBufferProvider to head of the RecordThread data buffer, + // skipping any previous data read from the hal. + virtual void reset(); + + /* Synchronizes RecordTrack position with the RecordThread. + * Calculates available frames and handle overruns if the RecordThread + * has advanced faster than the ResamplerBufferProvider has retrieved data. + * TODO: why not do this for every getNextBuffer? + * + * Parameters + * framesAvailable: pointer to optional output size_t to store record track + * frames available. + * hasOverrun: pointer to optional boolean, returns true if track has overrun. 
+ */ + + virtual void sync(size_t *framesAvailable = NULL, bool *hasOverrun = NULL); + // AudioBufferProvider interface virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); private: RecordTrack * const mRecordTrack; + size_t mRsmpInUnrel; // unreleased frames remaining from + // most recent getNextBuffer + // for debug only + int32_t mRsmpInFront; // next available frame + // rolling counter that is never cleared + }; + + /* The RecordBufferConverter is used for format, channel, and sample rate + * conversion for a RecordTrack. + * + * TODO: Self contained, so move to a separate file later. + * + * RecordBufferConverter uses the convert() method rather than exposing a + * buffer provider interface; this is to save a memory copy. + */ + class RecordBufferConverter + { + public: + RecordBufferConverter( + audio_channel_mask_t srcChannelMask, audio_format_t srcFormat, + uint32_t srcSampleRate, + audio_channel_mask_t dstChannelMask, audio_format_t dstFormat, + uint32_t dstSampleRate); + + ~RecordBufferConverter(); + + /* Converts input data from an AudioBufferProvider by format, channelMask, + * and sampleRate to a destination buffer. + * + * Parameters + * dst: buffer to place the converted data. + * provider: buffer provider to obtain source data. + * frames: number of frames to convert + * + * Returns the number of frames converted. + */ + size_t convert(void *dst, AudioBufferProvider *provider, size_t frames); + + // returns NO_ERROR if constructor was successful + status_t initCheck() const { + // mSrcChannelMask set on successful updateParameters + return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? 
NO_ERROR : NO_INIT; + } + + // allows dynamic reconfigure of all parameters + status_t updateParameters( + audio_channel_mask_t srcChannelMask, audio_format_t srcFormat, + uint32_t srcSampleRate, + audio_channel_mask_t dstChannelMask, audio_format_t dstFormat, + uint32_t dstSampleRate); + + // called to reset resampler buffers on record track discontinuity + void reset() { + if (mResampler != NULL) { + mResampler->reset(); + } + } + + private: + // internal convert function for format and channel mask. + void convert(void *dst, /*const*/ void *src, size_t frames); + + // user provided information + audio_channel_mask_t mSrcChannelMask; + audio_format_t mSrcFormat; + uint32_t mSrcSampleRate; + audio_channel_mask_t mDstChannelMask; + audio_format_t mDstFormat; + uint32_t mDstSampleRate; + + // derived information + uint32_t mSrcChannelCount; + uint32_t mDstChannelCount; + size_t mDstFrameSize; + + // format conversion buffer + void *mBuf; + size_t mBufFrames; + size_t mBufFrameSize; + + // resampler info + AudioResampler *mResampler; + // interleaved stereo pairs of fixed-point Q4.27 or float depending on resampler + void *mRsmpOutBuffer; + // current allocated frame count for the above, which may be larger than needed + size_t mRsmpOutFrameCount; }; #include "RecordTracks.h" diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp index dc9f249..da2d634 100644 --- a/services/audioflinger/Tracks.cpp +++ b/services/audioflinger/Tracks.cpp @@ -903,9 +903,14 @@ status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& times mPreviousTimestampValid = false; return INVALID_OPERATION; } + // FIXME Not accurate under dynamic changes of sample rate and speed. + // Do not use track's mSampleRate as it is not current for mixer tracks. 
+ uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate(); + float speed, pitch; + mAudioTrackServerProxy->getPlaybackRate(&speed, &pitch); uint32_t unpresentedFrames = - ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) / - playbackThread->mSampleRate; + ((double) playbackThread->mLatchQ.mUnpresentedFrames * sampleRate * speed) + / playbackThread->mSampleRate; // FIXME Since we're using a raw pointer as the key, it is theoretically possible // for a brand new track to share the same address as a recently destroyed // track, and thus for us to get the frames released of the wrong track. @@ -1861,13 +1866,14 @@ void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue() AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread, + audio_stream_type_t streamType, uint32_t sampleRate, audio_channel_mask_t channelMask, audio_format_t format, size_t frameCount, void *buffer, IAudioFlinger::track_flags_t flags) - : Track(playbackThread, NULL, AUDIO_STREAM_PATCH, + : Track(playbackThread, NULL, streamType, sampleRate, format, channelMask, frameCount, buffer, 0, 0, getuid(), flags, TYPE_PATCH), mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true)) @@ -1989,29 +1995,30 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) : ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE), type), - mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0), - // See real initialization of mRsmpInFront at RecordThread::start() - mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL) + mOverflow(false), + mFramesToDrop(0) { if (mCblk == NULL) { return; } + mRecordBufferConverter = new RecordBufferConverter( + thread->mChannelMask, thread->mFormat, thread->mSampleRate, + channelMask, format, sampleRate); + // Check if the RecordBufferConverter construction was successful. 
+ // If not, don't continue with construction. + // + // NOTE: It would be extremely rare that the record track cannot be created + // for the current device, but a pending or future device change would make + // the record track configuration valid. + if (mRecordBufferConverter->initCheck() != NO_ERROR) { + ALOGE("RecordTrack unable to create record buffer converter"); + return; + } + mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize, !isExternalTrack()); - - uint32_t channelCount = audio_channel_count_from_in_mask(channelMask); - // FIXME I don't understand either of the channel count checks - if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 && - channelCount <= FCC_2) { - // sink SR - mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT, - thread->mChannelCount, sampleRate); - // source SR - mResampler->setSampleRate(thread->mSampleRate); - mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT); - mResamplerBufferProvider = new ResamplerBufferProvider(this); - } + mResamplerBufferProvider = new ResamplerBufferProvider(this); if (flags & IAudioFlinger::TRACK_FAST) { ALOG_ASSERT(thread->mFastTrackAvail); @@ -2022,11 +2029,19 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( AudioFlinger::RecordThread::RecordTrack::~RecordTrack() { ALOGV("%s", __func__); - delete mResampler; - delete[] mRsmpOutBuffer; + delete mRecordBufferConverter; delete mResamplerBufferProvider; } +status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const +{ + status_t status = TrackBase::initCheck(); + if (status == NO_ERROR && mServerProxy == 0) { + status = BAD_VALUE; + } + return status; +} + // AudioBufferProvider interface status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts __unused) diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk index 8604ef5..536eb93 100644 --- 
a/services/audioflinger/tests/Android.mk +++ b/services/audioflinger/tests/Android.mk @@ -39,11 +39,13 @@ endif LOCAL_SRC_FILES:= \ test-mixer.cpp \ ../AudioMixer.cpp.arm \ + ../BufferProviders.cpp LOCAL_C_INCLUDES := \ $(call include-path-for, audio-effects) \ $(call include-path-for, audio-utils) \ - frameworks/av/services/audioflinger + frameworks/av/services/audioflinger \ + external/sonic LOCAL_STATIC_LIBRARIES := \ libsndfile @@ -57,7 +59,8 @@ LOCAL_SHARED_LIBRARIES := \ libdl \ libcutils \ libutils \ - liblog + liblog \ + libsonic LOCAL_MODULE:= test-mixer diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp index d6217ba..9e375db 100644 --- a/services/audioflinger/tests/resampler_tests.cpp +++ b/services/audioflinger/tests/resampler_tests.cpp @@ -48,7 +48,10 @@ void resample(int channels, void *output, if (thisFrames == 0 || thisFrames > outputFrames - i) { thisFrames = outputFrames - i; } - resampler->resample((int32_t*) output + channels*i, thisFrames, provider); + size_t framesResampled = resampler->resample( + (int32_t*) output + channels*i, thisFrames, provider); + // we should have enough buffer space, so there is no short count. + ASSERT_EQ(thisFrames, framesResampled); i += thisFrames; } } diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h index 116d0d6..48d0e29 100644 --- a/services/audiopolicy/AudioPolicyInterface.h +++ b/services/audiopolicy/AudioPolicyInterface.h @@ -110,6 +110,7 @@ public: audio_format_t format, audio_channel_mask_t channelMask, audio_output_flags_t flags, + int selectedDeviceId, const audio_offload_info_t *offloadInfo) = 0; // indicates to the audio policy manager that the output starts being used by corresponding stream. 
virtual status_t startOutput(audio_io_handle_t output, diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h index a4cc759..4205589 100755 --- a/services/audiopolicy/common/include/Volume.h +++ b/services/audiopolicy/common/include/Volume.h @@ -18,6 +18,10 @@ #include <system/audio.h> #include <utils/Log.h> +#include <math.h> + +// Absolute min volume in dB (can be represented in single precision normal float value) +#define VOLUME_MIN_DB (-758) class VolumeCurvePoint { @@ -32,7 +36,7 @@ public: /** * 4 points to define the volume attenuation curve, each characterized by the volume * index (from 0 to 100) at which they apply, and the attenuation in dB at that index. - * we use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl() + * we use 100 steps to avoid rounding errors when computing the volume in volIndexToDb() * * @todo shall become configurable */ @@ -134,4 +138,20 @@ public: } } + static inline float DbToAmpl(float decibels) + { + if (decibels <= VOLUME_MIN_DB) { + return 0.0f; + } + return exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 ) + } + + static inline float AmplToDb(float amplification) + { + if (amplification == 0) { + return VOLUME_MIN_DB; + } + return 20 * log10(amplification); + } + }; diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk index 71ba1cb..7c265aa 100644 --- a/services/audiopolicy/common/managerdefinitions/Android.mk +++ b/services/audiopolicy/common/managerdefinitions/Android.mk @@ -25,6 +25,7 @@ LOCAL_SHARED_LIBRARIES := \ LOCAL_C_INCLUDES += \ $(LOCAL_PATH)/include \ $(TOPDIR)frameworks/av/services/audiopolicy/common/include \ + $(TOPDIR)frameworks/av/services/audiopolicy LOCAL_EXPORT_C_INCLUDE_DIRS := \ $(LOCAL_PATH)/include diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h 
b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h index 7536a37..18bcfdb 100644 --- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h +++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h @@ -34,12 +34,11 @@ class AudioInputDescriptor: public AudioPortConfig public: AudioInputDescriptor(const sp<IOProfile>& profile); void setIoHandle(audio_io_handle_t ioHandle); - + audio_port_handle_t getId() const; audio_module_handle_t getModuleHandle() const; status_t dump(int fd); - audio_port_handle_t mId; audio_io_handle_t mIoHandle; // input handle audio_devices_t mDevice; // current device this input is routed to AudioMix *mPolicyMix; // non NULL when used by a dynamic policy @@ -57,6 +56,9 @@ public: const struct audio_port_config *srcConfig = NULL) const; virtual sp<AudioPort> getAudioPort() const { return mProfile; } void toAudioPort(struct audio_port *port) const; + +private: + audio_port_handle_t mId; }; class AudioInputCollection : diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h index 43ee691..f1aee46 100644 --- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h +++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h @@ -27,24 +27,36 @@ namespace android { class IOProfile; class AudioMix; +class AudioPolicyClientInterface; // descriptor for audio outputs. Used to maintain current configuration of each opened audio output // and keep track of the usage of this output by each audio stream type. 
class AudioOutputDescriptor: public AudioPortConfig { public: - AudioOutputDescriptor(const sp<IOProfile>& profile); + AudioOutputDescriptor(const sp<AudioPort>& port, + AudioPolicyClientInterface *clientInterface); + virtual ~AudioOutputDescriptor() {} status_t dump(int fd); + void log(const char* indent); + + audio_port_handle_t getId() const; + virtual audio_devices_t device() const; + virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc); + virtual audio_devices_t supportedDevices(); + virtual bool isDuplicated() const { return false; } + virtual uint32_t latency() { return 0; } + virtual bool isFixedVolume(audio_devices_t device); + virtual sp<AudioOutputDescriptor> subOutput1() { return 0; } + virtual sp<AudioOutputDescriptor> subOutput2() { return 0; } + virtual bool setVolume(float volume, + audio_stream_type_t stream, + audio_devices_t device, + uint32_t delayMs, + bool force); + virtual void changeRefCount(audio_stream_type_t stream, int delta); - audio_devices_t device() const; - void changeRefCount(audio_stream_type_t stream, int delta); - - void setIoHandle(audio_io_handle_t ioHandle); - bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); } - audio_devices_t supportedDevices(); - uint32_t latency(); - bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc); bool isActive(uint32_t inPastMs = 0) const; bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0, @@ -52,32 +64,69 @@ public: virtual void toAudioPortConfig(struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig = NULL) const; - virtual sp<AudioPort> getAudioPort() const { return mProfile; } - void toAudioPort(struct audio_port *port) const; + virtual sp<AudioPort> getAudioPort() const { return mPort; } + virtual void toAudioPort(struct audio_port *port) const; audio_module_handle_t getModuleHandle() const; - audio_port_handle_t mId; - audio_io_handle_t mIoHandle; // output handle - uint32_t mLatency; 
// - audio_output_flags_t mFlags; // + sp<AudioPort> mPort; audio_devices_t mDevice; // current device this output is routed to - AudioMix *mPolicyMix; // non NULL when used by a dynamic policy audio_patch_handle_t mPatchHandle; uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output nsecs_t mStopTime[AUDIO_STREAM_CNT]; - sp<AudioOutputDescriptor> mOutput1; // used by duplicated outputs: first output - sp<AudioOutputDescriptor> mOutput2; // used by duplicated outputs: second output - float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume + float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume in dB int mMuteCount[AUDIO_STREAM_CNT]; // mute request counter - const sp<IOProfile> mProfile; // I/O profile this output derives from bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible // device selection. See checkDeviceMuteStrategies() + AudioPolicyClientInterface *mClientInterface; + +protected: + audio_port_handle_t mId; +}; + +// Audio output driven by a software mixer in audio flinger. 
+class SwAudioOutputDescriptor: public AudioOutputDescriptor +{ +public: + SwAudioOutputDescriptor(const sp<IOProfile>& profile, + AudioPolicyClientInterface *clientInterface); + virtual ~SwAudioOutputDescriptor() {} + + status_t dump(int fd); + + void setIoHandle(audio_io_handle_t ioHandle); + + virtual audio_devices_t device() const; + virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc); + virtual audio_devices_t supportedDevices(); + virtual uint32_t latency(); + virtual bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); } + virtual bool isFixedVolume(audio_devices_t device); + virtual sp<AudioOutputDescriptor> subOutput1() { return mOutput1; } + virtual sp<AudioOutputDescriptor> subOutput2() { return mOutput2; } + virtual void changeRefCount(audio_stream_type_t stream, int delta); + virtual bool setVolume(float volume, + audio_stream_type_t stream, + audio_devices_t device, + uint32_t delayMs, + bool force); + + virtual void toAudioPortConfig(struct audio_port_config *dstConfig, + const struct audio_port_config *srcConfig = NULL) const; + virtual void toAudioPort(struct audio_port *port) const; + + const sp<IOProfile> mProfile; // I/O profile this output derives from + audio_io_handle_t mIoHandle; // output handle + uint32_t mLatency; // + audio_output_flags_t mFlags; // + AudioMix *mPolicyMix; // non NULL when used by a dynamic policy + sp<SwAudioOutputDescriptor> mOutput1; // used by duplicated outputs: first output + sp<SwAudioOutputDescriptor> mOutput2; // used by duplicated outputs: second output uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only) }; -class AudioOutputCollection : - public DefaultKeyedVector< audio_io_handle_t, sp<AudioOutputDescriptor> > +class SwAudioOutputCollection : + public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> > { public: bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const; @@ -96,9 +145,9 @@ 
public: */ audio_io_handle_t getA2dpOutput() const; - sp<AudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const; + sp<SwAudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const; - sp<AudioOutputDescriptor> getPrimaryOutput() const; + sp<SwAudioOutputDescriptor> getPrimaryOutput() const; /** * return true if any output is playing anything besides the stream to ignore diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h index 988aed6..d51f4e1 100644 --- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h +++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h @@ -24,7 +24,7 @@ namespace android { -class AudioOutputDescriptor; +class SwAudioOutputDescriptor; /** * custom mix entry in mPolicyMixes @@ -33,19 +33,19 @@ class AudioPolicyMix : public RefBase { public: AudioPolicyMix() {} - const sp<AudioOutputDescriptor> &getOutput() const; + const sp<SwAudioOutputDescriptor> &getOutput() const; - void setOutput(sp<AudioOutputDescriptor> &output); + void setOutput(sp<SwAudioOutputDescriptor> &output); void clearOutput(); - android::AudioMix &getMix(); + android::AudioMix *getMix(); void setMix(AudioMix &mix); private: AudioMix mMix; // Audio policy mix descriptor - sp<AudioOutputDescriptor> mOutput; // Corresponding output stream + sp<SwAudioOutputDescriptor> mOutput; // Corresponding output stream }; @@ -58,24 +58,24 @@ public: status_t unregisterMix(String8 address); - void closeOutput(sp<AudioOutputDescriptor> &desc); + void closeOutput(sp<SwAudioOutputDescriptor> &desc); /** * Try to find an output descriptor for the given attributes. * - * @param[in] attributes to consider for the research of output descriptor. + * @param[in] attributes to consider fowr the research of output descriptor. * @param[out] desc to return if an output could be found. 
* * @return NO_ERROR if an output was found for the given attribute (in this case, the * descriptor output param is initialized), error code otherwise. */ - status_t getOutputForAttr(audio_attributes_t attributes, sp<AudioOutputDescriptor> &desc); + status_t getOutputForAttr(audio_attributes_t attributes, sp<SwAudioOutputDescriptor> &desc); audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource, audio_devices_t availableDeviceTypes, AudioMix **policyMix); - status_t getInputMixForAttr(audio_attributes_t attr, AudioMix *&policyMix); + status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix); }; }; // namespace android diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h index 4f7f2bc..1c2c27e 100644 --- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h +++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h @@ -32,13 +32,11 @@ class AudioPort : public virtual RefBase { public: AudioPort(const String8& name, audio_port_type_t type, - audio_port_role_t role, const sp<HwModule>& module); + audio_port_role_t role); virtual ~AudioPort() {} - audio_port_handle_t getHandle() { return mId; } - - void attach(const sp<HwModule>& module); - bool isAttached() { return mId != 0; } + virtual void attach(const sp<HwModule>& module); + bool isAttached() { return mModule != 0; } static audio_port_handle_t getNextUniqueId(); @@ -64,8 +62,12 @@ public: // searches for an exact match status_t checkExactChannelMask(audio_channel_mask_t channelMask) const; // searches for a compatible match, currently implemented for input channel masks only - status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask) const; - status_t checkFormat(audio_format_t format) const; + status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask, + audio_channel_mask_t *updatedChannelMask) const; + + status_t 
checkExactFormat(audio_format_t format) const; + // searches for a compatible match, currently implemented for input formats only + status_t checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat) const; status_t checkGain(const struct audio_gain_config *gainConfig, int index) const; uint32_t pickSamplingRate() const; @@ -73,11 +75,19 @@ public: audio_format_t pickFormat() const; static const audio_format_t sPcmFormatCompareTable[]; + static int compareFormatsGoodToBad( + const audio_format_t *format1, const audio_format_t *format2) { + // compareFormats sorts from bad to good, we reverse it here + return compareFormats(*format2, *format1); + } static int compareFormats(audio_format_t format1, audio_format_t format2); audio_module_handle_t getModuleHandle() const; + uint32_t getModuleVersion() const; + const char *getModuleName() const; void dump(int fd, int spaces) const; + void log(const char* indent) const; String8 mName; audio_port_type_t mType; @@ -94,13 +104,6 @@ public: uint32_t mFlags; // attribute flags (e.g primary output, // direct output...). - -protected: - //TODO - clarify the role of mId in this case, both an "attached" indicator - // and a unique ID for identifying a port to the (upcoming) selection API, - // and its relationship to the mId in AudioOutputDescriptor and AudioInputDescriptor. 
- audio_port_handle_t mId; - private: static volatile int32_t mNextUniqueId; }; diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h index 53cb4a3..f8c4d08 100644 --- a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h +++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h @@ -39,11 +39,12 @@ struct StringToEnum { }; #define STRING_TO_ENUM(string) { #string, string } +#define NAME_TO_ENUM(name, value) { name, value } #ifndef ARRAY_SIZE #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) #endif -const StringToEnum sDeviceNameToEnumTable[] = { +const StringToEnum sDeviceTypeToEnumTable[] = { STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE), STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER), STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE), @@ -94,6 +95,57 @@ const StringToEnum sDeviceNameToEnumTable[] = { STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK), }; +const StringToEnum sDeviceNameToEnumTable[] = { + NAME_TO_ENUM("Earpiece", AUDIO_DEVICE_OUT_EARPIECE), + NAME_TO_ENUM("Speaker", AUDIO_DEVICE_OUT_SPEAKER), + NAME_TO_ENUM("Speaker Protected", AUDIO_DEVICE_OUT_SPEAKER_SAFE), + NAME_TO_ENUM("Wired Headset", AUDIO_DEVICE_OUT_WIRED_HEADSET), + NAME_TO_ENUM("Wired Headphones", AUDIO_DEVICE_OUT_WIRED_HEADPHONE), + NAME_TO_ENUM("BT SCO", AUDIO_DEVICE_OUT_BLUETOOTH_SCO), + NAME_TO_ENUM("BT SCO Headset", AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET), + NAME_TO_ENUM("BT SCO Car Kit", AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT), + NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_SCO), + NAME_TO_ENUM("BT A2DP Out", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP), + NAME_TO_ENUM("BT A2DP Headphones", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES), + NAME_TO_ENUM("BT A2DP Speaker", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER), + NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_A2DP), + NAME_TO_ENUM("HDMI Out", AUDIO_DEVICE_OUT_AUX_DIGITAL), + NAME_TO_ENUM("HDMI Out", AUDIO_DEVICE_OUT_HDMI), + 
NAME_TO_ENUM("Analog Dock Out", AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET), + NAME_TO_ENUM("Digital Dock Out", AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET), + NAME_TO_ENUM("USB Host Out", AUDIO_DEVICE_OUT_USB_ACCESSORY), + NAME_TO_ENUM("USB Device Out", AUDIO_DEVICE_OUT_USB_DEVICE), + NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_USB), + NAME_TO_ENUM("Reroute Submix Out", AUDIO_DEVICE_OUT_REMOTE_SUBMIX), + NAME_TO_ENUM("Telephony Tx", AUDIO_DEVICE_OUT_TELEPHONY_TX), + NAME_TO_ENUM("Line Out", AUDIO_DEVICE_OUT_LINE), + NAME_TO_ENUM("HDMI ARC Out", AUDIO_DEVICE_OUT_HDMI_ARC), + NAME_TO_ENUM("S/PDIF Out", AUDIO_DEVICE_OUT_SPDIF), + NAME_TO_ENUM("FM transceiver Out", AUDIO_DEVICE_OUT_FM), + NAME_TO_ENUM("Aux Line Out", AUDIO_DEVICE_OUT_AUX_LINE), + NAME_TO_ENUM("Ambient Mic", AUDIO_DEVICE_IN_AMBIENT), + NAME_TO_ENUM("Built-In Mic", AUDIO_DEVICE_IN_BUILTIN_MIC), + NAME_TO_ENUM("BT SCO Headset Mic", AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET), + NAME_TO_ENUM("", AUDIO_DEVICE_IN_ALL_SCO), + NAME_TO_ENUM("Wired Headset Mic", AUDIO_DEVICE_IN_WIRED_HEADSET), + NAME_TO_ENUM("HDMI In", AUDIO_DEVICE_IN_AUX_DIGITAL), + NAME_TO_ENUM("HDMI In", AUDIO_DEVICE_IN_HDMI), + NAME_TO_ENUM("Telephony Rx", AUDIO_DEVICE_IN_TELEPHONY_RX), + NAME_TO_ENUM("Telephony Rx", AUDIO_DEVICE_IN_VOICE_CALL), + NAME_TO_ENUM("Built-In Back Mic", AUDIO_DEVICE_IN_BACK_MIC), + NAME_TO_ENUM("Reroute Submix In", AUDIO_DEVICE_IN_REMOTE_SUBMIX), + NAME_TO_ENUM("Analog Dock In", AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET), + NAME_TO_ENUM("Digital Dock In", AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET), + NAME_TO_ENUM("USB Host In", AUDIO_DEVICE_IN_USB_ACCESSORY), + NAME_TO_ENUM("USB Device In", AUDIO_DEVICE_IN_USB_DEVICE), + NAME_TO_ENUM("FM Tuner In", AUDIO_DEVICE_IN_FM_TUNER), + NAME_TO_ENUM("TV Tuner In", AUDIO_DEVICE_IN_TV_TUNER), + NAME_TO_ENUM("Line In", AUDIO_DEVICE_IN_LINE), + NAME_TO_ENUM("S/PDIF In", AUDIO_DEVICE_IN_SPDIF), + NAME_TO_ENUM("BT A2DP In", AUDIO_DEVICE_IN_BLUETOOTH_A2DP), + NAME_TO_ENUM("Loopback In", AUDIO_DEVICE_IN_LOOPBACK), +}; + 
const StringToEnum sOutputFlagNameToEnumTable[] = { STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT), STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY), @@ -134,6 +186,8 @@ const StringToEnum sFormatNameToEnumTable[] = { STRING_TO_ENUM(AUDIO_FORMAT_OPUS), STRING_TO_ENUM(AUDIO_FORMAT_AC3), STRING_TO_ENUM(AUDIO_FORMAT_E_AC3), + STRING_TO_ENUM(AUDIO_FORMAT_DTS), + STRING_TO_ENUM(AUDIO_FORMAT_DTS_HD), }; const StringToEnum sOutChannelsNameToEnumTable[] = { diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h index d15f6b4..aa37eec 100644 --- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h +++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h @@ -41,19 +41,22 @@ public: const struct audio_port_config *srcConfig = NULL) const; // AudioPort + virtual void attach(const sp<HwModule>& module); virtual void loadGains(cnode *root); virtual void toAudioPort(struct audio_port *port) const; + audio_port_handle_t getId() const; audio_devices_t type() const { return mDeviceType; } status_t dump(int fd, int spaces, int index) const; + void log() const; String8 mAddress; - audio_port_handle_t mId; static String8 emptyNameStr; private: - audio_devices_t mDeviceType; + audio_devices_t mDeviceType; + audio_port_handle_t mId; friend class DeviceVector; }; diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h index 095e759..ab6fcc1 100644 --- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h +++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h @@ -33,7 +33,7 @@ class HwModule; class IOProfile : public AudioPort { public: - IOProfile(const String8& name, audio_port_role_t role, const sp<HwModule>& module); + IOProfile(const String8& name, audio_port_role_t role); virtual ~IOProfile(); // This method is used for 
both output and input. @@ -45,7 +45,9 @@ public: uint32_t samplingRate, uint32_t *updatedSamplingRate, audio_format_t format, + audio_format_t *updatedFormat, audio_channel_mask_t channelMask, + audio_channel_mask_t *updatedChannelMask, uint32_t flags) const; void dump(int fd); diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp index fa66728..937160b 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp @@ -27,9 +27,9 @@ namespace android { AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile) - : mId(0), mIoHandle(0), + : mIoHandle(0), mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0), mRefCount(0), - mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false) + mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false), mId(0) { if (profile != NULL) { mSamplingRate = profile->pickSamplingRate(); @@ -49,9 +49,17 @@ void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle) audio_module_handle_t AudioInputDescriptor::getModuleHandle() const { + if (mProfile == 0) { + return 0; + } return mProfile->getModuleHandle(); } +audio_port_handle_t AudioInputDescriptor::getId() const +{ + return mId; +} + void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig) const { @@ -68,7 +76,7 @@ void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig dstConfig->id = mId; dstConfig->role = AUDIO_PORT_ROLE_SINK; dstConfig->type = AUDIO_PORT_TYPE_MIX; - dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle; + dstConfig->ext.mix.hw_module = getModuleHandle(); dstConfig->ext.mix.handle = mIoHandle; dstConfig->ext.mix.usecase.source = mInputSource; } @@ -80,7 +88,7 @@ void 
AudioInputDescriptor::toAudioPort(struct audio_port *port) const mProfile->toAudioPort(port); port->id = mId; toAudioPortConfig(&port->active_config); - port->ext.mix.hw_module = mProfile->mModule->mHandle; + port->ext.mix.hw_module = getModuleHandle(); port->ext.mix.handle = mIoHandle; port->ext.mix.latency_class = AUDIO_LATENCY_NORMAL; } @@ -91,7 +99,7 @@ status_t AudioInputDescriptor::dump(int fd) char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, " ID: %d\n", mId); + snprintf(buffer, SIZE, " ID: %d\n", getId()); result.append(buffer); snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate); result.append(buffer); @@ -130,7 +138,7 @@ sp<AudioInputDescriptor> AudioInputCollection::getInputFromId(audio_port_handle_ sp<AudioInputDescriptor> inputDesc = NULL; for (size_t i = 0; i < size(); i++) { inputDesc = valueAt(i); - if (inputDesc->mId == id) { + if (inputDesc->getId() == id) { break; } } diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp index cdb5b51..596aa1d 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp @@ -17,9 +17,11 @@ #define LOG_TAG "APM::AudioOutputDescriptor" //#define LOG_NDEBUG 0 +#include <AudioPolicyInterface.h> #include "AudioOutputDescriptor.h" #include "IOProfile.h" #include "AudioGain.h" +#include "Volume.h" #include "HwModule.h" #include <media/AudioPolicy.h> @@ -29,11 +31,10 @@ namespace android { -AudioOutputDescriptor::AudioOutputDescriptor(const sp<IOProfile>& profile) - : mId(0), mIoHandle(0), mLatency(0), - mFlags((audio_output_flags_t)0), mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), - mPatchHandle(0), - mOutput1(0), mOutput2(0), mProfile(profile), mDirectOpenCount(0) +AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port, + AudioPolicyClientInterface 
*clientInterface) + : mPort(port), mDevice(AUDIO_DEVICE_NONE), + mPatchHandle(0), mClientInterface(clientInterface), mId(0) { // clear usage count for all stream types for (int i = 0; i < AUDIO_STREAM_CNT; i++) { @@ -45,66 +46,50 @@ AudioOutputDescriptor::AudioOutputDescriptor(const sp<IOProfile>& profile) for (int i = 0; i < NUM_STRATEGIES; i++) { mStrategyMutedByDevice[i] = false; } - if (profile != NULL) { - mFlags = (audio_output_flags_t)profile->mFlags; - mSamplingRate = profile->pickSamplingRate(); - mFormat = profile->pickFormat(); - mChannelMask = profile->pickChannelMask(); - if (profile->mGains.size() > 0) { - profile->mGains[0]->getDefaultConfig(&mGain); + if (port != NULL) { + mSamplingRate = port->pickSamplingRate(); + mFormat = port->pickFormat(); + mChannelMask = port->pickChannelMask(); + if (port->mGains.size() > 0) { + port->mGains[0]->getDefaultConfig(&mGain); } } } audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const { - return mProfile->getModuleHandle(); + return mPort->getModuleHandle(); } -audio_devices_t AudioOutputDescriptor::device() const +audio_port_handle_t AudioOutputDescriptor::getId() const { - if (isDuplicated()) { - return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice); - } else { - return mDevice; - } + return mId; } -void AudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle) +audio_devices_t AudioOutputDescriptor::device() const { - mId = AudioPort::getNextUniqueId(); - mIoHandle = ioHandle; + return mDevice; } -uint32_t AudioOutputDescriptor::latency() +audio_devices_t AudioOutputDescriptor::supportedDevices() { - if (isDuplicated()) { - return (mOutput1->mLatency > mOutput2->mLatency) ? 
mOutput1->mLatency : mOutput2->mLatency; - } else { - return mLatency; - } + return mDevice; } bool AudioOutputDescriptor::sharesHwModuleWith( const sp<AudioOutputDescriptor> outputDesc) { - if (isDuplicated()) { - return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc); - } else if (outputDesc->isDuplicated()){ - return sharesHwModuleWith(outputDesc->mOutput1) || sharesHwModuleWith(outputDesc->mOutput2); + if (outputDesc->isDuplicated()) { + return sharesHwModuleWith(outputDesc->subOutput1()) || + sharesHwModuleWith(outputDesc->subOutput2()); } else { - return (mProfile->mModule == outputDesc->mProfile->mModule); + return (getModuleHandle() == outputDesc->getModuleHandle()); } } void AudioOutputDescriptor::changeRefCount(audio_stream_type_t stream, int delta) { - // forward usage count change to attached outputs - if (isDuplicated()) { - mOutput1->changeRefCount(stream, delta); - mOutput2->changeRefCount(stream, delta); - } if ((delta + (int)mRefCount[stream]) < 0) { ALOGW("changeRefCount() invalid delta %d for stream %d, refCount %d", delta, stream, mRefCount[stream]); @@ -115,15 +100,6 @@ void AudioOutputDescriptor::changeRefCount(audio_stream_type_t stream, ALOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]); } -audio_devices_t AudioOutputDescriptor::supportedDevices() -{ - if (isDuplicated()) { - return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices()); - } else { - return mProfile->mSupportedDevices.types() ; - } -} - bool AudioOutputDescriptor::isActive(uint32_t inPastMs) const { nsecs_t sysTime = 0; @@ -160,12 +136,33 @@ bool AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream, return false; } + +bool AudioOutputDescriptor::isFixedVolume(audio_devices_t device __unused) +{ + return false; +} + +bool AudioOutputDescriptor::setVolume(float volume, + audio_stream_type_t stream, + audio_devices_t device __unused, + uint32_t delayMs, + bool force) +{ + // We 
actually change the volume if: + // - the float value returned by computeVolume() changed + // - the force flag is set + if (volume != mCurVolume[stream] || force) { + ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs); + mCurVolume[stream] = volume; + return true; + } + return false; +} + void AudioOutputDescriptor::toAudioPortConfig( struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig) const { - ALOG_ASSERT(!isDuplicated(), "toAudioPortConfig() called on duplicated output %d", mIoHandle); - dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK| AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN; if (srcConfig != NULL) { @@ -176,22 +173,16 @@ void AudioOutputDescriptor::toAudioPortConfig( dstConfig->id = mId; dstConfig->role = AUDIO_PORT_ROLE_SOURCE; dstConfig->type = AUDIO_PORT_TYPE_MIX; - dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle; - dstConfig->ext.mix.handle = mIoHandle; + dstConfig->ext.mix.hw_module = getModuleHandle(); dstConfig->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT; } void AudioOutputDescriptor::toAudioPort( struct audio_port *port) const { - ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle); - mProfile->toAudioPort(port); + mPort->toAudioPort(port); port->id = mId; - toAudioPortConfig(&port->active_config); - port->ext.mix.hw_module = mProfile->mModule->mHandle; - port->ext.mix.handle = mIoHandle; - port->ext.mix.latency_class = - mFlags & AUDIO_OUTPUT_FLAG_FAST ? 
AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL; + port->ext.mix.hw_module = getModuleHandle(); } status_t AudioOutputDescriptor::dump(int fd) @@ -208,10 +199,6 @@ status_t AudioOutputDescriptor::dump(int fd) result.append(buffer); snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask); result.append(buffer); - snprintf(buffer, SIZE, " Latency: %d\n", mLatency); - result.append(buffer); - snprintf(buffer, SIZE, " Flags %08x\n", mFlags); - result.append(buffer); snprintf(buffer, SIZE, " Devices %08x\n", device()); result.append(buffer); snprintf(buffer, SIZE, " Stream volume refCount muteCount\n"); @@ -226,11 +213,165 @@ status_t AudioOutputDescriptor::dump(int fd) return NO_ERROR; } -bool AudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const +void AudioOutputDescriptor::log(const char* indent) +{ + ALOGI("%sID: %d,0x%X, [rt:%d fmt:0x%X ch:0x%X]", + indent, mId, mId, mSamplingRate, mFormat, mChannelMask); +} + +// SwAudioOutputDescriptor implementation +SwAudioOutputDescriptor::SwAudioOutputDescriptor( + const sp<IOProfile>& profile, AudioPolicyClientInterface *clientInterface) + : AudioOutputDescriptor(profile, clientInterface), + mProfile(profile), mIoHandle(0), mLatency(0), + mFlags((audio_output_flags_t)0), mPolicyMix(NULL), + mOutput1(0), mOutput2(0), mDirectOpenCount(0) +{ + if (profile != NULL) { + mFlags = (audio_output_flags_t)profile->mFlags; + } +} + +void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle) +{ + mId = AudioPort::getNextUniqueId(); + mIoHandle = ioHandle; +} + + +status_t SwAudioOutputDescriptor::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " Latency: %d\n", mLatency); + result.append(buffer); + snprintf(buffer, SIZE, " Flags %08x\n", mFlags); + result.append(buffer); + write(fd, result.string(), result.size()); + + AudioOutputDescriptor::dump(fd); + + return NO_ERROR; +} + +audio_devices_t SwAudioOutputDescriptor::device() const 
+{ + if (isDuplicated()) { + return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice); + } else { + return mDevice; + } +} + +bool SwAudioOutputDescriptor::sharesHwModuleWith( + const sp<AudioOutputDescriptor> outputDesc) +{ + if (isDuplicated()) { + return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc); + } else if (outputDesc->isDuplicated()){ + return sharesHwModuleWith(outputDesc->subOutput1()) || + sharesHwModuleWith(outputDesc->subOutput2()); + } else { + return AudioOutputDescriptor::sharesHwModuleWith(outputDesc); + } +} + +audio_devices_t SwAudioOutputDescriptor::supportedDevices() +{ + if (isDuplicated()) { + return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices()); + } else { + return mProfile->mSupportedDevices.types() ; + } +} + +uint32_t SwAudioOutputDescriptor::latency() +{ + if (isDuplicated()) { + return (mOutput1->mLatency > mOutput2->mLatency) ? mOutput1->mLatency : mOutput2->mLatency; + } else { + return mLatency; + } +} + +void SwAudioOutputDescriptor::changeRefCount(audio_stream_type_t stream, + int delta) +{ + // forward usage count change to attached outputs + if (isDuplicated()) { + mOutput1->changeRefCount(stream, delta); + mOutput2->changeRefCount(stream, delta); + } + AudioOutputDescriptor::changeRefCount(stream, delta); +} + + +bool SwAudioOutputDescriptor::isFixedVolume(audio_devices_t device) +{ + // unit gain if rerouting to external policy + if (device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX) { + if (mPolicyMix != NULL) { + ALOGV("max gain when rerouting for output=%d", mIoHandle); + return true; + } + } + return false; +} + +void SwAudioOutputDescriptor::toAudioPortConfig( + struct audio_port_config *dstConfig, + const struct audio_port_config *srcConfig) const +{ + + ALOG_ASSERT(!isDuplicated(), "toAudioPortConfig() called on duplicated output %d", mIoHandle); + AudioOutputDescriptor::toAudioPortConfig(dstConfig, srcConfig); + + dstConfig->ext.mix.handle = 
mIoHandle; +} + +void SwAudioOutputDescriptor::toAudioPort( + struct audio_port *port) const +{ + ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle); + + AudioOutputDescriptor::toAudioPort(port); + + toAudioPortConfig(&port->active_config); + port->ext.mix.handle = mIoHandle; + port->ext.mix.latency_class = + mFlags & AUDIO_OUTPUT_FLAG_FAST ? AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL; +} + +bool SwAudioOutputDescriptor::setVolume(float volume, + audio_stream_type_t stream, + audio_devices_t device, + uint32_t delayMs, + bool force) +{ + bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force); + + if (changed) { + // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is + // enabled + float volume = Volume::DbToAmpl(mCurVolume[stream]); + if (stream == AUDIO_STREAM_BLUETOOTH_SCO) { + mClientInterface->setStreamVolume( + AUDIO_STREAM_VOICE_CALL, volume, mIoHandle, delayMs); + } + mClientInterface->setStreamVolume(stream, volume, mIoHandle, delayMs); + } + return changed; +} + +// SwAudioOutputCollection implementation + +bool SwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const { nsecs_t sysTime = systemTime(); for (size_t i = 0; i < this->size(); i++) { - const sp<AudioOutputDescriptor> outputDesc = this->valueAt(i); + const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i); if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) { return true; } @@ -238,12 +379,12 @@ bool AudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t return false; } -bool AudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream, +bool SwAudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const { nsecs_t sysTime = systemTime(); for (size_t i = 0; i < size(); i++) { - const sp<AudioOutputDescriptor> outputDesc = valueAt(i); + const sp<SwAudioOutputDescriptor> outputDesc = 
valueAt(i); if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) && outputDesc->isStreamActive(stream, inPastMs, sysTime)) { // do not consider re routing (when the output is going to a dynamic policy) @@ -256,10 +397,10 @@ bool AudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream, return false; } -audio_io_handle_t AudioOutputCollection::getA2dpOutput() const +audio_io_handle_t SwAudioOutputCollection::getA2dpOutput() const { for (size_t i = 0; i < size(); i++) { - sp<AudioOutputDescriptor> outputDesc = valueAt(i); + sp<SwAudioOutputDescriptor> outputDesc = valueAt(i); if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) { return this->keyAt(i); } @@ -267,10 +408,10 @@ audio_io_handle_t AudioOutputCollection::getA2dpOutput() const return 0; } -sp<AudioOutputDescriptor> AudioOutputCollection::getPrimaryOutput() const +sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getPrimaryOutput() const { for (size_t i = 0; i < size(); i++) { - const sp<AudioOutputDescriptor> outputDesc = valueAt(i); + const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i); if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) { return outputDesc; } @@ -278,26 +419,26 @@ sp<AudioOutputDescriptor> AudioOutputCollection::getPrimaryOutput() const return NULL; } -sp<AudioOutputDescriptor> AudioOutputCollection::getOutputFromId(audio_port_handle_t id) const +sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputFromId(audio_port_handle_t id) const { - sp<AudioOutputDescriptor> outputDesc = NULL; + sp<SwAudioOutputDescriptor> outputDesc = NULL; for (size_t i = 0; i < size(); i++) { outputDesc = valueAt(i); - if (outputDesc->mId == id) { + if (outputDesc->getId() == id) { break; } } return outputDesc; } -bool AudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const +bool SwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const { for (size_t s = 0 ; s < AUDIO_STREAM_CNT 
; s++) { if (s == (size_t) streamToIgnore) { continue; } for (size_t i = 0; i < size(); i++) { - const sp<AudioOutputDescriptor> outputDesc = valueAt(i); + const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i); if (outputDesc->mRefCount[s] != 0) { return true; } @@ -306,15 +447,15 @@ bool AudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore return false; } -audio_devices_t AudioOutputCollection::getSupportedDevices(audio_io_handle_t handle) const +audio_devices_t SwAudioOutputCollection::getSupportedDevices(audio_io_handle_t handle) const { - sp<AudioOutputDescriptor> outputDesc = valueFor(handle); + sp<SwAudioOutputDescriptor> outputDesc = valueFor(handle); audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types(); return devices; } -status_t AudioOutputCollection::dump(int fd) const +status_t SwAudioOutputCollection::dump(int fd) const { const size_t SIZE = 256; char buffer[SIZE]; diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp index 3a317fa..a06d867 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp @@ -54,8 +54,8 @@ status_t AudioPatch::dump(int fd, int spaces, int index) const for (size_t i = 0; i < mPatch.num_sources; i++) { if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) { snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "", - mPatch.sources[i].id, ConfigParsingUtils::enumToString(sDeviceNameToEnumTable, - ARRAY_SIZE(sDeviceNameToEnumTable), + mPatch.sources[i].id, ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable, + ARRAY_SIZE(sDeviceTypeToEnumTable), mPatch.sources[i].ext.device.type)); } else { snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "", @@ -68,8 +68,8 @@ status_t AudioPatch::dump(int fd, int spaces, int index) const for (size_t i = 0; i < mPatch.num_sinks; i++) { 
if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) { snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "", - mPatch.sinks[i].id, ConfigParsingUtils::enumToString(sDeviceNameToEnumTable, - ARRAY_SIZE(sDeviceNameToEnumTable), + mPatch.sinks[i].id, ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable, + ARRAY_SIZE(sDeviceTypeToEnumTable), mPatch.sinks[i].ext.device.type)); } else { snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "", diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp index 84a53eb..77fc0b9 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp @@ -26,12 +26,12 @@ namespace android { -void AudioPolicyMix::setOutput(sp<AudioOutputDescriptor> &output) +void AudioPolicyMix::setOutput(sp<SwAudioOutputDescriptor> &output) { mOutput = output; } -const sp<AudioOutputDescriptor> &AudioPolicyMix::getOutput() const +const sp<SwAudioOutputDescriptor> &AudioPolicyMix::getOutput() const { return mOutput; } @@ -46,9 +46,9 @@ void AudioPolicyMix::setMix(AudioMix &mix) mMix = mix; } -android::AudioMix &AudioPolicyMix::getMix() +android::AudioMix *AudioPolicyMix::getMix() { - return mMix; + return &mMix; } status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix) @@ -88,7 +88,7 @@ status_t AudioPolicyMixCollection::getAudioPolicyMix(String8 address, return NO_ERROR; } -void AudioPolicyMixCollection::closeOutput(sp<AudioOutputDescriptor> &desc) +void AudioPolicyMixCollection::closeOutput(sp<SwAudioOutputDescriptor> &desc) { for (size_t i = 0; i < size(); i++) { sp<AudioPolicyMix> policyMix = valueAt(i); @@ -99,40 +99,40 @@ void AudioPolicyMixCollection::closeOutput(sp<AudioOutputDescriptor> &desc) } status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, - sp<AudioOutputDescriptor> 
&desc) + sp<SwAudioOutputDescriptor> &desc) { for (size_t i = 0; i < size(); i++) { sp<AudioPolicyMix> policyMix = valueAt(i); - AudioMix mix = policyMix->getMix(); - - if (mix.mMixType == MIX_TYPE_PLAYERS) { - for (size_t j = 0; j < mix.mCriteria.size(); j++) { - if ((RULE_MATCH_ATTRIBUTE_USAGE == mix.mCriteria[j].mRule && - mix.mCriteria[j].mAttr.mUsage == attributes.usage) || - (RULE_EXCLUDE_ATTRIBUTE_USAGE == mix.mCriteria[j].mRule && - mix.mCriteria[j].mAttr.mUsage != attributes.usage)) { + AudioMix *mix = policyMix->getMix(); + + if (mix->mMixType == MIX_TYPE_PLAYERS) { + for (size_t j = 0; j < mix->mCriteria.size(); j++) { + if ((RULE_MATCH_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule && + mix->mCriteria[j].mAttr.mUsage == attributes.usage) || + (RULE_EXCLUDE_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule && + mix->mCriteria[j].mAttr.mUsage != attributes.usage)) { desc = policyMix->getOutput(); break; } if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 && strncmp(attributes.tags + strlen("addr="), - mix.mRegistrationId.string(), + mix->mRegistrationId.string(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) { desc = policyMix->getOutput(); break; } } - } else if (mix.mMixType == MIX_TYPE_RECORDERS) { + } else if (mix->mMixType == MIX_TYPE_RECORDERS) { if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE && strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 && strncmp(attributes.tags + strlen("addr="), - mix.mRegistrationId.string(), + mix->mRegistrationId.string(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) { desc = policyMix->getOutput(); } } if (desc != 0) { - desc->mPolicyMix = &mix; + desc->mPolicyMix = mix; return NO_ERROR; } } @@ -144,19 +144,19 @@ audio_devices_t AudioPolicyMixCollection::getDeviceAndMixForInputSource(audio_so AudioMix **policyMix) { for (size_t i = 0; i < size(); i++) { - AudioMix mix = valueAt(i)->getMix(); + AudioMix *mix = valueAt(i)->getMix(); - if (mix.mMixType != MIX_TYPE_RECORDERS) { + if 
(mix->mMixType != MIX_TYPE_RECORDERS) { continue; } - for (size_t j = 0; j < mix.mCriteria.size(); j++) { - if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix.mCriteria[j].mRule && - mix.mCriteria[j].mAttr.mSource == inputSource) || - (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix.mCriteria[j].mRule && - mix.mCriteria[j].mAttr.mSource != inputSource)) { + for (size_t j = 0; j < mix->mCriteria.size(); j++) { + if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule && + mix->mCriteria[j].mAttr.mSource == inputSource) || + (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule && + mix->mCriteria[j].mAttr.mSource != inputSource)) { if (availDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) { if (policyMix != NULL) { - *policyMix = &mix; + *policyMix = mix; } return AUDIO_DEVICE_IN_REMOTE_SUBMIX; } @@ -167,7 +167,7 @@ audio_devices_t AudioPolicyMixCollection::getDeviceAndMixForInputSource(audio_so return AUDIO_DEVICE_NONE; } -status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, AudioMix *&policyMix) +status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix) { if (strncmp(attr.tags, "addr=", strlen("addr=")) != 0) { return BAD_VALUE; @@ -180,13 +180,13 @@ status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, A return BAD_VALUE; } sp<AudioPolicyMix> audioPolicyMix = valueAt(index); - AudioMix mix = audioPolicyMix->getMix(); + AudioMix *mix = audioPolicyMix->getMix(); - if (mix.mMixType != MIX_TYPE_PLAYERS) { + if (mix->mMixType != MIX_TYPE_PLAYERS) { ALOGW("getInputForAttr() bad policy mix type for address %s", address.string()); return BAD_VALUE; } - policyMix = &mix; + *policyMix = mix; return NO_ERROR; } diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp index 46a119e..f3978ec 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp +++ 
b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp @@ -16,7 +16,7 @@ #define LOG_TAG "APM::AudioPort" //#define LOG_NDEBUG 0 - +#include <media/AudioResamplerPublic.h> #include "AudioPort.h" #include "HwModule.h" #include "AudioGain.h" @@ -31,8 +31,8 @@ int32_t volatile AudioPort::mNextUniqueId = 1; // --- AudioPort class implementation AudioPort::AudioPort(const String8& name, audio_port_type_t type, - audio_port_role_t role, const sp<HwModule>& module) : - mName(name), mType(type), mRole(role), mModule(module), mFlags(0), mId(0) + audio_port_role_t role) : + mName(name), mType(type), mRole(role), mFlags(0) { mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) || ((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK)); @@ -40,7 +40,6 @@ AudioPort::AudioPort(const String8& name, audio_port_type_t type, void AudioPort::attach(const sp<HwModule>& module) { - mId = getNextUniqueId(); mModule = module; } @@ -51,9 +50,28 @@ audio_port_handle_t AudioPort::getNextUniqueId() audio_module_handle_t AudioPort::getModuleHandle() const { + if (mModule == 0) { + return 0; + } return mModule->mHandle; } +uint32_t AudioPort::getModuleVersion() const +{ + if (mModule == 0) { + return 0; + } + return mModule->mHalVersion; +} + +const char *AudioPort::getModuleName() const +{ + if (mModule == 0) { + return ""; + } + return mModule->mName; +} + void AudioPort::toAudioPort(struct audio_port *port) const { port->role = mRole; @@ -198,6 +216,7 @@ void AudioPort::loadFormats(char *name) } str = strtok(NULL, "|"); } + mFormats.sort(compareFormatsGoodToBad); } void AudioPort::loadInChannels(char *name) @@ -340,6 +359,9 @@ status_t AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate, uint32_t *updatedSamplingRate) const { if (mSamplingRates.isEmpty()) { + if (updatedSamplingRate != NULL) { + *updatedSamplingRate = samplingRate; + } return NO_ERROR; } @@ -369,16 +391,11 @@ status_t 
AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate, } } } - // This uses hard-coded knowledge about AudioFlinger resampling ratios. - // TODO Move these assumptions out. - static const uint32_t kMaxDownSampleRatio = 6; // beyond this aliasing occurs - static const uint32_t kMaxUpSampleRatio = 256; // beyond this sample rate inaccuracies occur - // due to approximation by an int32_t of the - // phase increments + // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum. if (minAbove >= 0) { candidate = mSamplingRates[minAbove]; - if (candidate / kMaxDownSampleRatio <= samplingRate) { + if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) { if (updatedSamplingRate != NULL) { *updatedSamplingRate = candidate; } @@ -388,7 +405,7 @@ status_t AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate, // But if we have to up-sample from a lower sampling rate, that's OK. if (maxBelow >= 0) { candidate = mSamplingRates[maxBelow]; - if (candidate * kMaxUpSampleRatio >= samplingRate) { + if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) { if (updatedSamplingRate != NULL) { *updatedSamplingRate = candidate; } @@ -413,10 +430,13 @@ status_t AudioPort::checkExactChannelMask(audio_channel_mask_t channelMask) cons return BAD_VALUE; } -status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask) - const +status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask, + audio_channel_mask_t *updatedChannelMask) const { if (mChannelMasks.isEmpty()) { + if (updatedChannelMask != NULL) { + *updatedChannelMask = channelMask; + } return NO_ERROR; } @@ -425,6 +445,9 @@ status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask) // FIXME Does not handle multi-channel automatic conversions yet audio_channel_mask_t supported = mChannelMasks[i]; if (supported == channelMask) { + if (updatedChannelMask != NULL) { + *updatedChannelMask = channelMask; + } return 
NO_ERROR; } if (isRecordThread) { @@ -434,6 +457,9 @@ status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask) && channelMask == AUDIO_CHANNEL_IN_MONO) || (supported == AUDIO_CHANNEL_IN_MONO && (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK || channelMask == AUDIO_CHANNEL_IN_STEREO))) { + if (updatedChannelMask != NULL) { + *updatedChannelMask = supported; + } return NO_ERROR; } } @@ -441,7 +467,7 @@ status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask) return BAD_VALUE; } -status_t AudioPort::checkFormat(audio_format_t format) const +status_t AudioPort::checkExactFormat(audio_format_t format) const { if (mFormats.isEmpty()) { return NO_ERROR; @@ -455,6 +481,33 @@ status_t AudioPort::checkFormat(audio_format_t format) const return BAD_VALUE; } +status_t AudioPort::checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat) + const +{ + if (mFormats.isEmpty()) { + if (updatedFormat != NULL) { + *updatedFormat = format; + } + return NO_ERROR; + } + + const bool checkInexact = // when port is input and format is linear pcm + mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK + && audio_is_linear_pcm(format); + + for (size_t i = 0; i < mFormats.size(); ++i) { + if (mFormats[i] == format || + (checkInexact && audio_is_linear_pcm(mFormats[i]))) { + // for inexact checks we take the first linear pcm format since + // mFormats is sorted from best PCM format to worst PCM format. 
+ if (updatedFormat != NULL) { + *updatedFormat = mFormats[i]; + } + return NO_ERROR; + } + } + return BAD_VALUE; +} uint32_t AudioPort::pickSamplingRate() const { @@ -629,7 +682,7 @@ void AudioPort::dump(int fd, int spaces) const char buffer[SIZE]; String8 result; - if (mName.size() != 0) { + if (mName.length() != 0) { snprintf(buffer, SIZE, "%*s- name: %s\n", spaces, "", mName.string()); result.append(buffer); } @@ -687,13 +740,16 @@ void AudioPort::dump(int fd, int spaces) const if (mGains.size() != 0) { snprintf(buffer, SIZE, "%*s- gains:\n", spaces, ""); write(fd, buffer, strlen(buffer) + 1); - result.append(buffer); for (size_t i = 0; i < mGains.size(); i++) { mGains[i]->dump(fd, spaces + 2, i); } } } +void AudioPort::log(const char* indent) const +{ + ALOGI("%s Port[nm:%s, type:%d, role:%d]", indent, mName.string(), mType, mRole); +} // --- AudioPortConfig class implementation @@ -735,7 +791,7 @@ status_t AudioPortConfig::applyAudioPortConfig( mChannelMask = config->channel_mask; } if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) { - status = audioport->checkFormat(config->format); + status = audioport->checkExactFormat(config->format); if (status != NO_ERROR) { goto exit; } diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp index fe5bc5f..9ab1d61 100644 --- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp @@ -113,8 +113,8 @@ audio_devices_t ConfigParsingUtils::parseDeviceNames(char *name) char *devName = strtok(name, "|"); while (devName != NULL) { if (strlen(devName) != 0) { - device |= stringToEnum(sDeviceNameToEnumTable, - ARRAY_SIZE(sDeviceNameToEnumTable), + device |= stringToEnum(sDeviceTypeToEnumTable, + ARRAY_SIZE(sDeviceTypeToEnumTable), devName); } devName = strtok(NULL, "|"); @@ -224,8 +224,8 @@ void 
ConfigParsingUtils::loadGlobalConfig(cnode *root, const sp<HwModule>& modul availableOutputDevices.types()); } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) { audio_devices_t device = (audio_devices_t)stringToEnum( - sDeviceNameToEnumTable, - ARRAY_SIZE(sDeviceNameToEnumTable), + sDeviceTypeToEnumTable, + ARRAY_SIZE(sDeviceTypeToEnumTable), (char *)node->value); if (device != AUDIO_DEVICE_NONE) { defaultOutputDevice = new DeviceDescriptor(String8("default-output"), device); diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp index 7df7d75..9573583 100644 --- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp @@ -29,13 +29,23 @@ String8 DeviceDescriptor::emptyNameStr = String8(""); DeviceDescriptor::DeviceDescriptor(const String8& name, audio_devices_t type) : AudioPort(name, AUDIO_PORT_TYPE_DEVICE, audio_is_output_device(type) ? 
AUDIO_PORT_ROLE_SINK : - AUDIO_PORT_ROLE_SOURCE, - NULL), - mAddress(""), mDeviceType(type) + AUDIO_PORT_ROLE_SOURCE), + mAddress(""), mDeviceType(type), mId(0) { } +audio_port_handle_t DeviceDescriptor::getId() const +{ + return mId; +} + +void DeviceDescriptor::attach(const sp<HwModule>& module) +{ + AudioPort::attach(module); + mId = getNextUniqueId(); +} + bool DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const { // Devices are considered equal if they: @@ -139,11 +149,14 @@ void DeviceVector::loadDevicesFromName(char *name, char *devName = strtok(name, "|"); while (devName != NULL) { if (strlen(devName) != 0) { - audio_devices_t type = ConfigParsingUtils::stringToEnum(sDeviceNameToEnumTable, - ARRAY_SIZE(sDeviceNameToEnumTable), + audio_devices_t type = ConfigParsingUtils::stringToEnum(sDeviceTypeToEnumTable, + ARRAY_SIZE(sDeviceTypeToEnumTable), devName); if (type != AUDIO_DEVICE_NONE) { - sp<DeviceDescriptor> dev = new DeviceDescriptor(String8(name), type); + devName = (char *)ConfigParsingUtils::enumToString(sDeviceNameToEnumTable, + ARRAY_SIZE(sDeviceNameToEnumTable), + type); + sp<DeviceDescriptor> dev = new DeviceDescriptor(String8(devName), type); if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) { dev->mAddress = String8("0"); @@ -183,7 +196,7 @@ sp<DeviceDescriptor> DeviceVector::getDeviceFromId(audio_port_handle_t id) const { sp<DeviceDescriptor> device; for (size_t i = 0; i < size(); i++) { - if (itemAt(i)->getHandle() == id) { + if (itemAt(i)->getId() == id) { device = itemAt(i); break; } @@ -303,8 +316,8 @@ status_t DeviceDescriptor::dump(int fd, int spaces, int index) const result.append(buffer); } snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "", - ConfigParsingUtils::enumToString(sDeviceNameToEnumTable, - ARRAY_SIZE(sDeviceNameToEnumTable), + ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable, + ARRAY_SIZE(sDeviceTypeToEnumTable), mDeviceType)); result.append(buffer); if 
(mAddress.size() != 0) { @@ -317,4 +330,16 @@ status_t DeviceDescriptor::dump(int fd, int spaces, int index) const return NO_ERROR; } +void DeviceDescriptor::log() const +{ + ALOGI("Device id:%d type:0x%X:%s, addr:%s", + mId, + mDeviceType, + ConfigParsingUtils::enumToString( + sDeviceNameToEnumTable, ARRAY_SIZE(sDeviceNameToEnumTable), mDeviceType), + mAddress.string()); + + AudioPort::log(" "); +} + }; // namespace android diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp index 0097d69..e955447 100644 --- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp @@ -48,7 +48,7 @@ status_t HwModule::loadInput(cnode *root) { cnode *node = root->first_child; - sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SINK, this); + sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SINK); while (node) { if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) { @@ -83,6 +83,7 @@ status_t HwModule::loadInput(cnode *root) ALOGV("loadInput() adding input Supported Devices %04x", profile->mSupportedDevices.types()); + profile->attach(this); mInputProfiles.add(profile); return NO_ERROR; } else { @@ -94,7 +95,7 @@ status_t HwModule::loadOutput(cnode *root) { cnode *node = root->first_child; - sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SOURCE, this); + sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SOURCE); while (node) { if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) { @@ -128,7 +129,7 @@ status_t HwModule::loadOutput(cnode *root) ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x", profile->mSupportedDevices.types(), profile->mFlags); - + profile->attach(this); mOutputProfiles.add(profile); return NO_ERROR; } else { @@ -154,7 +155,6 @@ status_t HwModule::loadDevice(cnode *root) return 
BAD_VALUE; } sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(String8(root->name), type); - deviceDesc->mModule = this; node = root->first_child; while (node) { @@ -183,7 +183,7 @@ status_t HwModule::loadDevice(cnode *root) status_t HwModule::addOutputProfile(String8 name, const audio_config_t *config, audio_devices_t device, String8 address) { - sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SOURCE, this); + sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SOURCE); profile->mSamplingRates.add(config->sample_rate); profile->mChannelMasks.add(config->channel_mask); @@ -193,6 +193,7 @@ status_t HwModule::addOutputProfile(String8 name, const audio_config_t *config, devDesc->mAddress = address; profile->mSupportedDevices.add(devDesc); + profile->attach(this); mOutputProfiles.add(profile); return NO_ERROR; @@ -213,7 +214,7 @@ status_t HwModule::removeOutputProfile(String8 name) status_t HwModule::addInputProfile(String8 name, const audio_config_t *config, audio_devices_t device, String8 address) { - sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SINK, this); + sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SINK); profile->mSamplingRates.add(config->sample_rate); profile->mChannelMasks.add(config->channel_mask); @@ -225,6 +226,7 @@ status_t HwModule::addInputProfile(String8 name, const audio_config_t *config, ALOGV("addInputProfile() name %s rate %d mask 0x08", name.string(), config->sample_rate, config->channel_mask); + profile->attach(this); mInputProfiles.add(profile); return NO_ERROR; diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp index 376dd22..7b6d51d 100644 --- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp @@ -23,9 +23,8 @@ namespace android { -IOProfile::IOProfile(const String8& name, audio_port_role_t role, - const 
sp<HwModule>& module) - : AudioPort(name, AUDIO_PORT_TYPE_MIX, role, module) +IOProfile::IOProfile(const String8& name, audio_port_role_t role) + : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) { } @@ -41,7 +40,9 @@ bool IOProfile::isCompatibleProfile(audio_devices_t device, uint32_t samplingRate, uint32_t *updatedSamplingRate, audio_format_t format, + audio_format_t *updatedFormat, audio_channel_mask_t channelMask, + audio_channel_mask_t *updatedChannelMask, uint32_t flags) const { const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE; @@ -72,7 +73,14 @@ bool IOProfile::isCompatibleProfile(audio_devices_t device, return false; } - if (!audio_is_valid_format(format) || checkFormat(format) != NO_ERROR) { + if (!audio_is_valid_format(format)) { + return false; + } + if (isPlaybackThread && checkExactFormat(format) != NO_ERROR) { + return false; + } + audio_format_t myUpdatedFormat = format; + if (isRecordThread && checkCompatibleFormat(format, &myUpdatedFormat) != NO_ERROR) { return false; } @@ -80,8 +88,9 @@ bool IOProfile::isCompatibleProfile(audio_devices_t device, checkExactChannelMask(channelMask) != NO_ERROR)) { return false; } + audio_channel_mask_t myUpdatedChannelMask = channelMask; if (isRecordThread && (!audio_is_input_channel(channelMask) || - checkCompatibleChannelMask(channelMask) != NO_ERROR)) { + checkCompatibleChannelMask(channelMask, &myUpdatedChannelMask) != NO_ERROR)) { return false; } @@ -100,6 +109,12 @@ bool IOProfile::isCompatibleProfile(audio_devices_t device, if (updatedSamplingRate != NULL) { *updatedSamplingRate = myUpdatedSamplingRate; } + if (updatedFormat != NULL) { + *updatedFormat = myUpdatedFormat; + } + if (updatedChannelMask != NULL) { + *updatedChannelMask = myUpdatedChannelMask; + } return true; } diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h index eadaa77..db0573f 100755 --- 
a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h +++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h @@ -134,16 +134,16 @@ public: audio_policy_dev_state_t state) = 0; /** - * Translate a volume index given by the UI to an amplification value for a stream type + * Translate a volume index given by the UI to an amplification value in dB for a stream type * and a device category. * * @param[in] deviceCategory for which the conversion is requested. * @param[in] stream type for which the conversion is requested. * @param[in] indexInUi index received from the UI to be translated. * - * @return amplification value matching the UI index for this given device and stream. + * @return amplification value in dB matching the UI index for this given device and stream. */ - virtual float volIndexToAmpl(Volume::device_category deviceCategory, audio_stream_type_t stream, + virtual float volIndexToDb(Volume::device_category deviceCategory, audio_stream_type_t stream, int indexInUi) = 0; /** diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h index 4f5427e..6d43df2 100755 --- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h +++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h @@ -43,7 +43,7 @@ public: virtual const AudioPolicyMixCollection &getAudioPolicyMixCollection() const = 0; - virtual const AudioOutputCollection &getOutputs() const = 0; + virtual const SwAudioOutputCollection &getOutputs() const = 0; virtual const AudioInputCollection &getInputs() const = 0; diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp index b4d7246..50f1609 100755 --- a/services/audiopolicy/enginedefault/src/Engine.cpp +++ b/services/audiopolicy/enginedefault/src/Engine.cpp @@ -63,13 +63,14 @@ status_t Engine::initCheck() return (mApmObserver != NULL) ? 
NO_ERROR : NO_INIT; } -float Engine::volIndexToAmpl(Volume::device_category category, audio_stream_type_t streamType, +float Engine::volIndexToDb(Volume::device_category category, audio_stream_type_t streamType, int indexInUi) { const StreamDescriptor &streamDesc = mApmObserver->getStreamDescriptors().valueAt(streamType); - return Gains::volIndexToAmpl(category, streamDesc, indexInUi); + return Gains::volIndexToDb(category, streamDesc, indexInUi); } + status_t Engine::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) { ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax); @@ -243,7 +244,7 @@ routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream) routing_strategy Engine::getStrategyForUsage(audio_usage_t usage) { - const AudioOutputCollection &outputs = mApmObserver->getOutputs(); + const SwAudioOutputCollection &outputs = mApmObserver->getOutputs(); // usage to strategy mapping switch (usage) { @@ -291,7 +292,7 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices(); const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices(); - const AudioOutputCollection &outputs = mApmObserver->getOutputs(); + const SwAudioOutputCollection &outputs = mApmObserver->getOutputs(); uint32_t device = AUDIO_DEVICE_NONE; uint32_t availableOutputDevicesType = availableOutputDevices.types(); @@ -358,7 +359,7 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const if (((availableInputDevices.types() & AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) || (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) && - (primaryOutput->getAudioPort()->mModule->mHalVersion < + (primaryOutput->getAudioPort()->getModuleVersion() < AUDIO_DEVICE_API_VERSION_3_0))) { availableOutputDevicesType = availPrimaryOutputDevices; } @@ -515,7 +516,7 @@ 
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const if (device2 == AUDIO_DEVICE_NONE) { device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE; } - if ((device2 == AUDIO_DEVICE_NONE)) { + if (device2 == AUDIO_DEVICE_NONE) { device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE; } if (device2 == AUDIO_DEVICE_NONE) { @@ -582,7 +583,7 @@ audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) cons { const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices(); const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices(); - const AudioOutputCollection &outputs = mApmObserver->getOutputs(); + const SwAudioOutputCollection &outputs = mApmObserver->getOutputs(); audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN; uint32_t device = AUDIO_DEVICE_NONE; diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h index f44556c..56a4748 100755 --- a/services/audiopolicy/enginedefault/src/Engine.h +++ b/services/audiopolicy/enginedefault/src/Engine.h @@ -101,10 +101,10 @@ private: { return mPolicyEngine->initializeVolumeCurves(isSpeakerDrcEnabled); } - virtual float volIndexToAmpl(Volume::device_category deviceCategory, + virtual float volIndexToDb(Volume::device_category deviceCategory, audio_stream_type_t stream,int indexInUi) { - return mPolicyEngine->volIndexToAmpl(deviceCategory, stream, indexInUi); + return mPolicyEngine->volIndexToDb(deviceCategory, stream, indexInUi); } private: Engine *mPolicyEngine; @@ -141,7 +141,7 @@ private: audio_devices_t getDeviceForStrategy(routing_strategy strategy) const; audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const; - float volIndexToAmpl(Volume::device_category category, + float volIndexToDb(Volume::device_category category, audio_stream_type_t stream, int indexInUi); status_t 
initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax); void initializeVolumeCurves(bool isSpeakerDrcEnabled); diff --git a/services/audiopolicy/enginedefault/src/Gains.cpp b/services/audiopolicy/enginedefault/src/Gains.cpp index a684fdd..78f2909 100644 --- a/services/audiopolicy/enginedefault/src/Gains.cpp +++ b/services/audiopolicy/enginedefault/src/Gains.cpp @@ -197,10 +197,10 @@ const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT] }; //static -float Gains::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc, - int indexInUi) +float Gains::volIndexToDb(Volume::device_category deviceCategory, + const StreamDescriptor& streamDesc, + int indexInUi) { - Volume::device_category deviceCategory = Volume::getDeviceCategory(device); const VolumeCurvePoint *curve = streamDesc.getVolumeCurvePoint(deviceCategory); // the volume index in the UI is relative to the min and max volume indices for this stream type @@ -212,7 +212,7 @@ float Gains::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& stre // find what part of the curve this index volume belongs to, or if it's out of bounds int segment = 0; if (volIdx < curve[Volume::VOLMIN].mIndex) { // out of bounds - return 0.0f; + return VOLUME_MIN_DB; } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) { segment = 0; } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) { @@ -220,7 +220,7 @@ float Gains::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& stre } else if (volIdx <= curve[Volume::VOLMAX].mIndex) { segment = 2; } else { // out of bounds - return 1.0f; + return 0.0f; } // linear interpolation in the attenuation table in dB @@ -231,17 +231,25 @@ float Gains::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& stre ((float)(curve[segment+1].mIndex - curve[segment].mIndex)) ); - float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 ) - - ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f", + 
ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]", curve[segment].mIndex, volIdx, curve[segment+1].mIndex, curve[segment].mDBAttenuation, decibels, - curve[segment+1].mDBAttenuation, - amplification); + curve[segment+1].mDBAttenuation); + + return decibels; +} - return amplification; + +//static +float Gains::volIndexToAmpl(Volume::device_category deviceCategory, + const StreamDescriptor& streamDesc, + int indexInUi) +{ + return Volume::DbToAmpl(volIndexToDb(deviceCategory, streamDesc, indexInUi)); } + + }; // namespace android diff --git a/services/audiopolicy/enginedefault/src/Gains.h b/services/audiopolicy/enginedefault/src/Gains.h index b5601ca..7620b7d 100644 --- a/services/audiopolicy/enginedefault/src/Gains.h +++ b/services/audiopolicy/enginedefault/src/Gains.h @@ -29,8 +29,13 @@ class StreamDescriptor; class Gains { public : - static float volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc, - int indexInUi); + static float volIndexToAmpl(Volume::device_category deviceCategory, + const StreamDescriptor& streamDesc, + int indexInUi); + + static float volIndexToDb(Volume::device_category deviceCategory, + const StreamDescriptor& streamDesc, + int indexInUi); // default volume curve static const VolumeCurvePoint sDefaultVolumeCurve[Volume::VOLCNT]; diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp index 797a2b4..ba9f996 100644 --- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp +++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp @@ -157,7 +157,7 @@ status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device, // outputs must be closed after checkOutputForAllStrategies() is executed if (!outputs.isEmpty()) { for (size_t i = 0; i < outputs.size(); i++) { - sp<AudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]); // close unused 
outputs after device disconnection or direct outputs that have been // opened by checkOutputsForDevice() to query dynamic parameters if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) || @@ -176,18 +176,17 @@ status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device, updateCallRouting(newDevice); } for (size_t i = 0; i < mOutputs.size(); i++) { - audio_io_handle_t output = mOutputs.keyAt(i); - if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) { - audio_devices_t newDevice = getNewOutputDevice(mOutputs.keyAt(i), - true /*fromCache*/); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); + if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) { + audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/); // do not force device change on duplicated output because if device is 0, it will // also force a device 0 for the two outputs it is duplicated to which may override // a valid device selection on those outputs. 
- bool force = !mOutputs.valueAt(i)->isDuplicated() + bool force = !desc->isDuplicated() && (!device_distinguishes_on_address(device) // always force when disconnecting (a non-duplicated device) || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)); - setOutputDevice(output, newDevice, force, 0); + setOutputDevice(desc, newDevice, force, 0); } } @@ -349,10 +348,11 @@ void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID); if (output != AUDIO_IO_HANDLE_NONE) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); ALOG_ASSERT(!outputDesc->isDuplicated(), "updateCallRouting() RX device output is duplicated"); outputDesc->toAudioPortConfig(&patch.sources[1]); + patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH; patch.num_sources = 2; } @@ -395,6 +395,7 @@ void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs ALOG_ASSERT(!outputDesc->isDuplicated(), "updateCallRouting() RX device output is duplicated"); outputDesc->toAudioPortConfig(&patch.sources[1]); + patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH; patch.num_sources = 2; } @@ -448,13 +449,13 @@ void AudioPolicyManager::setPhoneState(audio_mode_t state) checkOutputForAllStrategies(); updateDevicesAndOutputs(); - sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput); + sp<SwAudioOutputDescriptor> hwOutputDesc = mPrimaryOutput; int delayMs = 0; if (isStateInCall(state)) { nsecs_t sysTime = systemTime(); for (size_t i = 0; i < mOutputs.size(); i++) { - sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); // mute media and sonification strategies and delay device switch by the largest // latency of any output where either strategy is active. // This avoid sending the ring tone or music tail into the earpiece or headset. 
@@ -464,14 +465,14 @@ void AudioPolicyManager::setPhoneState(audio_mode_t state) isStrategyActive(desc, STRATEGY_SONIFICATION, SONIFICATION_HEADSET_MUSIC_DELAY, sysTime)) && - (delayMs < (int)desc->mLatency*2)) { - delayMs = desc->mLatency*2; + (delayMs < (int)desc->latency()*2)) { + delayMs = desc->latency()*2; } - setStrategyMute(STRATEGY_MEDIA, true, mOutputs.keyAt(i)); - setStrategyMute(STRATEGY_MEDIA, false, mOutputs.keyAt(i), MUTE_TIME_MS, + setStrategyMute(STRATEGY_MEDIA, true, desc); + setStrategyMute(STRATEGY_MEDIA, false, desc, MUTE_TIME_MS, getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/)); - setStrategyMute(STRATEGY_SONIFICATION, true, mOutputs.keyAt(i)); - setStrategyMute(STRATEGY_SONIFICATION, false, mOutputs.keyAt(i), MUTE_TIME_MS, + setStrategyMute(STRATEGY_SONIFICATION, true, desc); + setStrategyMute(STRATEGY_SONIFICATION, false, desc, MUTE_TIME_MS, getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/)); } } @@ -547,13 +548,13 @@ void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage, updateCallRouting(newDevice); } for (size_t i = 0; i < mOutputs.size(); i++) { - audio_io_handle_t output = mOutputs.keyAt(i); - audio_devices_t newDevice = getNewOutputDevice(output, true /*fromCache*/); - if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) { - setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE)); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i); + audio_devices_t newDevice = getNewOutputDevice(outputDesc, true /*fromCache*/); + if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) { + setOutputDevice(outputDesc, newDevice, (newDevice != AUDIO_DEVICE_NONE)); } if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) { - applyStreamVolumes(output, newDevice, 0, true); + applyStreamVolumes(outputDesc, newDevice, 0, true); } } @@ -584,8 +585,10 @@ sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput( } for (size_t j = 
0; j < mHwModules[i]->mOutputProfiles.size(); j++) { sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j]; - bool found = profile->isCompatibleProfile(device, String8(""), samplingRate, - NULL /*updatedSamplingRate*/, format, channelMask, + bool found = profile->isCompatibleProfile(device, String8(""), + samplingRate, NULL /*updatedSamplingRate*/, + format, NULL /*updatedFormat*/, + channelMask, NULL /*updatedChannelMask*/, flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT); if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) { @@ -621,6 +624,7 @@ status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr, audio_format_t format, audio_channel_mask_t channelMask, audio_output_flags_t flags, + audio_port_handle_t selectedDeviceId, const audio_offload_info_t *offloadInfo) { audio_attributes_t attributes; @@ -639,7 +643,7 @@ status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr, } stream_type_to_audio_attributes(*stream, &attributes); } - sp<AudioOutputDescriptor> desc; + sp<SwAudioOutputDescriptor> desc; if (mPolicyMixes.getOutputForAttr(attributes, desc) == NO_ERROR) { ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr"); if (!audio_is_linear_pcm(format)) { @@ -675,6 +679,17 @@ status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr, if (*output == AUDIO_IO_HANDLE_NONE) { return INVALID_OPERATION; } + + // Explicit routing? 
+ sp<DeviceDescriptor> deviceDesc; + + for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) { + if (mAvailableOutputDevices[i]->getId() == selectedDeviceId) { + deviceDesc = mAvailableOutputDevices[i]; + break; + } + } + mOutputRoutes.addRoute(session, *stream, deviceDesc); return NO_ERROR; } @@ -699,7 +714,8 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice( if (mTestOutputs[mCurOutput] == 0) { ALOGV("getOutput() opening test output"); - sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL); + sp<AudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(NULL, + mpClientInterface); outputDesc->mDevice = mTestDevice; outputDesc->mLatency = mTestLatencyMs; outputDesc->mFlags = @@ -775,10 +791,10 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice( } if (profile != 0) { - sp<AudioOutputDescriptor> outputDesc = NULL; + sp<SwAudioOutputDescriptor> outputDesc = NULL; for (size_t i = 0; i < mOutputs.size(); i++) { - sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); if (!desc->isDuplicated() && (profile == desc->mProfile)) { outputDesc = desc; // reuse direct output if currently open and configured with same parameters @@ -795,7 +811,7 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice( if (outputDesc != NULL) { closeOutput(outputDesc->mIoHandle); } - outputDesc = new AudioOutputDescriptor(profile); + outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface); outputDesc->mDevice = device; outputDesc->mLatency = 0; outputDesc->mFlags =(audio_output_flags_t) (outputDesc->mFlags | flags); @@ -806,7 +822,7 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice( if (offloadInfo != NULL) { config.offload_info = *offloadInfo; } - status = mpClientInterface->openOutput(profile->mModule->mHandle, + status = mpClientInterface->openOutput(profile->getModuleHandle(), &output, &config, &outputDesc->mDevice, @@ -856,7 +872,6 @@ audio_io_handle_t 
AudioPolicyManager::getOutputForDevice( } non_direct_output: - // ignoring channel mask due to downmix capability in mixer // open a non direct output @@ -874,7 +889,7 @@ non_direct_output: ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d," "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags); - ALOGV("getOutput() returns output %d", output); + ALOGV(" getOutputForDevice() returns output %d", output); return output; } @@ -902,7 +917,7 @@ audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_h audio_io_handle_t outputPrimary = 0; for (size_t i = 0; i < outputs.size(); i++) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]); if (!outputDesc->isDuplicated()) { // if a valid format is specified, skip output if not compatible if (format != AUDIO_FORMAT_INVALID) { @@ -941,15 +956,59 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, audio_stream_type_t stream, audio_session_t session) { - ALOGV("startOutput() output %d, stream %d, session %d", output, stream, session); + ALOGV("startOutput() output %d, stream %d, session %d", + output, stream, session); ssize_t index = mOutputs.indexOfKey(output); if (index < 0) { ALOGW("startOutput() unknown output %d", output); return BAD_VALUE; } + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); + + audio_devices_t newDevice; + if (outputDesc->mPolicyMix != NULL) { + newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX; + } else { + newDevice = AUDIO_DEVICE_NONE; + } + + uint32_t delayMs = 0; + + // Routing? 
+ mOutputRoutes.incRouteActivity(session); + + status_t status = startSource(outputDesc, stream, newDevice, &delayMs); + + if (status != NO_ERROR) { + mOutputRoutes.decRouteActivity(session); + } + // Automatically enable the remote submix input when output is started on a re routing mix + // of type MIX_TYPE_RECORDERS + if (audio_is_remote_submix_device(newDevice) && outputDesc->mPolicyMix != NULL && + outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) { + setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX, + AUDIO_POLICY_DEVICE_STATE_AVAILABLE, + outputDesc->mPolicyMix->mRegistrationId, + "remote-submix"); + } + + if (delayMs != 0) { + usleep(delayMs * 1000); + } + + return status; +} + +status_t AudioPolicyManager::startSource(sp<AudioOutputDescriptor> outputDesc, + audio_stream_type_t stream, + audio_devices_t device, + uint32_t *delayMs) +{ // cannot start playback of STREAM_TTS if any other output is being used uint32_t beaconMuteLatency = 0; + + *delayMs = 0; if (stream == AUDIO_STREAM_TTS) { ALOGV("\t found BEACON stream"); if (mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) { @@ -962,8 +1021,6 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT); } - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); - // increment usage count for this stream on the requested output: // NOTE that the usage count is the same for duplicated output and hardware output which is // necessary for a correct control of hardware output routing by startOutput() and stopOutput() @@ -971,11 +1028,8 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, if (outputDesc->mRefCount[stream] == 1) { // starting an output being rerouted? 
- audio_devices_t newDevice; - if (outputDesc->mPolicyMix != NULL) { - newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX; - } else { - newDevice = getNewOutputDevice(output, false /*fromCache*/); + if (device == AUDIO_DEVICE_NONE) { + device = getNewOutputDevice(outputDesc, false /*fromCache*/); } routing_strategy strategy = getStrategy(stream); bool shouldWait = (strategy == STRATEGY_SONIFICATION) || @@ -991,7 +1045,7 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, // In this case, the audio HAL must receive the new device selection so that it can // change the device currently selected by the other active output. if (outputDesc->sharesHwModuleWith(desc) && - desc->device() != newDevice) { + desc->device() != device) { force = true; } // wait for audio on other active outputs to be presented when starting @@ -1003,7 +1057,7 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, } } } - uint32_t muteWaitMs = setOutputDevice(output, newDevice, force); + uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force); // handle special case for sonification while in call if (isInCall()) { @@ -1012,32 +1066,18 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, // apply volume rules for current stream and device if necessary checkAndSetVolume(stream, - mStreams[stream].getVolumeIndex(newDevice), - output, - newDevice); + mStreams.valueFor(stream).getVolumeIndex(device), + outputDesc, + device); // update the outputs if starting an output with a stream that can affect notification // routing handleNotificationRoutingForStream(stream); - // Automatically enable the remote submix input when output is started on a re routing mix - // of type MIX_TYPE_RECORDERS - if (audio_is_remote_submix_device(newDevice) && outputDesc->mPolicyMix != NULL && - outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) { - setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX, - AUDIO_POLICY_DEVICE_STATE_AVAILABLE, - 
outputDesc->mPolicyMix->mRegistrationId, - "remote-submix"); - } - // force reevaluating accessibility routing when ringtone or alarm starts if (strategy == STRATEGY_SONIFICATION) { mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY); } - - if (waitMs > muteWaitMs) { - usleep((waitMs - muteWaitMs) * 2 * 1000); - } } return NO_ERROR; } @@ -1054,8 +1094,32 @@ status_t AudioPolicyManager::stopOutput(audio_io_handle_t output, return BAD_VALUE; } - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); + + if (outputDesc->mRefCount[stream] == 1) { + // Automatically disable the remote submix input when output is stopped on a + // re routing mix of type MIX_TYPE_RECORDERS + if (audio_is_remote_submix_device(outputDesc->mDevice) && + outputDesc->mPolicyMix != NULL && + outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) { + setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX, + AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, + outputDesc->mPolicyMix->mRegistrationId, + "remote-submix"); + } + } + + // Routing? + if (outputDesc->mRefCount[stream] > 0) { + mOutputRoutes.decRouteActivity(session); + } + + return stopSource(outputDesc, stream); +} +status_t AudioPolicyManager::stopSource(sp<AudioOutputDescriptor> outputDesc, + audio_stream_type_t stream) +{ // always handle stream stop, check which stream type is stopping handleEventForBeacon(stream == AUDIO_STREAM_TTS ? 
STOPPING_BEACON : STOPPING_OUTPUT); @@ -1067,41 +1131,31 @@ status_t AudioPolicyManager::stopOutput(audio_io_handle_t output, if (outputDesc->mRefCount[stream] > 0) { // decrement usage count of this stream on the output outputDesc->changeRefCount(stream, -1); + // store time at which the stream was stopped - see isStreamActive() if (outputDesc->mRefCount[stream] == 0) { - // Automatically disable the remote submix input when output is stopped on a - // re routing mix of type MIX_TYPE_RECORDERS - if (audio_is_remote_submix_device(outputDesc->mDevice) && - outputDesc->mPolicyMix != NULL && - outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) { - setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX, - AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, - outputDesc->mPolicyMix->mRegistrationId, - "remote-submix"); - } - outputDesc->mStopTime[stream] = systemTime(); - audio_devices_t newDevice = getNewOutputDevice(output, false /*fromCache*/); + audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/); // delay the device switch by twice the latency because stopOutput() is executed when // the track stop() command is received and at that time the audio track buffer can // still contain data that needs to be drained. The latency only covers the audio HAL // and kernel buffers. Also the latency does not always include additional delay in the // audio path (audio DSP, CODEC ...) 
- setOutputDevice(output, newDevice, false, outputDesc->mLatency*2); + setOutputDevice(outputDesc, newDevice, false, outputDesc->latency()*2); // force restoring the device selection on other active outputs if it differs from the // one being selected for this output for (size_t i = 0; i < mOutputs.size(); i++) { audio_io_handle_t curOutput = mOutputs.keyAt(i); sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); - if (curOutput != output && + if (desc != outputDesc && desc->isActive() && outputDesc->sharesHwModuleWith(desc) && (newDevice != desc->device())) { - setOutputDevice(curOutput, - getNewOutputDevice(curOutput, false /*fromCache*/), + setOutputDevice(desc, + getNewOutputDevice(desc, false /*fromCache*/), true, - outputDesc->mLatency*2); + outputDesc->latency()*2); } } // update the outputs if stopping one with a stream that can affect notification routing @@ -1109,7 +1163,7 @@ status_t AudioPolicyManager::stopOutput(audio_io_handle_t output, } return NO_ERROR; } else { - ALOGW("stopOutput() refcount is already 0 for output %d", output); + ALOGW("stopOutput() refcount is already 0"); return INVALID_OPERATION; } } @@ -1138,7 +1192,10 @@ void AudioPolicyManager::releaseOutput(audio_io_handle_t output, } #endif //AUDIO_POLICY_TEST - sp<AudioOutputDescriptor> desc = mOutputs.valueAt(index); + // Routing + mOutputRoutes.removeRoute(session); + + sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index); if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { if (desc->mDirectOpenCount <= 0) { ALOGW("releaseOutput() invalid open count %d for output %d", @@ -1150,8 +1207,9 @@ void AudioPolicyManager::releaseOutput(audio_io_handle_t output, // If effects where present on the output, audioflinger moved them to the primary // output by default: move them back to the appropriate output. 
audio_io_handle_t dstOutput = getOutputForEffect(); - if (dstOutput != mPrimaryOutput) { - mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mPrimaryOutput, dstOutput); + if (dstOutput != mPrimaryOutput->mIoHandle) { + mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, + mPrimaryOutput->mIoHandle, dstOutput); } mpClientInterface->onAudioPortListUpdate(); } @@ -1189,7 +1247,7 @@ status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr, if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX && strncmp(attr->tags, "addr=", strlen("addr=")) == 0) { - status_t ret = mPolicyMixes.getInputMixForAttr(*attr, policyMix); + status_t ret = mPolicyMixes.getInputMixForAttr(*attr, &policyMix); if (ret != NO_ERROR) { return ret; } @@ -1247,48 +1305,54 @@ status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr, } } - sp<IOProfile> profile = getInputProfile(device, address, - samplingRate, format, channelMask, - flags); - if (profile == 0) { - //retry without flags - audio_input_flags_t log_flags = flags; - flags = AUDIO_INPUT_FLAG_NONE; + // find a compatible input profile (not necessarily identical in parameters) + sp<IOProfile> profile; + // samplingRate and flags may be updated by getInputProfile + uint32_t profileSamplingRate = samplingRate; + audio_format_t profileFormat = format; + audio_channel_mask_t profileChannelMask = channelMask; + audio_input_flags_t profileFlags = flags; + for (;;) { profile = getInputProfile(device, address, - samplingRate, format, channelMask, - flags); - if (profile == 0) { + profileSamplingRate, profileFormat, profileChannelMask, + profileFlags); + if (profile != 0) { + break; // success + } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) { + profileFlags = AUDIO_INPUT_FLAG_NONE; // retry + } else { // fail ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u," "format %#x, channelMask 0x%X, flags %#x", - device, samplingRate, format, channelMask, log_flags); + device, 
samplingRate, format, channelMask, flags); return BAD_VALUE; } } - if (profile->mModule->mHandle == 0) { - ALOGE("getInputForAttr(): HW module %s not opened", profile->mModule->mName); + if (profile->getModuleHandle() == 0) { + ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName()); return NO_INIT; } audio_config_t config = AUDIO_CONFIG_INITIALIZER; - config.sample_rate = samplingRate; - config.channel_mask = channelMask; - config.format = format; + config.sample_rate = profileSamplingRate; + config.channel_mask = profileChannelMask; + config.format = profileFormat; - status_t status = mpClientInterface->openInput(profile->mModule->mHandle, + status_t status = mpClientInterface->openInput(profile->getModuleHandle(), input, &config, &device, address, halInputSource, - flags); + profileFlags); // only accept input with the exact requested set of parameters if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE || - (samplingRate != config.sample_rate) || - (format != config.format) || - (channelMask != config.channel_mask)) { - ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d, channelMask %x", + (profileSamplingRate != config.sample_rate) || + (profileFormat != config.format) || + (profileChannelMask != config.channel_mask)) { + ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d," + " channelMask %x", samplingRate, format, channelMask); if (*input != AUDIO_IO_HANDLE_NONE) { mpClientInterface->closeInput(*input); @@ -1300,15 +1364,15 @@ status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr, inputDesc->mInputSource = inputSource; inputDesc->mRefCount = 0; inputDesc->mOpenRefCount = 1; - inputDesc->mSamplingRate = samplingRate; - inputDesc->mFormat = format; - inputDesc->mChannelMask = channelMask; + inputDesc->mSamplingRate = profileSamplingRate; + inputDesc->mFormat = profileFormat; + inputDesc->mChannelMask = profileChannelMask; inputDesc->mDevice = device; 
inputDesc->mSessions.add(session); inputDesc->mIsSoundTrigger = isSoundTrigger; inputDesc->mPolicyMix = policyMix; - ALOGV("getInputForAttr() returns input type = %d", inputType); + ALOGV("getInputForAttr() returns input type = %d", *inputType); addInput(*input, inputDesc); mpClientInterface->onAudioPortListUpdate(); @@ -1505,8 +1569,8 @@ status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream, audio_devices_t device) { - if ((index < mStreams[stream].getVolumeIndexMin()) || - (index > mStreams[stream].getVolumeIndexMax())) { + if ((index < mStreams.valueFor(stream).getVolumeIndexMin()) || + (index > mStreams.valueFor(stream).getVolumeIndexMax())) { return BAD_VALUE; } if (!audio_is_output_device(device)) { @@ -1514,7 +1578,7 @@ status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream, } // Force max volume if stream cannot be muted - if (!mStreams.canBeMuted(stream)) index = mStreams[stream].getVolumeIndexMax(); + if (!mStreams.canBeMuted(stream)) index = mStreams.valueFor(stream).getVolumeIndexMax(); ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d", stream, device, index); @@ -1543,16 +1607,17 @@ status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream, } status_t status = NO_ERROR; for (size_t i = 0; i < mOutputs.size(); i++) { - audio_devices_t curDevice = Volume::getDeviceForVolume(mOutputs.valueAt(i)->device()); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); + audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device()); if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) { - status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice); + status_t volStatus = checkAndSetVolume(stream, index, desc, curDevice); if (volStatus != NO_ERROR) { status = volStatus; } } if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)) { status_t volStatus = checkAndSetVolume(AUDIO_STREAM_ACCESSIBILITY, 
- index, mOutputs.keyAt(i), curDevice); + index, desc, curDevice); } } return status; @@ -1575,7 +1640,7 @@ status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream, } device = Volume::getDeviceForVolume(device); - *index = mStreams[stream].getVolumeIndex(device); + *index = mStreams.valueFor(stream).getVolumeIndex(device); ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index); return NO_ERROR; } @@ -1599,7 +1664,7 @@ audio_io_handle_t AudioPolicyManager::selectOutputForEffects( audio_io_handle_t outputDeepBuffer = 0; for (size_t i = 0; i < outputs.size(); i++) { - sp<AudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]); ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags); if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { outputOffloaded = outputs[i]; @@ -1653,6 +1718,16 @@ status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc, return mEffects.registerEffect(desc, io, strategy, session, id); } +bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const +{ + return mOutputs.isStreamActive(stream, inPastMs); +} + +bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const +{ + return mOutputs.isStreamActiveRemotely(stream, inPastMs); +} + bool AudioPolicyManager::isSourceActive(audio_source_t source) const { for (size_t i = 0; i < mInputs.size(); i++) { @@ -1803,7 +1878,7 @@ status_t AudioPolicyManager::dump(int fd) snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this); result.append(buffer); - snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput); + snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput->mIoHandle); result.append(buffer); snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState()); result.append(buffer); @@ -2021,7 +2096,7 @@ status_t 
AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, } if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id); if (outputDesc == NULL) { ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id); return BAD_VALUE; @@ -2055,9 +2130,12 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, patch->sources[0].sample_rate, NULL, // updatedSamplingRate patch->sources[0].format, + NULL, // updatedFormat patch->sources[0].channel_mask, + NULL, // updatedChannelMask AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) { - ALOGV("createAudioPatch() profile not supported for device %08x", devDesc->type()); + ALOGV("createAudioPatch() profile not supported for device %08x", + devDesc->type()); return INVALID_OPERATION; } devices.add(devDesc); @@ -2069,7 +2147,7 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, // TODO: reconfigure output format and channels here ALOGV("createAudioPatch() setting device %08x on output %d", devices.types(), outputDesc->mIoHandle); - setOutputDevice(outputDesc->mIoHandle, devices.types(), true, 0, handle); + setOutputDevice(outputDesc, devices.types(), true, 0, handle); index = mAudioPatches.indexOfKey(*handle); if (index >= 0) { if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) { @@ -2109,7 +2187,9 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, patch->sinks[0].sample_rate, NULL, /*updatedSampleRate*/ patch->sinks[0].format, + NULL, /*updatedFormat*/ patch->sinks[0].channel_mask, + NULL, /*updatedChannelMask*/ // FIXME for the parameter type, // and the NONE (audio_output_flags_t) @@ -2163,8 +2243,12 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, } sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], 
&patch->sinks[i]); - if (srcDeviceDesc->mModule != sinkDeviceDesc->mModule) { - // only one sink supported when connected devices across HW modules + // create a software bridge in PatchPanel if: + // - source and sink devices are on differnt HW modules OR + // - audio HAL version is < 3.0 + if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) || + (srcDeviceDesc->mModule->mHalVersion < AUDIO_DEVICE_API_VERSION_3_0)) { + // support only one sink device for now to simplify output selection logic if (patch->num_sinks > 1) { return INVALID_OPERATION; } @@ -2181,6 +2265,7 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, return INVALID_OPERATION; } outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]); + newPatch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH; newPatch.num_sources = 2; } } @@ -2242,14 +2327,14 @@ status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle, struct audio_patch *patch = &patchDesc->mPatch; patchDesc->mUid = mUidCached; if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id); if (outputDesc == NULL) { ALOGV("releaseAudioPatch() output not found for id %d", patch->sources[0].id); return BAD_VALUE; } - setOutputDevice(outputDesc->mIoHandle, - getNewOutputDevice(outputDesc->mIoHandle, true /*fromCache*/), + setOutputDevice(outputDesc, + getNewOutputDevice(outputDesc, true /*fromCache*/), true, 0, NULL); @@ -2308,7 +2393,7 @@ status_t AudioPolicyManager::setAudioPortConfig(const struct audio_port_config * sp<AudioPortConfig> audioPortConfig; if (config->type == AUDIO_PORT_TYPE_MIX) { if (config->role == AUDIO_PORT_ROLE_SOURCE) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(config->id); + sp<SwAudioOutputDescriptor> outputDesc = 
mOutputs.getOutputFromId(config->id); if (outputDesc == NULL) { return BAD_VALUE; } @@ -2390,7 +2475,6 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa #ifdef AUDIO_POLICY_TEST Thread(false), #endif //AUDIO_POLICY_TEST - mPrimaryOutput((audio_io_handle_t)0), mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f), mA2dpSuspended(false), mSpeakerDrcEnabled(false), @@ -2474,7 +2558,8 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa if ((profileType & outputDeviceTypes) == 0) { continue; } - sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile); + sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile, + mpClientInterface); outputDesc->mDevice = profileType; audio_config_t config = AUDIO_CONFIG_INITIALIZER; @@ -2482,7 +2567,7 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa config.channel_mask = outputDesc->mChannelMask; config.format = outputDesc->mFormat; audio_io_handle_t output = AUDIO_IO_HANDLE_NONE; - status_t status = mpClientInterface->openOutput(outProfile->mModule->mHandle, + status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(), &output, &config, &outputDesc->mDevice, @@ -2510,10 +2595,10 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa } if (mPrimaryOutput == 0 && outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) { - mPrimaryOutput = output; + mPrimaryOutput = outputDesc; } addOutput(output, outputDesc); - setOutputDevice(output, + setOutputDevice(outputDesc, outputDesc->mDevice, true); } @@ -2558,7 +2643,7 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa config.channel_mask = inputDesc->mChannelMask; config.format = inputDesc->mFormat; audio_io_handle_t input = AUDIO_IO_HANDLE_NONE; - status_t status = mpClientInterface->openInput(inProfile->mModule->mHandle, + status_t status = 
mpClientInterface->openInput(inProfile->getModuleHandle(), &input, &config, &inputDesc->mDevice, @@ -2620,7 +2705,7 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa if (mPrimaryOutput != 0) { AudioParameter outputCmd = AudioParameter(); outputCmd.addInt(String8("set_id"), 0); - mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString()); + mpClientInterface->setParameters(mPrimaryOutput->mIoHandle, outputCmd.toString()); mTestDevice = AUDIO_DEVICE_OUT_SPEAKER; mTestSamplingRate = 44100; @@ -2760,20 +2845,21 @@ bool AudioPolicyManager::threadLoop() if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) { param.remove(String8("test_cmd_policy_reopen")); - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput); - mpClientInterface->closeOutput(mPrimaryOutput); + mpClientInterface->closeOutput(mPrimaryOutput->mIoHandle); - audio_module_handle_t moduleHandle = outputDesc->mModule->mHandle; + audio_module_handle_t moduleHandle = mPrimaryOutput->getModuleHandle(); - removeOutput(mPrimaryOutput); - sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL); + removeOutput(mPrimaryOutput->mIoHandle); + sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(NULL, + mpClientInterface); outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER; audio_config_t config = AUDIO_CONFIG_INITIALIZER; config.sample_rate = outputDesc->mSamplingRate; config.channel_mask = outputDesc->mChannelMask; config.format = outputDesc->mFormat; + audio_io_handle_t handle; status_t status = mpClientInterface->openOutput(moduleHandle, - &mPrimaryOutput, + &handle, &config, &outputDesc->mDevice, String8(""), @@ -2787,10 +2873,11 @@ bool AudioPolicyManager::threadLoop() outputDesc->mSamplingRate = config.sample_rate; outputDesc->mChannelMask = config.channel_mask; outputDesc->mFormat = config.format; + mPrimaryOutput = outputDesc; AudioParameter outputCmd = AudioParameter(); 
outputCmd.addInt(String8("set_id"), 0); - mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString()); - addOutput(mPrimaryOutput, outputDesc); + mpClientInterface->setParameters(handle, outputCmd.toString()); + addOutput(handle, outputDesc); } } @@ -2822,7 +2909,7 @@ int AudioPolicyManager::testOutputIndex(audio_io_handle_t output) // --- -void AudioPolicyManager::addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc) +void AudioPolicyManager::addOutput(audio_io_handle_t output, sp<SwAudioOutputDescriptor> outputDesc) { outputDesc->setIoHandle(output); mOutputs.add(output, outputDesc); @@ -2841,7 +2928,7 @@ void AudioPolicyManager::addInput(audio_io_handle_t input, sp<AudioInputDescript nextAudioPortGeneration(); } -void AudioPolicyManager::findIoHandlesByAddress(sp<AudioOutputDescriptor> desc /*in*/, +void AudioPolicyManager::findIoHandlesByAddress(sp<SwAudioOutputDescriptor> desc /*in*/, const audio_devices_t device /*in*/, const String8 address /*in*/, SortedVector<audio_io_handle_t>& outputs /*out*/) { @@ -2860,7 +2947,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de const String8 address) { audio_devices_t device = devDesc->type(); - sp<AudioOutputDescriptor> desc; + sp<SwAudioOutputDescriptor> desc; // erase all current sample rates, formats and channel masks devDesc->clearCapabilities(); @@ -2868,7 +2955,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de // first list already open outputs that can be routed to this device for (size_t i = 0; i < mOutputs.size(); i++) { desc = mOutputs.valueAt(i); - if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) { + if (!desc->isDuplicated() && (desc->supportedDevices() & device)) { if (!device_distinguishes_on_address(device)) { ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i)); outputs.add(mOutputs.keyAt(i)); @@ -2927,7 +3014,7 @@ status_t 
AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de ALOGV("opening output for device %08x with params %s profile %p", device, address.string(), profile.get()); - desc = new AudioOutputDescriptor(profile); + desc = new SwAudioOutputDescriptor(profile, mpClientInterface); desc->mDevice = device; audio_config_t config = AUDIO_CONFIG_INITIALIZER; config.sample_rate = desc->mSamplingRate; @@ -2937,7 +3024,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de config.offload_info.channel_mask = desc->mChannelMask; config.offload_info.format = desc->mFormat; audio_io_handle_t output = AUDIO_IO_HANDLE_NONE; - status_t status = mpClientInterface->openOutput(profile->mModule->mHandle, + status_t status = mpClientInterface->openOutput(profile->getModuleHandle(), &output, &config, &desc->mDevice, @@ -3007,7 +3094,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de config.offload_info.sample_rate = config.sample_rate; config.offload_info.channel_mask = config.channel_mask; config.offload_info.format = config.format; - status = mpClientInterface->openOutput(profile->mModule->mHandle, + status = mpClientInterface->openOutput(profile->getModuleHandle(), &output, &config, &desc->mDevice, @@ -3032,7 +3119,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de address.string()); } policyMix->setOutput(desc); - desc->mPolicyMix = &(policyMix->getMix()); + desc->mPolicyMix = policyMix->getMix(); } else if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) { // no duplicated output for direct outputs and @@ -3040,28 +3127,29 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE; // set initial stream volume for device - applyStreamVolumes(output, device, 0, true); + applyStreamVolumes(desc, device, 0, true); //TODO: configure audio effect output stage here // open a duplicating output 
thread for the new output and the primary output - duplicatedOutput = mpClientInterface->openDuplicateOutput(output, - mPrimaryOutput); + duplicatedOutput = + mpClientInterface->openDuplicateOutput(output, + mPrimaryOutput->mIoHandle); if (duplicatedOutput != AUDIO_IO_HANDLE_NONE) { // add duplicated output descriptor - sp<AudioOutputDescriptor> dupOutputDesc = - new AudioOutputDescriptor(NULL); - dupOutputDesc->mOutput1 = mOutputs.valueFor(mPrimaryOutput); - dupOutputDesc->mOutput2 = mOutputs.valueFor(output); + sp<SwAudioOutputDescriptor> dupOutputDesc = + new SwAudioOutputDescriptor(NULL, mpClientInterface); + dupOutputDesc->mOutput1 = mPrimaryOutput; + dupOutputDesc->mOutput2 = desc; dupOutputDesc->mSamplingRate = desc->mSamplingRate; dupOutputDesc->mFormat = desc->mFormat; dupOutputDesc->mChannelMask = desc->mChannelMask; dupOutputDesc->mLatency = desc->mLatency; addOutput(duplicatedOutput, dupOutputDesc); - applyStreamVolumes(duplicatedOutput, device, 0, true); + applyStreamVolumes(dupOutputDesc, device, 0, true); } else { ALOGW("checkOutputsForDevice() could not open dup output for %d and %d", - mPrimaryOutput, output); + mPrimaryOutput->mIoHandle, output); mpClientInterface->closeOutput(output); removeOutput(output); nextAudioPortGeneration(); @@ -3083,7 +3171,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de if (device_distinguishes_on_address(device)) { ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)", device, address.string()); - setOutputDevice(output, device, true/*force*/, 0/*delay*/, + setOutputDevice(desc, device, true/*force*/, 0/*delay*/, NULL/*patch handle*/, address.string()); } ALOGV("checkOutputsForDevice(): adding output %d", output); @@ -3101,10 +3189,9 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de if (!desc->isDuplicated()) { // exact match on device if (device_distinguishes_on_address(device) && - (desc->mProfile->mSupportedDevices.types() == 
device)) { + (desc->supportedDevices() == device)) { findIoHandlesByAddress(desc, device, address, outputs); - } else if (!(desc->mProfile->mSupportedDevices.types() - & mAvailableOutputDevices.types())) { + } else if (!(desc->supportedDevices() & mAvailableOutputDevices.types())) { ALOGV("checkOutputsForDevice(): disconnecting adding output %d", mOutputs.keyAt(i)); outputs.add(mOutputs.keyAt(i)); @@ -3212,7 +3299,7 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, config.channel_mask = desc->mChannelMask; config.format = desc->mFormat; audio_io_handle_t input = AUDIO_IO_HANDLE_NONE; - status_t status = mpClientInterface->openInput(profile->mModule->mHandle, + status_t status = mpClientInterface->openInput(profile->getModuleHandle(), &input, &config, &desc->mDevice, @@ -3339,7 +3426,7 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output) { ALOGV("closeOutput(%d)", output); - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); + sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); if (outputDesc == NULL) { ALOGW("closeOutput() unknown output %d", output); return; @@ -3348,7 +3435,7 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output) // look for duplicated outputs connected to the output being removed. 
for (size_t i = 0; i < mOutputs.size(); i++) { - sp<AudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i); + sp<SwAudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i); if (dupOutputDesc->isDuplicated() && (dupOutputDesc->mOutput1 == outputDesc || dupOutputDesc->mOutput2 == outputDesc)) { @@ -3417,8 +3504,9 @@ void AudioPolicyManager::closeInput(audio_io_handle_t input) mInputs.removeItem(input); } -SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device, - AudioOutputCollection openOutputs) +SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice( + audio_devices_t device, + SwAudioOutputCollection openOutputs) { SortedVector<audio_io_handle_t> outputs; @@ -3459,14 +3547,14 @@ void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy) // associated with policies in the "before" and "after" output vectors ALOGVV("checkOutputForStrategy(): policy related outputs"); for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) { - const sp<AudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i); + const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i); if (desc != 0 && desc->mPolicyMix != NULL) { srcOutputs.add(desc->mIoHandle); ALOGVV(" previous outputs: adding %d", desc->mIoHandle); } } for (size_t i = 0 ; i < mOutputs.size() ; i++) { - const sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); + const sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); if (desc != 0 && desc->mPolicyMix != NULL) { dstOutputs.add(desc->mIoHandle); ALOGVV(" new outputs: adding %d", desc->mIoHandle); @@ -3478,10 +3566,10 @@ void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy) strategy, srcOutputs[0], dstOutputs[0]); // mute strategy while moving tracks from one output to another for (size_t i = 0; i < srcOutputs.size(); i++) { - sp<AudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]); if 
(isStrategyActive(desc, strategy)) { - setStrategyMute(strategy, true, srcOutputs[i]); - setStrategyMute(strategy, false, srcOutputs[i], MUTE_TIME_MS, newDevice); + setStrategyMute(strategy, true, desc); + setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice); } } @@ -3578,12 +3666,11 @@ void AudioPolicyManager::checkA2dpSuspend() } } -audio_devices_t AudioPolicyManager::getNewOutputDevice(audio_io_handle_t output, bool fromCache) +audio_devices_t AudioPolicyManager::getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, + bool fromCache) { audio_devices_t device = AUDIO_DEVICE_NONE; - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); - ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle); if (index >= 0) { sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index); @@ -3761,9 +3848,9 @@ uint32_t AudioPolicyManager::setBeaconMute(bool mute) { ALOGV("\t muting %d", mute); uint32_t maxLatency = 0; for (size_t i = 0; i < mOutputs.size(); i++) { - sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); + sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); setStreamMute(AUDIO_STREAM_TTS, mute/*on*/, - desc->mIoHandle, + desc, 0 /*delay*/, AUDIO_DEVICE_NONE); const uint32_t latency = desc->latency() * 2; if (latency > maxLatency) { @@ -3779,6 +3866,21 @@ uint32_t AudioPolicyManager::setBeaconMute(bool mute) { audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy, bool fromCache) { + // Routing + // see if we have an explicit route + // scan the whole RouteMap, for each entry, convert the stream type to a strategy + // (getStrategy(stream)). + // if the strategy from the stream type in the RouteMap is the same as the argument above, + // and activity count is non-zero + // the device = the device from the descriptor in the RouteMap, and exit. 
+ for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) { + sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex); + routing_strategy strat = getStrategy(route->mStreamType); + if (strat == strategy && route->mDeviceDescriptor != 0 /*&& route->mActivityCount != 0*/) { + return route->mDeviceDescriptor->type(); + } + } + if (fromCache) { ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x", strategy, mDeviceForStrategy[strategy]); @@ -3812,7 +3914,7 @@ uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> for (size_t i = 0; i < NUM_STRATEGIES; i++) { audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/); - curDevice = curDevice & outputDesc->mProfile->mSupportedDevices.types(); + curDevice = curDevice & outputDesc->supportedDevices(); bool mute = shouldMute && (curDevice & device) && (curDevice != device); bool doMute = false; @@ -3831,10 +3933,9 @@ uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> == AUDIO_DEVICE_NONE) { continue; } - audio_io_handle_t curOutput = mOutputs.keyAt(j); - ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x) on output %d", - mute ? "muting" : "unmuting", i, curDevice, curOutput); - setStrategyMute((routing_strategy)i, mute, curOutput, mute ? 0 : delayMs); + ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x)", + mute ? "muting" : "unmuting", i, curDevice); + setStrategyMute((routing_strategy)i, mute, desc, mute ? 
0 : delayMs); if (isStrategyActive(desc, (routing_strategy)i)) { if (mute) { // FIXME: should not need to double latency if volume could be applied @@ -3859,9 +3960,9 @@ uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> } for (size_t i = 0; i < NUM_STRATEGIES; i++) { if (isStrategyActive(outputDesc, (routing_strategy)i)) { - setStrategyMute((routing_strategy)i, true, outputDesc->mIoHandle); + setStrategyMute((routing_strategy)i, true, outputDesc); // do tempMute unmute after twice the mute wait time - setStrategyMute((routing_strategy)i, false, outputDesc->mIoHandle, + setStrategyMute((routing_strategy)i, false, outputDesc, muteWaitMs *2, device); } } @@ -3876,36 +3977,35 @@ uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> return 0; } -uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output, +uint32_t AudioPolicyManager::setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, audio_devices_t device, bool force, int delayMs, audio_patch_handle_t *patchHandle, const char* address) { - ALOGV("setOutputDevice() output %d device %04x delayMs %d", output, device, delayMs); - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); + ALOGV("setOutputDevice() device %04x delayMs %d", device, delayMs); AudioParameter param; uint32_t muteWaitMs; if (outputDesc->isDuplicated()) { - muteWaitMs = setOutputDevice(outputDesc->mOutput1->mIoHandle, device, force, delayMs); - muteWaitMs += setOutputDevice(outputDesc->mOutput2->mIoHandle, device, force, delayMs); + muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs); + muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs); return muteWaitMs; } // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current // output profile if ((device != AUDIO_DEVICE_NONE) && - ((device & outputDesc->mProfile->mSupportedDevices.types()) == 0)) { + ((device & 
outputDesc->supportedDevices()) == 0)) { return 0; } // filter devices according to output selected - device = (audio_devices_t)(device & outputDesc->mProfile->mSupportedDevices.types()); + device = (audio_devices_t)(device & outputDesc->supportedDevices()); audio_devices_t prevDevice = outputDesc->mDevice; - ALOGV("setOutputDevice() prevDevice %04x", prevDevice); + ALOGV("setOutputDevice() prevDevice 0x%04x", prevDevice); if (device != AUDIO_DEVICE_NONE) { outputDesc->mDevice = device; @@ -3918,10 +4018,10 @@ uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output, // AND force is not specified // AND the output is connected by a valid audio patch. // Doing this check here allows the caller to call setOutputDevice() without conditions - if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force && - outputDesc->mPatchHandle != 0) { - ALOGV("setOutputDevice() setting same device %04x or null device for output %d", - device, output); + if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && + !force && + outputDesc->mPatchHandle != 0) { + ALOGV("setOutputDevice() setting same device 0x%04x or null device", device); return muteWaitMs; } @@ -3929,7 +4029,7 @@ uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output, // do the routing if (device == AUDIO_DEVICE_NONE) { - resetOutputDevice(output, delayMs, NULL); + resetOutputDevice(outputDesc, delayMs, NULL); } else { DeviceVector deviceList = (address == NULL) ? 
mAvailableOutputDevices.getDevicesFromType(device) @@ -3996,16 +4096,15 @@ uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output, } // update stream volumes according to new device - applyStreamVolumes(output, device, delayMs); + applyStreamVolumes(outputDesc, device, delayMs); return muteWaitMs; } -status_t AudioPolicyManager::resetOutputDevice(audio_io_handle_t output, +status_t AudioPolicyManager::resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, int delayMs, audio_patch_handle_t *patchHandle) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); ssize_t index; if (patchHandle) { index = mAudioPatches.indexOfKey(*patchHandle); @@ -4115,12 +4214,15 @@ status_t AudioPolicyManager::resetInputDevice(audio_io_handle_t input, sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device, String8 address, uint32_t& samplingRate, - audio_format_t format, - audio_channel_mask_t channelMask, + audio_format_t& format, + audio_channel_mask_t& channelMask, audio_input_flags_t flags) { // Choose an input profile based on the requested capture parameters: select the first available // profile supporting all requested parameters. + // + // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return + // the best matching profile, not the first one. 
for (size_t i = 0; i < mHwModules.size(); i++) { @@ -4133,7 +4235,11 @@ sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device, // profile->log(); if (profile->isCompatibleProfile(device, address, samplingRate, &samplingRate /*updatedSamplingRate*/, - format, channelMask, (audio_output_flags_t) flags)) { + format, + &format /*updatedFormat*/, + channelMask, + &channelMask /*updatedChannelMask*/, + (audio_output_flags_t) flags)) { return profile; } @@ -4162,17 +4268,10 @@ audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t input } float AudioPolicyManager::computeVolume(audio_stream_type_t stream, - int index, - audio_io_handle_t output, - audio_devices_t device) + int index, + audio_devices_t device) { - float volume = 1.0; - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); - - if (device == AUDIO_DEVICE_NONE) { - device = outputDesc->device(); - } - volume = mEngine->volIndexToAmpl(Volume::getDeviceCategory(device), stream, index); + float volumeDb = mEngine->volIndexToDb(Volume::getDeviceCategory(device), stream, index); // if a headset is connected, apply the following rules to ring tones and notifications // to avoid sound level bursts in user's ears: @@ -4190,41 +4289,39 @@ float AudioPolicyManager::computeVolume(audio_stream_type_t stream, || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) && (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) && mStreams.canBeMuted(stream)) { - volume *= SONIFICATION_HEADSET_VOLUME_FACTOR; + volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB; // when the phone is ringing we must consider that music could have been paused just before // by the music application and behave as if music was active if the last music track was // just stopped if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) || mLimitRingtoneVolume) { audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/); - float musicVol 
= computeVolume(AUDIO_STREAM_MUSIC, - mStreams[AUDIO_STREAM_MUSIC].getVolumeIndex(musicDevice), - output, + float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC, + mStreams.valueFor(AUDIO_STREAM_MUSIC).getVolumeIndex(musicDevice), musicDevice); - float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ? - musicVol : SONIFICATION_HEADSET_VOLUME_MIN; - if (volume > minVol) { - volume = minVol; - ALOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol); + float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ? + musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB; + if (volumeDb > minVolDB) { + volumeDb = minVolDB; + ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDB, musicVolDB); } } } - return volume; + return volumeDb; } status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream, - int index, - audio_io_handle_t output, - audio_devices_t device, - int delayMs, - bool force) + int index, + const sp<AudioOutputDescriptor>& outputDesc, + audio_devices_t device, + int delayMs, + bool force) { - // do not change actual stream volume if the stream is muted - if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) { + if (outputDesc->mMuteCount[stream] != 0) { ALOGVV("checkAndSetVolume() stream %d muted count %d", - stream, mOutputs.valueFor(output)->mMuteCount[stream]); + stream, outputDesc->mMuteCount[stream]); return NO_ERROR; } audio_policy_forced_cfg_t forceUseForComm = @@ -4237,45 +4334,28 @@ status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream, return INVALID_OPERATION; } - float volume = computeVolume(stream, index, output, device); - // unit gain if rerouting to external policy - if (device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX) { - ssize_t index = mOutputs.indexOfKey(output); - if (index >= 0) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); - if (outputDesc->mPolicyMix != NULL) { - ALOGV("max gain when rerouting for output=%d", output); - volume = 1.0f; - } - } - + 
if (device == AUDIO_DEVICE_NONE) { + device = outputDesc->device(); } - // We actually change the volume if: - // - the float value returned by computeVolume() changed - // - the force flag is set - if (volume != mOutputs.valueFor(output)->mCurVolume[stream] || - force) { - mOutputs.valueFor(output)->mCurVolume[stream] = volume; - ALOGVV("checkAndSetVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs); - // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is - // enabled - if (stream == AUDIO_STREAM_BLUETOOTH_SCO) { - mpClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volume, output, delayMs); - } - mpClientInterface->setStreamVolume(stream, volume, output, delayMs); + + float volumeDb = computeVolume(stream, index, device); + if (outputDesc->isFixedVolume(device)) { + volumeDb = 0.0f; } + outputDesc->setVolume(volumeDb, stream, device, delayMs, force); + if (stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) { float voiceVolume; // Force voice volume to max for bluetooth SCO as volume is managed by the headset if (stream == AUDIO_STREAM_VOICE_CALL) { - voiceVolume = (float)index/(float)mStreams[stream].getVolumeIndexMax(); + voiceVolume = (float)index/(float)mStreams.valueFor(stream).getVolumeIndexMax(); } else { voiceVolume = 1.0; } - if (voiceVolume != mLastVoiceVolume && output == mPrimaryOutput) { + if (voiceVolume != mLastVoiceVolume && outputDesc == mPrimaryOutput) { mpClientInterface->setVoiceVolume(voiceVolume, delayMs); mLastVoiceVolume = voiceVolume; } @@ -4284,20 +4364,20 @@ status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream, return NO_ERROR; } -void AudioPolicyManager::applyStreamVolumes(audio_io_handle_t output, - audio_devices_t device, - int delayMs, - bool force) +void AudioPolicyManager::applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc, + audio_devices_t device, + int delayMs, + bool force) { - 
ALOGVV("applyStreamVolumes() for output %d and device %x", output, device); + ALOGVV("applyStreamVolumes() for device %08x", device); for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) { if (stream == AUDIO_STREAM_PATCH) { continue; } checkAndSetVolume((audio_stream_type_t)stream, - mStreams[stream].getVolumeIndex(device), - output, + mStreams.valueFor((audio_stream_type_t)stream).getVolumeIndex(device), + outputDesc, device, delayMs, force); @@ -4305,10 +4385,10 @@ void AudioPolicyManager::applyStreamVolumes(audio_io_handle_t output, } void AudioPolicyManager::setStrategyMute(routing_strategy strategy, - bool on, - audio_io_handle_t output, - int delayMs, - audio_devices_t device) + bool on, + const sp<AudioOutputDescriptor>& outputDesc, + int delayMs, + audio_devices_t device) { ALOGVV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output); for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) { @@ -4316,32 +4396,31 @@ void AudioPolicyManager::setStrategyMute(routing_strategy strategy, continue; } if (getStrategy((audio_stream_type_t)stream) == strategy) { - setStreamMute((audio_stream_type_t)stream, on, output, delayMs, device); + setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device); } } } void AudioPolicyManager::setStreamMute(audio_stream_type_t stream, - bool on, - audio_io_handle_t output, - int delayMs, - audio_devices_t device) + bool on, + const sp<AudioOutputDescriptor>& outputDesc, + int delayMs, + audio_devices_t device) { - const StreamDescriptor &streamDesc = mStreams[stream]; - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); + const StreamDescriptor& streamDesc = mStreams.valueFor(stream); if (device == AUDIO_DEVICE_NONE) { device = outputDesc->device(); } - ALOGVV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d device %04x", - stream, on, output, outputDesc->mMuteCount[stream], device); + ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x", + 
stream, on, outputDesc->mMuteCount[stream], device); if (on) { if (outputDesc->mMuteCount[stream] == 0) { if (streamDesc.canBeMuted() && ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) || (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) { - checkAndSetVolume(stream, 0, output, device, delayMs); + checkAndSetVolume(stream, 0, outputDesc, device, delayMs); } } // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored @@ -4354,7 +4433,7 @@ void AudioPolicyManager::setStreamMute(audio_stream_type_t stream, if (--outputDesc->mMuteCount[stream] == 0) { checkAndSetVolume(stream, streamDesc.getVolumeIndex(device), - output, + outputDesc, device, delayMs); } @@ -4373,7 +4452,7 @@ void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream, const routing_strategy stream_strategy = getStrategy(stream); if ((stream_strategy == STRATEGY_SONIFICATION) || ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) { - sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput); + sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput; ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d", stream, starting, outputDesc->mDevice, stateChange); if (outputDesc->mRefCount[stream]) { @@ -4406,6 +4485,70 @@ void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream, } } +// --- SessionRoute class implementation +void AudioPolicyManager::SessionRoute::log(const char* prefix) { + ALOGI("%s[SessionRoute strm:0x%X, sess:0x%X, dev:0x%X refs:%d act:%d", + prefix, mStreamType, mSession, + mDeviceDescriptor != 0 ? 
mDeviceDescriptor->type() : AUDIO_DEVICE_NONE, + mRefCount, mActivityCount); +} + +// --- SessionRouteMap class implementation +bool AudioPolicyManager::SessionRouteMap::hasRoute(audio_session_t session) +{ + return indexOfKey(session) >= 0 && valueFor(session)->mDeviceDescriptor != 0; +} + +void AudioPolicyManager::SessionRouteMap::addRoute(audio_session_t session, + audio_stream_type_t streamType, + sp<DeviceDescriptor> deviceDescriptor) +{ + sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0; + if (route != NULL) { + route->mRefCount++; + route->mDeviceDescriptor = deviceDescriptor; + } else { + route = new AudioPolicyManager::SessionRoute(session, streamType, deviceDescriptor); + route->mRefCount++; + add(session, route); + } +} + +void AudioPolicyManager::SessionRouteMap::removeRoute(audio_session_t session) +{ + sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0; + if (route != 0) { + ALOG_ASSERT(route->mRefCount > 0); + --route->mRefCount; + if (route->mRefCount <= 0) { + removeItem(session); + } + } +} + +int AudioPolicyManager::SessionRouteMap::incRouteActivity(audio_session_t session) +{ + sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0; + return route != 0 ? ++(route->mActivityCount) : -1; +} + +int AudioPolicyManager::SessionRouteMap::decRouteActivity(audio_session_t session) +{ + sp<SessionRoute> route = indexOfKey(session) >= 0 ? 
valueFor(session) : 0; + if (route != 0 && route->mActivityCount > 0) { + return --(route->mActivityCount); + } else { + return -1; + } +} + +void AudioPolicyManager::SessionRouteMap::log(const char* caption) { + ALOGI("%s ----", caption); + for(size_t index = 0; index < size(); index++) { + valueAt(index)->log(" "); + } +} + void AudioPolicyManager::defaultAudioPolicyConfig(void) { sp<HwModule> module; @@ -4417,7 +4560,8 @@ void AudioPolicyManager::defaultAudioPolicyConfig(void) module = new HwModule("primary"); - profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SOURCE, module); + profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SOURCE); + profile->attach(module); profile->mSamplingRates.add(44100); profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT); profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO); @@ -4425,7 +4569,8 @@ void AudioPolicyManager::defaultAudioPolicyConfig(void) profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY; module->mOutputProfiles.add(profile); - profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SINK, module); + profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SINK); + profile->attach(module); profile->mSamplingRates.add(8000); profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT); profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO); diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h index 02b678a..fe6b986 100644 --- a/services/audiopolicy/managerdefault/AudioPolicyManager.h +++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h @@ -49,8 +49,11 @@ namespace android { // Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB #define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5 +#define SONIFICATION_HEADSET_VOLUME_FACTOR_DB (-6) // Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB #define SONIFICATION_HEADSET_VOLUME_MIN 0.016 +#define SONIFICATION_HEADSET_VOLUME_MIN_DB (-36) + // 
Time in milliseconds during which we consider that music is still active after a music // track was stopped - see computeVolume() #define SONIFICATION_HEADSET_MUSIC_DELAY 5000 @@ -110,6 +113,7 @@ public: audio_format_t format, audio_channel_mask_t channelMask, audio_output_flags_t flags, + audio_port_handle_t selectedDeviceId, const audio_offload_info_t *offloadInfo); virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, @@ -172,19 +176,15 @@ public: return mEffects.setEffectEnabled(id, enabled); } - virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const - { - return mOutputs.isStreamActive(stream, inPastMs); - } + virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const; // return whether a stream is playing remotely, override to change the definition of // local/remote playback, used for instance by notification manager to not make // media players lose audio focus when not playing locally // For the base implementation, "remotely" means playing during screen mirroring which // uses an output for playback with a non-empty, non "0" address. 
- virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const - { - return mOutputs.isStreamActiveRemotely(stream, inPastMs); - } + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, + uint32_t inPastMs = 0) const; + virtual bool isSourceActive(audio_source_t source) const; virtual status_t dump(int fd); @@ -227,6 +227,46 @@ public: // return the strategy corresponding to a given stream type routing_strategy getStrategy(audio_stream_type_t stream) const; +protected: + class SessionRoute : public RefBase + { + public: + friend class SessionRouteMap; + SessionRoute(audio_session_t session, + audio_stream_type_t streamType, + sp<DeviceDescriptor> deviceDescriptor) + : mSession(session), + mStreamType(streamType), + mDeviceDescriptor(deviceDescriptor), + mRefCount(0), + mActivityCount(0) {} + + audio_session_t mSession; + audio_stream_type_t mStreamType; + + sp<DeviceDescriptor> mDeviceDescriptor; + + // "reference" counting + int mRefCount; // +/- on references + int mActivityCount; // +/- on start/stop + + void log(const char* prefix); + }; + + class SessionRouteMap: public KeyedVector<audio_session_t, sp<SessionRoute>> + { + public: + bool hasRoute(audio_session_t session); + void addRoute(audio_session_t session, audio_stream_type_t streamType, + sp<DeviceDescriptor> deviceDescriptor); + void removeRoute(audio_session_t session); + + int incRouteActivity(audio_session_t session); + int decRouteActivity(audio_session_t session); + + void log(const char* caption); + }; + // From AudioPolicyManagerObserver virtual const AudioPatchCollection &getAudioPatches() const { @@ -240,7 +280,7 @@ public: { return mPolicyMixes; } - virtual const AudioOutputCollection &getOutputs() const + virtual const SwAudioOutputCollection &getOutputs() const { return mOutputs; } @@ -265,7 +305,7 @@ public: return mDefaultOutputDevice; } protected: - void addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc); + void 
addOutput(audio_io_handle_t output, sp<SwAudioOutputDescriptor> outputDesc); void removeOutput(audio_io_handle_t output); void addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc); @@ -288,13 +328,13 @@ protected: // change the route of the specified output. Returns the number of ms we have slept to // allow new routing to take effect in certain cases. - virtual uint32_t setOutputDevice(audio_io_handle_t output, + virtual uint32_t setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, audio_devices_t device, bool force = false, int delayMs = 0, audio_patch_handle_t *patchHandle = NULL, const char* address = NULL); - status_t resetOutputDevice(audio_io_handle_t output, + status_t resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, int delayMs = 0, audio_patch_handle_t *patchHandle = NULL); status_t setInputDevice(audio_io_handle_t input, @@ -309,29 +349,31 @@ protected: // compute the actual volume for a given stream according to the requested index and a particular // device - virtual float computeVolume(audio_stream_type_t stream, int index, - audio_io_handle_t output, audio_devices_t device); + virtual float computeVolume(audio_stream_type_t stream, + int index, + audio_devices_t device); // check that volume change is permitted, compute and send new volume to audio hardware virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index, - audio_io_handle_t output, + const sp<AudioOutputDescriptor>& outputDesc, audio_devices_t device, int delayMs = 0, bool force = false); // apply all stream volumes to the specified output and device - void applyStreamVolumes(audio_io_handle_t output, audio_devices_t device, int delayMs = 0, bool force = false); + void applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc, + audio_devices_t device, int delayMs = 0, bool force = false); // Mute or unmute all streams handled by the specified strategy on the specified output void setStrategyMute(routing_strategy strategy, bool on, 
- audio_io_handle_t output, + const sp<AudioOutputDescriptor>& outputDesc, int delayMs = 0, audio_devices_t device = (audio_devices_t)0); // Mute or unmute the stream on the specified output void setStreamMute(audio_stream_type_t stream, bool on, - audio_io_handle_t output, + const sp<AudioOutputDescriptor>& outputDesc, int delayMs = 0, audio_devices_t device = (audio_devices_t)0); @@ -384,7 +426,8 @@ protected: // must be called every time a condition that affects the device choice for a given output is // changed: connected device, phone state, force use, output start, output stop.. // see getDeviceForStrategy() for the use of fromCache parameter - audio_devices_t getNewOutputDevice(audio_io_handle_t output, bool fromCache); + audio_devices_t getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, + bool fromCache); // updates cache of device used by all strategies (mDeviceForStrategy[]) // must be called every time a condition that affects the device choice for a given strategy is @@ -412,7 +455,7 @@ protected: #endif //AUDIO_POLICY_TEST SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device, - AudioOutputCollection openOutputs); + SwAudioOutputCollection openOutputs); bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1, SortedVector<audio_io_handle_t>& outputs2); @@ -427,12 +470,12 @@ protected: audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs, audio_output_flags_t flags, audio_format_t format); - // samplingRate parameter is an in/out and so may be modified + // samplingRate, format, channelMask are in/out and so may be modified sp<IOProfile> getInputProfile(audio_devices_t device, String8 address, uint32_t& samplingRate, - audio_format_t format, - audio_channel_mask_t channelMask, + audio_format_t& format, + audio_channel_mask_t& channelMask, audio_input_flags_t flags); sp<IOProfile> getProfileForDirectOutput(audio_devices_t device, uint32_t samplingRate, @@ -453,28 +496,39 @@ protected: 
audio_devices_t availablePrimaryOutputDevices() const { - return mOutputs.getSupportedDevices(mPrimaryOutput) & mAvailableOutputDevices.types(); + return mPrimaryOutput->supportedDevices() & mAvailableOutputDevices.types(); } audio_devices_t availablePrimaryInputDevices() const { - return mAvailableInputDevices.getDevicesFromHwModule( - mOutputs.valueFor(mPrimaryOutput)->getModuleHandle()); + return mAvailableInputDevices.getDevicesFromHwModule(mPrimaryOutput->getModuleHandle()); } void updateCallRouting(audio_devices_t rxDevice, int delayMs = 0); + status_t startSource(sp<AudioOutputDescriptor> outputDesc, + audio_stream_type_t stream, + audio_devices_t device, + uint32_t *delayMs); + status_t stopSource(sp<AudioOutputDescriptor> outputDesc, + audio_stream_type_t stream); + uid_t mUidCached; AudioPolicyClientInterface *mpClientInterface; // audio policy client interface - audio_io_handle_t mPrimaryOutput; // primary output handle + sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor // list of descriptors for outputs currently opened - AudioOutputCollection mOutputs; + + SwAudioOutputCollection mOutputs; // copy of mOutputs before setDeviceConnectionState() opens new outputs // reset to mOutputs when updateDevicesAndOutputs() is called. - AudioOutputCollection mPreviousOutputs; + SwAudioOutputCollection mPreviousOutputs; AudioInputCollection mInputs; // list of input descriptors + DeviceVector mAvailableOutputDevices; // all available output devices DeviceVector mAvailableInputDevices; // all available input devices + SessionRouteMap mOutputRoutes; + SessionRouteMap mInputRoutes; + StreamDescriptorCollection mStreams; // stream descriptors for volume control bool mLimitRingtoneVolume; // limit ringtone volume to music volume if headset connected audio_devices_t mDeviceForStrategy[NUM_STRATEGIES]; @@ -539,7 +593,7 @@ private: // in mProfile->mSupportedDevices) matches the device whose address is to be matched. 
// see deviceDistinguishesOnAddress(audio_devices_t) for whether the device type is one // where addresses are used to distinguish between one connected device and another. - void findIoHandlesByAddress(sp<AudioOutputDescriptor> desc /*in*/, + void findIoHandlesByAddress(sp<SwAudioOutputDescriptor> desc /*in*/, const audio_devices_t device /*in*/, const String8 address /*in*/, SortedVector<audio_io_handle_t>& outputs /*out*/); diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp index e9ff838..9510727 100644 --- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp +++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp @@ -150,6 +150,7 @@ status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr, audio_format_t format, audio_channel_mask_t channelMask, audio_output_flags_t flags, + int mSelectedDeviceId, const audio_offload_info_t *offloadInfo) { if (mAudioPolicyManager == NULL) { @@ -158,7 +159,7 @@ status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr, ALOGV("getOutput()"); Mutex::Autolock _l(mLock); return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, samplingRate, - format, channelMask, flags, offloadInfo); + format, channelMask, flags, mSelectedDeviceId, offloadInfo); } status_t AudioPolicyService::startOutput(audio_io_handle_t output, @@ -261,8 +262,7 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr, return BAD_VALUE; } - if (((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) || - ((attr->source == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) { + if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) { return BAD_VALUE; } sp<AudioPolicyEffects>audioPolicyEffects; diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp index 5a91192..e4ca5dc 100644 
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp +++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp @@ -255,8 +255,7 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr, inputSource = AUDIO_SOURCE_MIC; } - if (((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) || - ((inputSource == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) { + if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) { return BAD_VALUE; } @@ -569,6 +568,7 @@ status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr, audio_format_t format, audio_channel_mask_t channelMask, audio_output_flags_t flags, + int selectedDeviceId __unused, const audio_offload_info_t *offloadInfo) { if (attr != NULL) { diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h index 0378384..f8dabd3 100644 --- a/services/audiopolicy/service/AudioPolicyService.h +++ b/services/audiopolicy/service/AudioPolicyService.h @@ -84,6 +84,7 @@ public: audio_format_t format = AUDIO_FORMAT_DEFAULT, audio_channel_mask_t channelMask = 0, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + int selectedDeviceId = AUDIO_PORT_HANDLE_NONE, const audio_offload_info_t *offloadInfo = NULL); virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index 44977da..05ede92 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -121,7 +121,8 @@ status_t Camera2Client::initialize(CameraModule *module) } case CAMERA_DEVICE_API_VERSION_3_0: case CAMERA_DEVICE_API_VERSION_3_1: - case CAMERA_DEVICE_API_VERSION_3_2: { + case CAMERA_DEVICE_API_VERSION_3_2: + case CAMERA_DEVICE_API_VERSION_3_3: { sp<ZslProcessor3> zslProc = new ZslProcessor3(this, 
mCaptureSequencer); mZslProcessor = zslProc; diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp index 8587e0e..bf1692d 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp @@ -65,6 +65,7 @@ CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService, int servicePid) : Camera2ClientBase(cameraService, remoteCallback, clientPackageName, cameraId, cameraFacing, clientPid, clientUid, servicePid), + mInputStream(), mRequestIdCounter(0) { ATRACE_CALL(); @@ -127,6 +128,7 @@ status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > request List<const CameraMetadata> metadataRequestList; int32_t requestId = mRequestIdCounter; uint32_t loopCounter = 0; + bool isReprocess = false; for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) { sp<CaptureRequest> request = *it; @@ -134,6 +136,18 @@ status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > request ALOGE("%s: Camera %d: Sent null request.", __FUNCTION__, mCameraId); return BAD_VALUE; + } else if (it == requests.begin()) { + isReprocess = request->mIsReprocess; + if (isReprocess && !mInputStream.configured) { + ALOGE("%s: Camera %d: no input stream is configured."); + return BAD_VALUE; + } else if (isReprocess && streaming) { + ALOGE("%s: Camera %d: streaming reprocess requests not supported."); + return BAD_VALUE; + } + } else if (isReprocess != request->mIsReprocess) { + ALOGE("%s: Camera %d: Sent regular and reprocess requests."); + return BAD_VALUE; } CameraMetadata metadata(request->mMetadata); @@ -182,6 +196,10 @@ status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > request metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0], outputStreamIds.size()); + if (isReprocess) { + metadata.update(ANDROID_REQUEST_INPUT_STREAMS, 
&mInputStream.id, 1); + } + metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1); loopCounter++; // loopCounter starts from 1 ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)", @@ -260,8 +278,8 @@ status_t CameraDeviceClient::beginConfigure() { } status_t CameraDeviceClient::endConfigure() { - ALOGV("%s: ending configure (%zu streams)", - __FUNCTION__, mStreamMap.size()); + ALOGV("%s: ending configure (%d input stream, %zu output streams)", + __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size()); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -284,19 +302,25 @@ status_t CameraDeviceClient::deleteStream(int streamId) { if (!mDevice.get()) return DEAD_OBJECT; - // Guard against trying to delete non-created streams + bool isInput = false; ssize_t index = NAME_NOT_FOUND; - for (size_t i = 0; i < mStreamMap.size(); ++i) { - if (streamId == mStreamMap.valueAt(i)) { - index = i; - break; + + if (mInputStream.configured && mInputStream.id == streamId) { + isInput = true; + } else { + // Guard against trying to delete non-created streams + for (size_t i = 0; i < mStreamMap.size(); ++i) { + if (streamId == mStreamMap.valueAt(i)) { + index = i; + break; + } } - } - if (index == NAME_NOT_FOUND) { - ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream " - "created yet", __FUNCTION__, mCameraId, streamId); - return BAD_VALUE; + if (index == NAME_NOT_FOUND) { + ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream " + "created yet", __FUNCTION__, mCameraId, streamId); + return BAD_VALUE; + } } // Also returns BAD_VALUE if stream ID was not valid @@ -307,8 +331,11 @@ status_t CameraDeviceClient::deleteStream(int streamId) { " already checked and the stream ID (%d) should be valid.", __FUNCTION__, mCameraId, streamId); } else if (res == OK) { - mStreamMap.removeItemsAt(index); - + if (isInput) { + mInputStream.configured = false; + } else { + mStreamMap.removeItemsAt(index); + } } return res; @@ 
-450,6 +477,58 @@ status_t CameraDeviceClient::createStream(const OutputConfiguration &outputConfi } +status_t CameraDeviceClient::createInputStream(int width, int height, + int format) { + + ATRACE_CALL(); + ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format); + + status_t res; + if ( (res = checkPid(__FUNCTION__) ) != OK) return res; + + Mutex::Autolock icl(mBinderSerializationLock); + if (!mDevice.get()) return DEAD_OBJECT; + + if (mInputStream.configured) { + ALOGE("%s: Camera %d: Already has an input stream " + " configuration. (ID %zd)", __FUNCTION__, mCameraId, + mInputStream.id); + return ALREADY_EXISTS; + } + + int streamId = -1; + res = mDevice->createInputStream(width, height, format, &streamId); + if (res == OK) { + mInputStream.configured = true; + mInputStream.width = width; + mInputStream.height = height; + mInputStream.format = format; + mInputStream.id = streamId; + + ALOGV("%s: Camera %d: Successfully created a new input stream ID %d", + __FUNCTION__, mCameraId, streamId); + + return streamId; + } + + return res; +} + +status_t CameraDeviceClient::getInputBufferProducer( + /*out*/sp<IGraphicBufferProducer> *producer) { + status_t res; + if ( (res = checkPid(__FUNCTION__) ) != OK) return res; + + if (producer == NULL) { + return BAD_VALUE; + } + + Mutex::Autolock icl(mBinderSerializationLock); + if (!mDevice.get()) return DEAD_OBJECT; + + return mDevice->getInputBufferProducer(producer); +} + bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format, android_dataspace dataSpace, const CameraMetadata& info, /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) { @@ -592,6 +671,42 @@ status_t CameraDeviceClient::flush(int64_t* lastFrameNumber) { return mDevice->flush(lastFrameNumber); } +status_t CameraDeviceClient::prepare(int streamId) { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + status_t res = OK; + if ( (res = checkPid(__FUNCTION__) ) != OK) return res; + + Mutex::Autolock 
icl(mBinderSerializationLock); + + // Guard against trying to prepare non-created streams + ssize_t index = NAME_NOT_FOUND; + for (size_t i = 0; i < mStreamMap.size(); ++i) { + if (streamId == mStreamMap.valueAt(i)) { + index = i; + break; + } + } + + if (index == NAME_NOT_FOUND) { + ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream " + "created yet", __FUNCTION__, mCameraId, streamId); + return BAD_VALUE; + } + + // Also returns BAD_VALUE if stream ID was not valid + res = mDevice->prepare(streamId); + + if (res == BAD_VALUE) { + ALOGE("%s: Camera %d: Unexpected BAD_VALUE when preparing stream, but we" + " already checked and the stream ID (%d) should be valid.", + __FUNCTION__, mCameraId, streamId); + } + + return res; +} + status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) { String8 result; result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n", @@ -602,13 +717,19 @@ status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) { result.append(" State:\n"); result.appendFormat(" Request ID counter: %d\n", mRequestIdCounter); + if (mInputStream.configured) { + result.appendFormat(" Current input stream ID: %d\n", + mInputStream.id); + } else { + result.append(" No input stream configured.\n"); + } if (!mStreamMap.isEmpty()) { - result.append(" Current stream IDs:\n"); + result.append(" Current output stream IDs:\n"); for (size_t i = 0; i < mStreamMap.size(); i++) { result.appendFormat(" Stream %d\n", mStreamMap.valueAt(i)); } } else { - result.append(" No streams configured.\n"); + result.append(" No output streams configured.\n"); } write(fd, result.string(), result.size()); // TODO: print dynamic/request section from most recent requests @@ -645,6 +766,14 @@ void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras, } } +void CameraDeviceClient::notifyPrepared(int streamId) { + // Thread safe. Don't bother locking. 
+ sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback(); + if (remoteCb != 0) { + remoteCb->onPrepared(streamId); + } +} + void CameraDeviceClient::detachDevice() { if (mDevice == 0) return; diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h index a3dbb90..b8d8bea 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.h +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h @@ -86,6 +86,13 @@ public: virtual status_t createStream(const OutputConfiguration &outputConfiguration); + // Create an input stream of width, height, and format. + virtual status_t createInputStream(int width, int height, int format); + + // Get the buffer producer of the input stream + virtual status_t getInputBufferProducer( + /*out*/sp<IGraphicBufferProducer> *producer); + // Create a request object from a template. virtual status_t createDefaultRequest(int templateId, /*out*/ @@ -102,6 +109,9 @@ public: virtual status_t flush(/*out*/ int64_t* lastFrameNumber = NULL); + // Prepare stream by preallocating its buffers + virtual status_t prepare(int streamId); + /** * Interface used by CameraService */ @@ -128,6 +138,7 @@ public: virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, const CaptureResultExtras& resultExtras); virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp); + virtual void notifyPrepared(int streamId); /** * Interface used by independent components of CameraDeviceClient. 
@@ -161,10 +172,18 @@ private: android_dataspace dataSpace, const CameraMetadata& info, /*out*/int32_t* outWidth, /*out*/int32_t* outHeight); - // IGraphicsBufferProducer binder -> Stream ID + // IGraphicsBufferProducer binder -> Stream ID for output streams KeyedVector<sp<IBinder>, int> mStreamMap; - // Stream ID + struct InputStreamConfiguration { + bool configured; + int32_t width; + int32_t height; + int32_t format; + int32_t id; + } mInputStream; + + // Request ID Vector<int> mStreamingRequestList; int32_t mRequestIdCounter; diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp index c0c2314..ba0b264 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp +++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp @@ -280,6 +280,14 @@ void Camera2ClientBase<TClientBase>::notifyAutoWhitebalance(uint8_t newState, } template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyPrepared(int streamId) { + (void)streamId; + + ALOGV("%s: Stream %d now prepared", + __FUNCTION__, streamId); +} + +template <typename TClientBase> int Camera2ClientBase<TClientBase>::getCameraId() const { return TClientBase::mCameraId; } diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h index 168ea0a..f1cacdf 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.h +++ b/services/camera/libcameraservice/common/Camera2ClientBase.h @@ -72,7 +72,7 @@ public: virtual void notifyAutoExposure(uint8_t newState, int triggerId); virtual void notifyAutoWhitebalance(uint8_t newState, int triggerId); - + virtual void notifyPrepared(int streamId); int getCameraId() const; const sp<CameraDeviceBase>& diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h index fe55b9e..f02fc32 100644 --- 
a/services/camera/libcameraservice/common/CameraDeviceBase.h +++ b/services/camera/libcameraservice/common/CameraDeviceBase.h @@ -30,6 +30,7 @@ #include "camera/CameraMetadata.h" #include "camera/CaptureResult.h" #include "common/CameraModule.h" +#include "gui/IGraphicBufferProducer.h" namespace android { @@ -110,6 +111,14 @@ class CameraDeviceBase : public virtual RefBase { android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id) = 0; /** + * Create an input stream of width, height, and format. + * + * Return value is the stream ID if non-negative and an error if negative. + */ + virtual status_t createInputStream(uint32_t width, uint32_t height, + int32_t format, /*out*/ int32_t *id) = 0; + + /** * Create an input reprocess stream that uses buffers from an existing * output stream. */ @@ -150,6 +159,10 @@ class CameraDeviceBase : public virtual RefBase { */ virtual status_t configureStreams() = 0; + // get the buffer producer of the input stream + virtual status_t getInputBufferProducer( + sp<IGraphicBufferProducer> *producer) = 0; + /** * Create a metadata buffer with fields that the HAL device believes are * best for the given use case @@ -186,6 +199,7 @@ class CameraDeviceBase : public virtual RefBase { virtual void notifyIdle() = 0; virtual void notifyShutter(const CaptureResultExtras &resultExtras, nsecs_t timestamp) = 0; + virtual void notifyPrepared(int streamId) = 0; // Required only for API1 virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0; @@ -268,6 +282,12 @@ class CameraDeviceBase : public virtual RefBase { virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0; /** + * Prepare stream by preallocating buffers for it asynchronously. + * Calls notifyPrepared() once allocation is complete. + */ + virtual status_t prepare(int streamId) = 0; + + /** * Get the HAL device version. 
*/ virtual uint32_t getDeviceVersion() = 0; diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp index 878986b..f6645f3 100644 --- a/services/camera/libcameraservice/device2/Camera2Device.cpp +++ b/services/camera/libcameraservice/device2/Camera2Device.cpp @@ -618,6 +618,12 @@ status_t Camera2Device::flush(int64_t* /*lastFrameNumber*/) { return waitUntilDrained(); } +status_t Camera2Device::prepare(int streamId) { + ATRACE_CALL(); + ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId); + return NO_INIT; +} + uint32_t Camera2Device::getDeviceVersion() { ATRACE_CALL(); return mDeviceVersion; @@ -1581,4 +1587,18 @@ int Camera2Device::ReprocessStreamAdapter::release_buffer( return OK; } +// camera 2 devices don't support reprocessing +status_t Camera2Device::createInputStream( + uint32_t width, uint32_t height, int format, int *id) { + ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__); + return INVALID_OPERATION; +} + +// camera 2 devices don't support reprocessing +status_t Camera2Device::getInputBufferProducer( + sp<IGraphicBufferProducer> *producer) { + ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__); + return INVALID_OPERATION; +} + }; // namespace android diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h index 9b32fa6..fd1240a 100644 --- a/services/camera/libcameraservice/device2/Camera2Device.h +++ b/services/camera/libcameraservice/device2/Camera2Device.h @@ -59,6 +59,8 @@ class Camera2Device: public CameraDeviceBase { virtual status_t createStream(sp<ANativeWindow> consumer, uint32_t width, uint32_t height, int format, android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id); + virtual status_t createInputStream( + uint32_t width, uint32_t height, int format, int *id); virtual status_t createReprocessStreamFromStream(int outputId, int *id); 
virtual status_t getStreamInfo(int id, uint32_t *width, uint32_t *height, uint32_t *format); @@ -67,6 +69,8 @@ class Camera2Device: public CameraDeviceBase { virtual status_t deleteReprocessStream(int id); // No-op on HAL2 devices virtual status_t configureStreams(); + virtual status_t getInputBufferProducer( + sp<IGraphicBufferProducer> *producer); virtual status_t createDefaultRequest(int templateId, CameraMetadata *request); virtual status_t waitUntilDrained(); virtual status_t setNotifyCallback(NotificationListener *listener); @@ -80,6 +84,9 @@ class Camera2Device: public CameraDeviceBase { buffer_handle_t *buffer, wp<BufferReleasedListener> listener); // Flush implemented as just a wait virtual status_t flush(int64_t *lastFrameNumber = NULL); + // Prepare is a no-op + virtual status_t prepare(int streamId); + virtual uint32_t getDeviceVersion(); virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const; diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp index 8236788..ec9c70c 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.cpp +++ b/services/camera/libcameraservice/device3/Camera3Device.cpp @@ -62,6 +62,7 @@ Camera3Device::Camera3Device(int id): mUsePartialResult(false), mNumPartialResults(1), mNextResultFrameNumber(0), + mNextReprocessResultFrameNumber(0), mNextShutterFrameNumber(0), mListener(NULL) { @@ -174,6 +175,8 @@ status_t Camera3Device::initialize(CameraModule *module) return res; } + mPreparerThread = new PreparerThread(); + /** Everything is good to go */ mDeviceVersion = device->common.version; @@ -201,6 +204,17 @@ status_t Camera3Device::initialize(CameraModule *module) } } + camera_metadata_entry configs = + mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS); + for (uint32_t i = 0; i < configs.count; i += 4) { + if (configs.data.i32[i] == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED && + configs.data.i32[i + 3] == + 
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT) { + mSupportedOpaqueInputSizes.add(Size(configs.data.i32[i + 1], + configs.data.i32[i + 2])); + } + } + return OK; } @@ -1019,6 +1033,20 @@ status_t Camera3Device::configureStreams() { return configureStreamsLocked(); } +status_t Camera3Device::getInputBufferProducer( + sp<IGraphicBufferProducer> *producer) { + Mutex::Autolock il(mInterfaceLock); + Mutex::Autolock l(mLock); + + if (producer == NULL) { + return BAD_VALUE; + } else if (mInputStream == NULL) { + return INVALID_OPERATION; + } + + return mInputStream->getInputBufferProducer(producer); +} + status_t Camera3Device::createDefaultRequest(int templateId, CameraMetadata *request) { ATRACE_CALL(); @@ -1164,7 +1192,8 @@ status_t Camera3Device::setNotifyCallback(NotificationListener *listener) { ALOGW("%s: Replacing old callback listener", __FUNCTION__); } mListener = listener; - mRequestThread->setNotifyCallback(listener); + mRequestThread->setNotificationListener(listener); + mPreparerThread->setNotificationListener(listener); return OK; } @@ -1310,6 +1339,34 @@ status_t Camera3Device::flush(int64_t *frameNumber) { return res; } +status_t Camera3Device::prepare(int streamId) { + ATRACE_CALL(); + ALOGV("%s: Camera %d: Preparing stream %d", __FUNCTION__, mId, streamId); + + sp<Camera3StreamInterface> stream; + ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId); + if (outputStreamIdx == NAME_NOT_FOUND) { + CLOGE("Stream %d does not exist", streamId); + return BAD_VALUE; + } + + stream = mOutputStreams.editValueAt(outputStreamIdx); + + if (stream->isUnpreparable() || stream->hasOutstandingBuffers() ) { + ALOGE("%s: Camera %d: Stream %d has already been a request target", + __FUNCTION__, mId, streamId); + return BAD_VALUE; + } + + if (mRequestThread->isStreamPending(stream)) { + ALOGE("%s: Camera %d: Stream %d is already a target in a pending request", + __FUNCTION__, mId, streamId); + return BAD_VALUE; + } + + return mPreparerThread->prepare(stream); 
+} + uint32_t Camera3Device::getDeviceVersion() { ATRACE_CALL(); Mutex::Autolock il(mInterfaceLock); @@ -1383,6 +1440,11 @@ sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest( return NULL; } } + // Check if stream is being prepared + if (mInputStream->isPreparing()) { + CLOGE("Request references an input stream that's being prepared!"); + return NULL; + } newRequest->mInputStream = mInputStream; newRequest->mSettings.erase(ANDROID_REQUEST_INPUT_STREAMS); @@ -1415,6 +1477,11 @@ sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest( return NULL; } } + // Check if stream is being prepared + if (stream->isPreparing()) { + CLOGE("Request references an output stream that's being prepared!"); + return NULL; + } newRequest->mOutputStreams.push(stream); } @@ -1423,6 +1490,17 @@ sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest( return newRequest; } +bool Camera3Device::isOpaqueInputSizeSupported(uint32_t width, uint32_t height) { + for (uint32_t i = 0; i < mSupportedOpaqueInputSizes.size(); i++) { + Size size = mSupportedOpaqueInputSizes[i]; + if (size.width == width && size.height == height) { + return true; + } + } + + return false; +} + status_t Camera3Device::configureStreamsLocked() { ATRACE_CALL(); status_t res; @@ -1879,7 +1957,6 @@ bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag, return true; } - void Camera3Device::returnOutputBuffers( const camera3_stream_buffer_t *outputBuffers, size_t numBuffers, nsecs_t timestamp) { @@ -1947,20 +2024,31 @@ void Camera3Device::removeInFlightRequestIfReadyLocked(int idx) { void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata, CaptureResultExtras &resultExtras, CameraMetadata &collectedPartialResult, - uint32_t frameNumber) { + uint32_t frameNumber, + bool reprocess) { if (pendingMetadata.isEmpty()) return; Mutex::Autolock l(mOutputLock); // TODO: need to track errors for tighter bounds on expected frame number - if (frameNumber < 
mNextResultFrameNumber) { - SET_ERR("Out-of-order capture result metadata submitted! " + if (reprocess) { + if (frameNumber < mNextReprocessResultFrameNumber) { + SET_ERR("Out-of-order reprocess capture result metadata submitted! " "(got frame number %d, expecting %d)", - frameNumber, mNextResultFrameNumber); - return; + frameNumber, mNextReprocessResultFrameNumber); + return; + } + mNextReprocessResultFrameNumber = frameNumber + 1; + } else { + if (frameNumber < mNextResultFrameNumber) { + SET_ERR("Out-of-order capture result metadata submitted! " + "(got frame number %d, expecting %d)", + frameNumber, mNextResultFrameNumber); + return; + } + mNextResultFrameNumber = frameNumber + 1; } - mNextResultFrameNumber = frameNumber + 1; CaptureResult captureResult; captureResult.mResultExtras = resultExtras; @@ -2170,7 +2258,7 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { CameraMetadata metadata; metadata = result->result; sendCaptureResult(metadata, request.resultExtras, - collectedPartialResult, frameNumber); + collectedPartialResult, frameNumber, hasInputBufferInRequest); } } @@ -2332,7 +2420,8 @@ void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg, // send pending result and buffers sendCaptureResult(r.pendingMetadata, r.resultExtras, - r.partialResult.collectedResult, msg.frame_number); + r.partialResult.collectedResult, msg.frame_number, + r.hasInputBuffer); returnOutputBuffers(r.pendingOutputBuffers.array(), r.pendingOutputBuffers.size(), r.shutterTimestamp); r.pendingOutputBuffers.clear(); @@ -2367,7 +2456,7 @@ CameraMetadata Camera3Device::getLatestRequestLocked() { Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent, sp<StatusTracker> statusTracker, camera3_device_t *hal3Device) : - Thread(false), + Thread(/*canCallJava*/false), mParent(parent), mStatusTracker(statusTracker), mHal3Device(hal3Device), @@ -2383,7 +2472,7 @@ Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent, 
mStatusId = statusTracker->addComponent(); } -void Camera3Device::RequestThread::setNotifyCallback( +void Camera3Device::RequestThread::setNotificationListener( NotificationListener *listener) { Mutex::Autolock l(mRequestLock); mListener = listener; @@ -2669,7 +2758,6 @@ bool Camera3Device::RequestThread::threadLoop() { // Fill in buffers if (nextRequest->mInputStream != NULL) { - request.input_buffer = &inputBuffer; res = nextRequest->mInputStream->getInputBuffer(&inputBuffer); if (res != OK) { // Can't get input buffer from gralloc queue - this could be due to @@ -2686,6 +2774,7 @@ bool Camera3Device::RequestThread::threadLoop() { cleanUpFailedRequest(request, nextRequest, outputBuffers); return true; } + request.input_buffer = &inputBuffer; totalNumBuffers += 1; } else { request.input_buffer = NULL; @@ -2797,6 +2886,26 @@ CameraMetadata Camera3Device::RequestThread::getLatestRequest() const { return mLatestRequest; } +bool Camera3Device::RequestThread::isStreamPending( + sp<Camera3StreamInterface>& stream) { + Mutex::Autolock l(mRequestLock); + + for (const auto& request : mRequestQueue) { + for (const auto& s : request->mOutputStreams) { + if (stream == s) return true; + } + if (stream == request->mInputStream) return true; + } + + for (const auto& request : mRepeatingRequests) { + for (const auto& s : request->mOutputStreams) { + if (stream == s) return true; + } + if (stream == request->mInputStream) return true; + } + + return false; +} void Camera3Device::RequestThread::cleanUpFailedRequest( camera3_capture_request_t &request, @@ -3144,6 +3253,138 @@ status_t Camera3Device::RequestThread::addDummyTriggerIds( return OK; } +/** + * PreparerThread inner class methods + */ + +Camera3Device::PreparerThread::PreparerThread() : + Thread(/*canCallJava*/false), mActive(false), mCancelNow(false) { +} + +Camera3Device::PreparerThread::~PreparerThread() { + Thread::requestExitAndWait(); + if (mCurrentStream != nullptr) { + mCurrentStream->cancelPrepare(); + 
ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId()); + mCurrentStream.clear(); + } + clear(); +} + +status_t Camera3Device::PreparerThread::prepare(sp<Camera3StreamInterface>& stream) { + status_t res; + + Mutex::Autolock l(mLock); + + res = stream->startPrepare(); + if (res == OK) { + // No preparation needed, fire listener right off + ALOGV("%s: Stream %d already prepared", __FUNCTION__, stream->getId()); + if (mListener) { + mListener->notifyPrepared(stream->getId()); + } + return OK; + } else if (res != NOT_ENOUGH_DATA) { + return res; + } + + // Need to prepare, start up thread if necessary + if (!mActive) { + // mRunning will change to false before the thread fully shuts down, so wait to be sure it + // isn't running + Thread::requestExitAndWait(); + res = Thread::run("C3PrepThread", PRIORITY_BACKGROUND); + if (res != OK) { + ALOGE("%s: Unable to start preparer stream: %d (%s)", __FUNCTION__, res, strerror(-res)); + if (mListener) { + mListener->notifyPrepared(stream->getId()); + } + return res; + } + mCancelNow = false; + mActive = true; + ALOGV("%s: Preparer stream started", __FUNCTION__); + } + + // queue up the work + mPendingStreams.push_back(stream); + ALOGV("%s: Stream %d queued for preparing", __FUNCTION__, stream->getId()); + + return OK; +} + +status_t Camera3Device::PreparerThread::clear() { + status_t res; + + Mutex::Autolock l(mLock); + + for (const auto& stream : mPendingStreams) { + stream->cancelPrepare(); + } + mPendingStreams.clear(); + mCancelNow = true; + + return OK; +} + +void Camera3Device::PreparerThread::setNotificationListener(NotificationListener *listener) { + Mutex::Autolock l(mLock); + mListener = listener; +} + +bool Camera3Device::PreparerThread::threadLoop() { + status_t res; + { + Mutex::Autolock l(mLock); + if (mCurrentStream == nullptr) { + // End thread if done with work + if (mPendingStreams.empty()) { + ALOGV("%s: Preparer stream out of work", __FUNCTION__); + // threadLoop _must not_ re-acquire mLock after it 
sets mActive to false; would + // cause deadlock with prepare()'s requestExitAndWait triggered by !mActive. + mActive = false; + return false; + } + + // Get next stream to prepare + auto it = mPendingStreams.begin(); + mCurrentStream = *it; + mPendingStreams.erase(it); + ATRACE_ASYNC_BEGIN("stream prepare", mCurrentStream->getId()); + ALOGV("%s: Preparing stream %d", __FUNCTION__, mCurrentStream->getId()); + } else if (mCancelNow) { + mCurrentStream->cancelPrepare(); + ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId()); + ALOGV("%s: Cancelling stream %d prepare", __FUNCTION__, mCurrentStream->getId()); + mCurrentStream.clear(); + mCancelNow = false; + return true; + } + } + + res = mCurrentStream->prepareNextBuffer(); + if (res == NOT_ENOUGH_DATA) return true; + if (res != OK) { + // Something bad happened; try to recover by cancelling prepare and + // signalling listener anyway + ALOGE("%s: Stream %d returned error %d (%s) during prepare", __FUNCTION__, + mCurrentStream->getId(), res, strerror(-res)); + mCurrentStream->cancelPrepare(); + } + + // This stream has finished, notify listener + Mutex::Autolock l(mLock); + if (mListener) { + ALOGV("%s: Stream %d prepare done, signaling listener", __FUNCTION__, + mCurrentStream->getId()); + mListener->notifyPrepared(mCurrentStream->getId()); + } + + ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId()); + mCurrentStream.clear(); + + return true; +} /** * Static callback forwarding methods from HAL to instance diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h index a77548d..4fbcb2e 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.h +++ b/services/camera/libcameraservice/device3/Camera3Device.h @@ -116,6 +116,8 @@ class Camera3Device : virtual status_t deleteReprocessStream(int id); virtual status_t configureStreams(); + virtual status_t getInputBufferProducer( + sp<IGraphicBufferProducer> *producer); virtual 
status_t createDefaultRequest(int templateId, CameraMetadata *request); @@ -136,6 +138,8 @@ class Camera3Device : virtual status_t flush(int64_t *lastFrameNumber = NULL); + virtual status_t prepare(int streamId); + virtual uint32_t getDeviceVersion(); virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const; @@ -179,6 +183,14 @@ class Camera3Device : uint32_t mDeviceVersion; + struct Size { + uint32_t width; + uint32_t height; + Size(uint32_t w = 0, uint32_t h = 0) : width(w), height(h){} + }; + // Map from format to size. + Vector<Size> mSupportedOpaqueInputSizes; + enum Status { STATUS_ERROR, STATUS_UNINITIALIZED, @@ -324,11 +336,11 @@ class Camera3Device : */ bool tryLockSpinRightRound(Mutex& lock); - struct Size { - int width; - int height; - Size(int w, int h) : width(w), height(h){} - }; + /** + * Helper function to determine if an input size for implementation defined + * format is supported. + */ + bool isOpaqueInputSizeSupported(uint32_t width, uint32_t height); /** * Helper function to get the largest Jpeg resolution (in area) @@ -364,7 +376,7 @@ class Camera3Device : sp<camera3::StatusTracker> statusTracker, camera3_device_t *hal3Device); - void setNotifyCallback(NotificationListener *listener); + void setNotificationListener(NotificationListener *listener); /** * Call after stream (re)-configuration is completed. 
@@ -428,6 +440,12 @@ class Camera3Device : */ CameraMetadata getLatestRequest() const; + /** + * Returns true if the stream is a target of any queued or repeating + * capture request + */ + bool isStreamPending(sp<camera3::Camera3StreamInterface>& stream); + protected: virtual bool threadLoop(); @@ -549,7 +567,6 @@ class Camera3Device : Vector<camera3_stream_buffer_t> pendingOutputBuffers; - // Fields used by the partial result only struct PartialResultInFlight { // Set by process_capture_result once 3A has been sent to clients @@ -600,7 +617,8 @@ class Camera3Device : resultExtras(extras), hasInputBuffer(hasInput){ } -}; + }; + // Map from frame number to the in-flight request state typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap; @@ -632,6 +650,45 @@ class Camera3Device : sp<camera3::StatusTracker> mStatusTracker; /** + * Thread for preparing streams + */ + class PreparerThread : private Thread, public virtual RefBase { + public: + PreparerThread(); + ~PreparerThread(); + + void setNotificationListener(NotificationListener *listener); + + /** + * Queue up a stream to be prepared. 
Streams are processed by + * a background thread in FIFO order + */ + status_t prepare(sp<camera3::Camera3StreamInterface>& stream); + + /** + * Cancel all current and pending stream preparation + */ + status_t clear(); + + private: + Mutex mLock; + + virtual bool threadLoop(); + + // Guarded by mLock + + NotificationListener *mListener; + List<sp<camera3::Camera3StreamInterface> > mPendingStreams; + bool mActive; + bool mCancelNow; + + // Only accessed by threadLoop and the destructor + + sp<camera3::Camera3StreamInterface> mCurrentStream; + }; + sp<PreparerThread> mPreparerThread; + + /** * Output result queue and current HAL device 3A state */ @@ -639,8 +696,10 @@ class Camera3Device : Mutex mOutputLock; /**** Scope for mOutputLock ****/ - + // the minimal frame number of the next non-reprocess result uint32_t mNextResultFrameNumber; + // the minimal frame number of the next reprocess result + uint32_t mNextReprocessResultFrameNumber; uint32_t mNextShutterFrameNumber; List<CaptureResult> mResultQueue; Condition mResultSignal; @@ -669,7 +728,8 @@ class Camera3Device : // partial results, and the frame number to the result queue. 
void sendCaptureResult(CameraMetadata &pendingMetadata, CaptureResultExtras &resultExtras, - CameraMetadata &collectedPartialResult, uint32_t frameNumber); + CameraMetadata &collectedPartialResult, uint32_t frameNumber, + bool reprocess); /**** Scope for mInFlightLock ****/ diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp index 01edfff..ecb8ac8 100644 --- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp @@ -87,7 +87,7 @@ status_t Camera3DummyStream::disconnectLocked() { return OK; } -status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) { +status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) const { *usage = DUMMY_USAGE; return OK; } diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h index d023c57..3a3dbf4 100644 --- a/services/camera/libcameraservice/device3/Camera3DummyStream.h +++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h @@ -89,7 +89,7 @@ class Camera3DummyStream : virtual status_t configureQueueLocked(); - virtual status_t getEndpointUsage(uint32_t *usage); + virtual status_t getEndpointUsage(uint32_t *usage) const; }; // class Camera3DummyStream diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp index 8696413..23b1c45 100644 --- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp +++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp @@ -67,13 +67,18 @@ bool Camera3IOStreamBase::hasOutstandingBuffersLocked() const { void Camera3IOStreamBase::dump(int fd, const Vector<String16> &args) const { (void) args; String8 lines; + + uint32_t consumerUsage = 0; + status_t res = getEndpointUsage(&consumerUsage); + if (res != OK) consumerUsage = 
0; + lines.appendFormat(" State: %d\n", mState); - lines.appendFormat(" Dims: %d x %d, format 0x%x\n", + lines.appendFormat(" Dims: %d x %d, format 0x%x, dataspace 0x%x\n", camera3_stream::width, camera3_stream::height, - camera3_stream::format); + camera3_stream::format, camera3_stream::data_space); lines.appendFormat(" Max size: %zu\n", mMaxSize); - lines.appendFormat(" Usage: %d, max HAL buffers: %d\n", - camera3_stream::usage, camera3_stream::max_buffers); + lines.appendFormat(" Combined usage: %d, max HAL buffers: %d\n", + camera3_stream::usage | consumerUsage, camera3_stream::max_buffers); lines.appendFormat(" Frames produced: %d, last timestamp: %" PRId64 " ns\n", mFrameCount, mLastTimestamp); lines.appendFormat(" Total buffers: %zu, currently dequeued: %zu\n", @@ -156,13 +161,11 @@ void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer, // Inform tracker about becoming busy if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG && - mState != STATE_IN_RECONFIG) { + mState != STATE_IN_RECONFIG && mState != STATE_PREPARING) { /** * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers * before/after register_stream_buffers during initial configuration - * or re-configuration. 
- * - * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2 + * or re-configuration, or during prepare pre-allocation */ sp<StatusTracker> statusTracker = mStatusTracker.promote(); if (statusTracker != 0) { @@ -177,9 +180,11 @@ void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer, } status_t Camera3IOStreamBase::getBufferPreconditionCheckLocked() const { - // Allow dequeue during IN_[RE]CONFIG for registration + // Allow dequeue during IN_[RE]CONFIG for registration, in + // PREPARING for pre-allocation if (mState != STATE_CONFIGURED && - mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) { + mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG && + mState != STATE_PREPARING) { ALOGE("%s: Stream %d: Can't get buffers in unconfigured state %d", __FUNCTION__, mId, mState); return INVALID_OPERATION; @@ -240,13 +245,11 @@ status_t Camera3IOStreamBase::returnAnyBufferLocked( mHandoutTotalBufferCount--; if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG && - mState != STATE_IN_RECONFIG) { + mState != STATE_IN_RECONFIG && mState != STATE_PREPARING) { /** * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers * before/after register_stream_buffers during initial configuration - * or re-configuration. 
- * - * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2 + * or re-configuration, or during prepare pre-allocation */ ALOGV("%s: Stream %d: All buffers returned; now idle", __FUNCTION__, mId); diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h index abcf2b1..f5727e8 100644 --- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h +++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h @@ -84,7 +84,7 @@ class Camera3IOStreamBase : virtual size_t getHandoutInputBufferCountLocked(); - virtual status_t getEndpointUsage(uint32_t *usage) = 0; + virtual status_t getEndpointUsage(uint32_t *usage) const = 0; status_t getBufferPreconditionCheckLocked() const; status_t returnBufferPreconditionCheckLocked() const; diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp index 6bf671e..84c5754 100644 --- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp @@ -65,8 +65,8 @@ status_t Camera3InputStream::getInputBufferLocked( assert(mConsumer != 0); BufferItem bufferItem; - res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false); + res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false); if (res != OK) { ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)", __FUNCTION__, mId, strerror(-res), res); @@ -162,6 +162,21 @@ status_t Camera3InputStream::returnInputBufferLocked( return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false); } +status_t Camera3InputStream::getInputBufferProducerLocked( + sp<IGraphicBufferProducer> *producer) { + ATRACE_CALL(); + + if (producer == NULL) { + return BAD_VALUE; + } else if (mProducer == NULL) { + ALOGE("%s: No input stream is configured"); + return INVALID_OPERATION; + } + + *producer = mProducer; + return 
OK; +} + status_t Camera3InputStream::disconnectLocked() { status_t res; @@ -212,10 +227,17 @@ status_t Camera3InputStream::configureQueueLocked() { res = producer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers); if (res != OK || minUndequeuedBuffers < 0) { ALOGE("%s: Stream %d: Could not query min undequeued buffers (error %d, bufCount %d)", - __FUNCTION__, mId, res, minUndequeuedBuffers); + __FUNCTION__, mId, res, minUndequeuedBuffers); return res; } size_t minBufs = static_cast<size_t>(minUndequeuedBuffers); + + if (camera3_stream::max_buffers == 0) { + ALOGE("%s: %d: HAL sets max_buffer to 0. Must be at least 1.", + __FUNCTION__, __LINE__); + return INVALID_OPERATION; + } + /* * We promise never to 'acquire' more than camera3_stream::max_buffers * at any one time. @@ -232,6 +254,8 @@ status_t Camera3InputStream::configureQueueLocked() { mConsumer = new BufferItemConsumer(consumer, camera3_stream::usage, mTotalBufferCount); mConsumer->setName(String8::format("Camera3-InputStream-%d", mId)); + + mProducer = producer; } res = mConsumer->setDefaultBufferSize(camera3_stream::width, @@ -251,7 +275,7 @@ status_t Camera3InputStream::configureQueueLocked() { return OK; } -status_t Camera3InputStream::getEndpointUsage(uint32_t *usage) { +status_t Camera3InputStream::getEndpointUsage(uint32_t *usage) const { // Per HAL3 spec, input streams have 0 for their initial usage field. 
*usage = 0; return OK; diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h index fd17f4f..9f3de10 100644 --- a/services/camera/libcameraservice/device3/Camera3InputStream.h +++ b/services/camera/libcameraservice/device3/Camera3InputStream.h @@ -49,6 +49,7 @@ class Camera3InputStream : public Camera3IOStreamBase { private: sp<BufferItemConsumer> mConsumer; + sp<IGraphicBufferProducer> mProducer; Vector<BufferItem> mBuffersInFlight; /** @@ -68,11 +69,13 @@ class Camera3InputStream : public Camera3IOStreamBase { virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer); virtual status_t returnInputBufferLocked( const camera3_stream_buffer &buffer); + virtual status_t getInputBufferProducerLocked( + sp<IGraphicBufferProducer> *producer); virtual status_t disconnectLocked(); virtual status_t configureQueueLocked(); - virtual status_t getEndpointUsage(uint32_t *usage); + virtual status_t getEndpointUsage(uint32_t *usage) const; }; // class Camera3InputStream diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp index 0c739e9..7a0331b 100644 --- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp @@ -209,6 +209,13 @@ status_t Camera3OutputStream::returnBufferCheckedLocked( } } mLock.lock(); + + // Once a valid buffer has been returned to the queue, can no longer + // dequeue all buffers for preallocation. 
+ if (buffer.status != CAMERA3_BUFFER_STATUS_ERROR) { + mStreamUnpreparable = true; + } + if (res != OK) { close(anwReleaseFence); } @@ -390,14 +397,28 @@ status_t Camera3OutputStream::disconnectLocked() { return OK; } -status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) { +status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const { status_t res; int32_t u = 0; res = mConsumer->query(mConsumer.get(), NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u); - *usage = u; + // If an opaque output stream's endpoint is ImageReader, add + // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used + // for the ZSL use case. + // Assume it's for ImageReader if the consumer usage doesn't have any of these bits set: + // 1. GRALLOC_USAGE_HW_TEXTURE + // 2. GRALLOC_USAGE_HW_RENDER + // 3. GRALLOC_USAGE_HW_COMPOSER + // 4. GRALLOC_USAGE_HW_VIDEO_ENCODER + if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED && + (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER | + GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) { + u |= GRALLOC_USAGE_HW_CAMERA_ZSL; + } + + *usage = u; return res; } diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h index 12b2ebb..513b695 100644 --- a/services/camera/libcameraservice/device3/Camera3OutputStream.h +++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h @@ -99,7 +99,7 @@ class Camera3OutputStream : virtual status_t configureQueueLocked(); - virtual status_t getEndpointUsage(uint32_t *usage); + virtual status_t getEndpointUsage(uint32_t *usage) const; }; // class Camera3OutputStream diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp index 4acbce3..3821da1 100644 --- a/services/camera/libcameraservice/device3/Camera3Stream.cpp +++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp @@ -194,6 
+194,11 @@ status_t Camera3Stream::finishConfiguration(camera3_device *hal3Device) { return OK; } + // Reset prepared state, since buffer config has changed, and existing + // allocations are no longer valid + mPrepared = false; + mStreamUnpreparable = false; + status_t res; res = configureQueueLocked(); if (res != OK) { @@ -244,6 +249,125 @@ status_t Camera3Stream::cancelConfiguration() { return OK; } +bool Camera3Stream::isUnpreparable() { + ATRACE_CALL(); + + Mutex::Autolock l(mLock); + return mStreamUnpreparable; +} + +status_t Camera3Stream::startPrepare() { + ATRACE_CALL(); + + Mutex::Autolock l(mLock); + status_t res = OK; + + // This function should be only called when the stream is configured already. + if (mState != STATE_CONFIGURED) { + ALOGE("%s: Stream %d: Can't prepare stream if stream is not in CONFIGURED " + "state %d", __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + + // This function can't be called if the stream has already received filled + // buffers + if (mStreamUnpreparable) { + ALOGE("%s: Stream %d: Can't prepare stream that's already in use", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (getHandoutOutputBufferCountLocked() > 0) { + ALOGE("%s: Stream %d: Can't prepare stream that has outstanding buffers", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mPrepared) return OK; + + size_t bufferCount = getBufferCountLocked(); + + mPreparedBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount); + mPreparedBufferIdx = 0; + + mState = STATE_PREPARING; + + return NOT_ENOUGH_DATA; +} + +bool Camera3Stream::isPreparing() const { + Mutex::Autolock l(mLock); + return mState == STATE_PREPARING; +} + +status_t Camera3Stream::prepareNextBuffer() { + ATRACE_CALL(); + + Mutex::Autolock l(mLock); + status_t res = OK; + + // This function should be only called when the stream is preparing + if (mState != STATE_PREPARING) { + ALOGE("%s: Stream %d: Can't prepare buffer if stream is not in PREPARING " + 
"state %d", __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + + // Get next buffer - this may allocate, and take a while for large buffers + res = getBufferLocked( &mPreparedBuffers.editItemAt(mPreparedBufferIdx) ); + if (res != OK) { + ALOGE("%s: Stream %d: Unable to allocate buffer %d during preparation", + __FUNCTION__, mId, mPreparedBufferIdx); + return NO_INIT; + } + + mPreparedBufferIdx++; + + // Check if we still have buffers left to allocate + if (mPreparedBufferIdx < mPreparedBuffers.size()) { + return NOT_ENOUGH_DATA; + } + + // Done with prepare - mark stream as such, and return all buffers + // via cancelPrepare + mPrepared = true; + + return cancelPrepareLocked(); +} + +status_t Camera3Stream::cancelPrepare() { + ATRACE_CALL(); + + Mutex::Autolock l(mLock); + + return cancelPrepareLocked(); +} + +status_t Camera3Stream::cancelPrepareLocked() { + status_t res = OK; + + // This function should be only called when the stream is mid-preparing. + if (mState != STATE_PREPARING) { + ALOGE("%s: Stream %d: Can't cancel prepare stream if stream is not in " + "PREPARING state %d", __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + + // Return all valid buffers to stream, in ERROR state to indicate + // they weren't filled. 
+ for (size_t i = 0; i < mPreparedBufferIdx; i++) { + mPreparedBuffers.editItemAt(i).release_fence = -1; + mPreparedBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR; + returnBufferLocked(mPreparedBuffers[i], 0); + } + mPreparedBuffers.clear(); + mPreparedBufferIdx = 0; + + mState = STATE_CONFIGURED; + + return res; +} + status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) { ATRACE_CALL(); Mutex::Autolock l(mLock); @@ -346,6 +470,13 @@ status_t Camera3Stream::returnInputBuffer(const camera3_stream_buffer &buffer) { return res; } +status_t Camera3Stream::getInputBufferProducer(sp<IGraphicBufferProducer> *producer) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + return getInputBufferProducerLocked(producer); +} + void Camera3Stream::fireBufferListenersLocked( const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) { List<wp<Camera3StreamBufferListener> >::iterator it, end; @@ -420,15 +551,13 @@ status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) { ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; " "must be set to NULL in camera3_device::ops", __FUNCTION__); return INVALID_OPERATION; - } else { - ALOGD("%s: Skipping NULL check for deprecated register_stream_buffers", __FUNCTION__); } return OK; - } else { - ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__); } + ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__); + status_t res; size_t bufferCount = getBufferCountLocked(); @@ -484,6 +613,8 @@ status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) { returnBufferLocked(streamBuffers[i], 0); } + mPrepared = true; + return res; } @@ -505,6 +636,10 @@ status_t Camera3Stream::returnInputBufferLocked( ALOGE("%s: This type of stream does not support input", __FUNCTION__); return INVALID_OPERATION; } +status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer) { + ALOGE("%s: This type of stream does not 
support input", __FUNCTION__); + return INVALID_OPERATION; +} void Camera3Stream::addBufferListener( wp<Camera3StreamBufferListener> listener) { diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h index aba27fe..0543c66 100644 --- a/services/camera/libcameraservice/device3/Camera3Stream.h +++ b/services/camera/libcameraservice/device3/Camera3Stream.h @@ -57,8 +57,15 @@ namespace camera3 { * re-registering buffers with HAL. * * STATE_CONFIGURED: Stream is configured, and has registered buffers with the - * HAL. The stream's getBuffer/returnBuffer work. The priv pointer may still be - * modified. + * HAL (if necessary). The stream's getBuffer/returnBuffer work. The priv + * pointer may still be modified. + * + * STATE_PREPARING: The stream's buffers are being pre-allocated for use. On + * older HALs, this is done as part of configuration, but in newer HALs + * buffers may be allocated at time of first use. But some use cases require + * buffer allocation upfront, to minmize disruption due to lengthy allocation + * duration. In this state, only prepareNextBuffer() and cancelPrepare() + * may be called. * * Transition table: * @@ -82,6 +89,12 @@ namespace camera3 { * STATE_CONFIGURED => STATE_CONSTRUCTED: * When disconnect() is called after making sure stream is idle with * waitUntilIdle(). + * STATE_CONFIGURED => STATE_PREPARING: + * When startPrepare is called before the stream has a buffer + * queued back into it for the first time. + * STATE_PREPARING => STATE_CONFIGURED: + * When sufficient prepareNextBuffer calls have been made to allocate + * all stream buffers, or cancelPrepare is called. 
* * Status Tracking: * Each stream is tracked by StatusTracker as a separate component, @@ -167,6 +180,73 @@ class Camera3Stream : status_t cancelConfiguration(); /** + * Determine whether the stream has already become in-use (has received + * a valid filled buffer), which determines if a stream can still have + * prepareNextBuffer called on it. + */ + bool isUnpreparable(); + + /** + * Start stream preparation. May only be called in the CONFIGURED state, + * when no valid buffers have yet been returned to this stream. + * + * If no prepartion is necessary, returns OK and does not transition to + * PREPARING state. Otherwise, returns NOT_ENOUGH_DATA and transitions + * to PREPARING. + * + * This call performs no allocation, so is quick to call. + * + * Returns: + * OK if no more buffers need to be preallocated + * NOT_ENOUGH_DATA if calls to prepareNextBuffer are needed to finish + * buffer pre-allocation, and transitions to the PREPARING state. + * NO_INIT in case of a serious error from the HAL device + * INVALID_OPERATION if called when not in CONFIGURED state, or a + * valid buffer has already been returned to this stream. + */ + status_t startPrepare(); + + /** + * Check if the stream is mid-preparing. + */ + bool isPreparing() const; + + /** + * Continue stream buffer preparation by allocating the next + * buffer for this stream. May only be called in the PREPARED state. + * + * Returns OK and transitions to the CONFIGURED state if all buffers + * are allocated after the call concludes. Otherwise returns NOT_ENOUGH_DATA. + * + * This call allocates one buffer, which may take several milliseconds for + * large buffers. + * + * Returns: + * OK if no more buffers need to be preallocated, and transitions + * to the CONFIGURED state. + * NOT_ENOUGH_DATA if more calls to prepareNextBuffer are needed to finish + * buffer pre-allocation. 
+ * NO_INIT in case of a serious error from the HAL device + * INVALID_OPERATION if called when not in CONFIGURED state, or a + * valid buffer has already been returned to this stream. + */ + status_t prepareNextBuffer(); + + /** + * Cancel stream preparation early. In case allocation needs to be + * stopped, this method transitions the stream back to the CONFIGURED state. + * Buffers that have been allocated with prepareNextBuffer remain that way, + * but a later use of prepareNextBuffer will require just as many + * calls as if the earlier prepare attempt had not existed. + * + * Returns: + * OK if cancellation succeeded, and transitions to the CONFIGURED state + * INVALID_OPERATION if not in the PREPARING state + * NO_INIT in case of a serious error from the HAL device + */ + status_t cancelPrepare(); + + /** * Fill in the camera3_stream_buffer with the next valid buffer for this * stream, to hand over to the HAL. * @@ -205,6 +285,10 @@ class Camera3Stream : */ status_t returnInputBuffer(const camera3_stream_buffer &buffer); + // get the buffer producer of the input buffer queue. + // only apply to input streams. + status_t getInputBufferProducer(sp<IGraphicBufferProducer> *producer); + /** * Whether any of the stream's buffers are currently in use by the HAL, * including buffers that have been returned but not yet had their @@ -259,7 +343,8 @@ class Camera3Stream : STATE_CONSTRUCTED, STATE_IN_CONFIG, STATE_IN_RECONFIG, - STATE_CONFIGURED + STATE_CONFIGURED, + STATE_PREPARING } mState; mutable Mutex mLock; @@ -285,6 +370,9 @@ class Camera3Stream : virtual status_t returnInputBufferLocked( const camera3_stream_buffer &buffer); virtual bool hasOutstandingBuffersLocked() const = 0; + // Get the buffer producer of the input buffer queue. Only apply to input streams. 
+ virtual status_t getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer); + // Can return -ENOTCONN when we are already disconnected (not an error) virtual status_t disconnectLocked() = 0; @@ -305,13 +393,17 @@ class Camera3Stream : // Get the usage flags for the other endpoint, or return // INVALID_OPERATION if they cannot be obtained. - virtual status_t getEndpointUsage(uint32_t *usage) = 0; + virtual status_t getEndpointUsage(uint32_t *usage) const = 0; // Tracking for idle state wp<StatusTracker> mStatusTracker; // Status tracker component ID int mStatusId; + // Tracking for stream prepare - whether this stream can still have + // prepareNextBuffer called on it. + bool mStreamUnpreparable; + private: uint32_t oldUsage; uint32_t oldMaxBuffers; @@ -326,6 +418,18 @@ class Camera3Stream : bool acquired, bool output); List<wp<Camera3StreamBufferListener> > mBufferListenerList; + status_t cancelPrepareLocked(); + + // Tracking for PREPARING state + + // State of buffer preallocation. Only true if either prepareNextBuffer + // has been called sufficient number of times, or stream configuration + // had to register buffers with the HAL + bool mPrepared; + + Vector<camera3_stream_buffer_t> mPreparedBuffers; + size_t mPreparedBufferIdx; + }; // class Camera3Stream }; // namespace camera3 diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h index da989cd..d177b57 100644 --- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h +++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h @@ -89,6 +89,68 @@ class Camera3StreamInterface : public virtual RefBase { virtual status_t cancelConfiguration() = 0; /** + * Determine whether the stream has already become in-use (has received + * a valid filled buffer), which determines if a stream can still have + * prepareNextBuffer called on it. 
+ */ + virtual bool isUnpreparable() = 0; + + /** + * Start stream preparation. May only be called in the CONFIGURED state, + * when no valid buffers have yet been returned to this stream. + * + * If no prepartion is necessary, returns OK and does not transition to + * PREPARING state. Otherwise, returns NOT_ENOUGH_DATA and transitions + * to PREPARING. + * + * Returns: + * OK if no more buffers need to be preallocated + * NOT_ENOUGH_DATA if calls to prepareNextBuffer are needed to finish + * buffer pre-allocation, and transitions to the PREPARING state. + * NO_INIT in case of a serious error from the HAL device + * INVALID_OPERATION if called when not in CONFIGURED state, or a + * valid buffer has already been returned to this stream. + */ + virtual status_t startPrepare() = 0; + + /** + * Check if the stream is mid-preparing. + */ + virtual bool isPreparing() const = 0; + + /** + * Continue stream buffer preparation by allocating the next + * buffer for this stream. May only be called in the PREPARED state. + * + * Returns OK and transitions to the CONFIGURED state if all buffers + * are allocated after the call concludes. Otherwise returns NOT_ENOUGH_DATA. + * + * Returns: + * OK if no more buffers need to be preallocated, and transitions + * to the CONFIGURED state. + * NOT_ENOUGH_DATA if more calls to prepareNextBuffer are needed to finish + * buffer pre-allocation. + * NO_INIT in case of a serious error from the HAL device + * INVALID_OPERATION if called when not in CONFIGURED state, or a + * valid buffer has already been returned to this stream. + */ + virtual status_t prepareNextBuffer() = 0; + + /** + * Cancel stream preparation early. In case allocation needs to be + * stopped, this method transitions the stream back to the CONFIGURED state. + * Buffers that have been allocated with prepareNextBuffer remain that way, + * but a later use of prepareNextBuffer will require just as many + * calls as if the earlier prepare attempt had not existed. 
+ * + * Returns: + * OK if cancellation succeeded, and transitions to the CONFIGURED state + * INVALID_OPERATION if not in the PREPARING state + * NO_INIT in case of a serious error from the HAL device + */ + virtual status_t cancelPrepare() = 0; + + /** * Fill in the camera3_stream_buffer with the next valid buffer for this * stream, to hand over to the HAL. * @@ -128,6 +190,13 @@ class Camera3StreamInterface : public virtual RefBase { virtual status_t returnInputBuffer(const camera3_stream_buffer &buffer) = 0; /** + * Get the buffer producer of the input buffer queue. + * + * This method only applies to input streams. + */ + virtual status_t getInputBufferProducer(sp<IGraphicBufferProducer> *producer) = 0; + + /** * Whether any of the stream's buffers are currently in use by the HAL, * including buffers that have been returned but not yet had their * release fence signaled. diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp index 7296d47..75a69ed 100644 --- a/services/mediaresourcemanager/ResourceManagerService.cpp +++ b/services/mediaresourcemanager/ResourceManagerService.cpp @@ -126,6 +126,7 @@ void ResourceManagerService::addResource( Mutex::Autolock lock(mLock); ResourceInfos& infos = getResourceInfosForEdit(pid, mMap); ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos); + // TODO: do the merge instead of append. info.resources.appendVector(resources); } @@ -197,19 +198,58 @@ bool ResourceManagerService::reclaimResource( } } } + + if (clients.size() == 0) { + // if we are here, run the third pass to free one codec with the same type. 
+ for (size_t i = 0; i < resources.size(); ++i) { + String8 type = resources[i].mType; + if (type == kResourceSecureCodec || type == kResourceNonSecureCodec) { + sp<IResourceManagerClient> client; + if (!getLowestPriorityBiggestClient_l(callingPid, type, &client)) { + return false; + } + clients.push_back(client); + } + } + } } if (clients.size() == 0) { return false; } + sp<IResourceManagerClient> failedClient; for (size_t i = 0; i < clients.size(); ++i) { ALOGV("reclaimResource from client %p", clients[i].get()); if (!clients[i]->reclaimResource()) { - return false; + failedClient = clients[i]; + break; } } - return true; + + { + Mutex::Autolock lock(mLock); + bool found = false; + for (size_t i = 0; i < mMap.size(); ++i) { + ResourceInfos &infos = mMap.editValueAt(i); + for (size_t j = 0; j < infos.size();) { + if (infos[j].client == failedClient) { + j = infos.removeAt(j); + found = true; + } else { + ++j; + } + } + if (found) { + break; + } + } + if (!found) { + ALOGV("didn't find failed client"); + } + } + + return (failedClient == NULL); } bool ResourceManagerService::getAllClients_l( diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp index b73e1bc..48d1395 100644 --- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp +++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 The Android Open Source Project + * Copyright 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -118,6 +118,20 @@ protected: client3->reset(); } + // test set up + // --------------------------------------------------------------------------------- + // pid priority client type number + // --------------------------------------------------------------------------------- + // kTestPid1(30) 30 mTestClient1 secure codec 1 + // graphic memory 200 + // graphic memory 200 + // --------------------------------------------------------------------------------- + // kTestPid2(20) 20 mTestClient2 non-secure codec 1 + // graphic memory 300 + // ------------------------------------------- + // mTestClient3 secure codec 1 + // graphic memory 100 + // --------------------------------------------------------------------------------- void addResource() { // kTestPid1 mTestClient1 Vector<MediaResource> resources1; @@ -202,10 +216,12 @@ protected: int lowPriorityPid = 100; EXPECT_FALSE(mService->getAllClients_l(lowPriorityPid, type, &clients)); int midPriorityPid = 25; - EXPECT_FALSE(mService->getAllClients_l(lowPriorityPid, type, &clients)); + // some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l + // will fail. 
+ EXPECT_FALSE(mService->getAllClients_l(midPriorityPid, type, &clients)); int highPriorityPid = 10; - EXPECT_TRUE(mService->getAllClients_l(10, unknowType, &clients)); - EXPECT_TRUE(mService->getAllClients_l(10, type, &clients)); + EXPECT_TRUE(mService->getAllClients_l(highPriorityPid, unknowType, &clients)); + EXPECT_TRUE(mService->getAllClients_l(highPriorityPid, type, &clients)); EXPECT_EQ(2u, clients.size()); EXPECT_EQ(mTestClient3, clients[0]); @@ -308,6 +324,30 @@ protected: // nothing left EXPECT_FALSE(mService->reclaimResource(10, resources)); } + + // ### secure codecs can coexist and secure codec can coexist with non-secure codec ### + { + addResource(); + mService->mSupportsMultipleSecureCodecs = true; + mService->mSupportsSecureWithNonSecureCodec = true; + + Vector<MediaResource> resources; + resources.push_back(MediaResource(String8(kResourceSecureCodec), 1)); + + EXPECT_TRUE(mService->reclaimResource(10, resources)); + // secure codec from lowest process got reclaimed + verifyClients(true, false, false); + + // call again should reclaim another secure codec from lowest process + EXPECT_TRUE(mService->reclaimResource(10, resources)); + verifyClients(false, false, true); + + // nothing left + EXPECT_FALSE(mService->reclaimResource(10, resources)); + + // clean up client 2 which still has non secure codec left + mService->removeResource((int64_t) mTestClient2.get()); + } } void testReclaimResourceNonSecure() { @@ -360,6 +400,26 @@ protected: // nothing left EXPECT_FALSE(mService->reclaimResource(10, resources)); } + + // ### secure codec can coexist with non-secure codec ### + { + addResource(); + mService->mSupportsSecureWithNonSecureCodec = true; + + Vector<MediaResource> resources; + resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1)); + + EXPECT_TRUE(mService->reclaimResource(10, resources)); + // one non secure codec from lowest process got reclaimed + verifyClients(false, true, false); + + // nothing left + 
EXPECT_FALSE(mService->reclaimResource(10, resources)); + + // clean up client 1 and 3 which still have secure codec left + mService->removeResource((int64_t) mTestClient1.get()); + mService->removeResource((int64_t) mTestClient3.get()); + } } void testGetLowestPriorityBiggestClient() { |