diff options
Diffstat (limited to 'services')
65 files changed, 6160 insertions, 1440 deletions
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index 60810d5..a269886 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -169,7 +169,8 @@ AudioFlinger::AudioFlinger() mBtNrecIsOff(false), mIsLowRamDevice(true), mIsDeviceTypeKnown(false), - mGlobalEffectEnableTime(0) + mGlobalEffectEnableTime(0), + mPrimaryOutputSampleRate(0) { getpid_cached = getpid(); char value[PROPERTY_VALUE_MAX]; @@ -1609,6 +1610,19 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, mHardwareStatus = AUDIO_HW_OUTPUT_OPEN; audio_stream_out_t *outStream = NULL; + + // FOR TESTING ONLY: + // Enable increased sink precision for mixing mode if kEnableExtendedPrecision is true. + if (kEnableExtendedPrecision && // Check only for Normal Mixing mode + !(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) { + // Update format + //config.format = AUDIO_FORMAT_PCM_FLOAT; + //config.format = AUDIO_FORMAT_PCM_24_BIT_PACKED; + //config.format = AUDIO_FORMAT_PCM_32_BIT; + //config.format = AUDIO_FORMAT_PCM_8_24_BIT; + // ALOGV("openOutput() upgrading format to %#08x", config.format); + } + status_t status = hwDevHal->open_output_stream(hwDevHal, id, *pDevices, @@ -1632,9 +1646,9 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { thread = new OffloadThread(this, output, id, *pDevices); ALOGV("openOutput() created offload output: ID %d thread %p", id, thread); - } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) || - (config.format != AUDIO_FORMAT_PCM_16_BIT) || - (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) { + } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) + || !isValidPcmSinkFormat(config.format) + || (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) { thread = new DirectOutputThread(this, output, id, *pDevices); ALOGV("openOutput() created direct output: ID %d thread %p", id, thread); } else { 
@@ -1668,6 +1682,8 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, mHardwareStatus = AUDIO_HW_SET_MODE; hwDevHal->set_mode(hwDevHal, mMode); mHardwareStatus = AUDIO_HW_IDLE; + + mPrimaryOutputSampleRate = config.sample_rate; } return id; } diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h index 19b1732..1ccef24 100644 --- a/services/audioflinger/AudioFlinger.h +++ b/services/audioflinger/AudioFlinger.h @@ -50,6 +50,8 @@ #include <media/AudioBufferProvider.h> #include <media/ExtendedAudioBufferProvider.h> + +#include "FastCapture.h" #include "FastMixer.h" #include <media/nbaio/NBAIO.h> #include "AudioWatchdog.h" @@ -323,6 +325,24 @@ private: audio_devices_t devices); void purgeStaleEffects_l(); + // Set kEnableExtendedPrecision to true to use extended precision in MixerThread + static const bool kEnableExtendedPrecision = false; + + // Returns true if format is permitted for the PCM sink in the MixerThread + static inline bool isValidPcmSinkFormat(audio_format_t format) { + switch (format) { + case AUDIO_FORMAT_PCM_16_BIT: + return true; + case AUDIO_FORMAT_PCM_FLOAT: + case AUDIO_FORMAT_PCM_24_BIT_PACKED: + case AUDIO_FORMAT_PCM_32_BIT: + case AUDIO_FORMAT_PCM_8_24_BIT: + return kEnableExtendedPrecision; + default: + return false; + } + } + // standby delay for MIXER and DUPLICATING playback threads is read from property // ro.audio.flinger_standbytime_ms or defaults to kDefaultStandbyTimeInNsecs static nsecs_t mStandbyTimeInNsecs; @@ -690,6 +710,9 @@ private: nsecs_t mGlobalEffectEnableTime; // when a global effect was last enabled sp<PatchPanel> mPatchPanel; + + uint32_t mPrimaryOutputSampleRate; // sample rate of the primary output, or zero if none + // protected by mHardwareLock }; #undef INCLUDING_FROM_AUDIOFLINGER_H diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp index ace3bf1..af312c4 100644 --- a/services/audioflinger/AudioMixer.cpp +++ 
b/services/audioflinger/AudioMixer.cpp @@ -40,8 +40,36 @@ #include <media/EffectsFactoryApi.h> +#include "AudioMixerOps.h" #include "AudioMixer.h" +// Use the FCC_2 macro for code assuming Fixed Channel Count of 2 and +// whose stereo assumption may need to be revisited later. +#ifndef FCC_2 +#define FCC_2 2 +#endif + +/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is + * being used. This is a considerable amount of log spam, so don't enable unless you + * are verifying the hook based code. + */ +//#define VERY_VERY_VERBOSE_LOGGING +#ifdef VERY_VERY_VERBOSE_LOGGING +#define ALOGVV ALOGV +//define ALOGVV printf // for test-mixer.cpp +#else +#define ALOGVV(a...) do { } while (0) +#endif + +// Set kUseNewMixer to true to use the new mixer engine. Otherwise the +// original code will be used. This is false for now. +static const bool kUseNewMixer = false; + +// Set kUseFloat to true to allow floating input into the mixer engine. +// If kUseNewMixer is false, this is ignored or may be overridden internally +// because of downmix/upmix support. 
+static const bool kUseFloat = true; + namespace android { // ---------------------------------------------------------------------------- @@ -265,8 +293,8 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, // assume default parameters for the track, except where noted below track_t* t = &mState.tracks[n]; t->needs = 0; - t->volume[0] = UNITY_GAIN; - t->volume[1] = UNITY_GAIN; + t->volume[0] = UNITY_GAIN_INT; + t->volume[1] = UNITY_GAIN_INT; // no initialization needed // t->prevVolume[0] // t->prevVolume[1] @@ -300,15 +328,19 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, t->downmixerBufferProvider = NULL; t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT; t->mFormat = format; - t->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; - if (t->mFormat != t->mMixerInFormat) { - prepareTrackForReformat(t, n); - } - status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask); + t->mMixerInFormat = kUseFloat && kUseNewMixer + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + // Check the downmixing (or upmixing) requirements. + status_t status = initTrackDownmix(t, n, channelMask); if (status != OK) { ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask); return -1; } + // initTrackDownmix() may change the input format requirement. + // If you desire floating point input to the mixer, it may change + // to integer because the downmixer requires integer to process. 
+ ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat); + prepareTrackForReformat(t, n); mTrackNames |= 1 << n; return TRACK0 + n; } @@ -443,6 +475,7 @@ status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName) }// end of scope for local variables that are not used in goto label "noDownmixForActiveTrack" // initialization successful: + pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // 16 bit input is required for downmix pTrack->downmixerBufferProvider = pDbp; reconfigureBufferProviders(pTrack); return NO_ERROR; @@ -467,12 +500,15 @@ status_t AudioMixer::prepareTrackForReformat(track_t* pTrack, int trackName) { ALOGV("AudioMixer::prepareTrackForReformat(%d) with format %#x", trackName, pTrack->mFormat); // discard the previous reformatter if there was one - unprepareTrackForReformat(pTrack, trackName); - pTrack->mReformatBufferProvider = new ReformatBufferProvider( - audio_channel_count_from_out_mask(pTrack->channelMask), - pTrack->mFormat, pTrack->mMixerInFormat); - reconfigureBufferProviders(pTrack); - return NO_ERROR; + unprepareTrackForReformat(pTrack, trackName); + // only configure reformatter if needed + if (pTrack->mFormat != pTrack->mMixerInFormat) { + pTrack->mReformatBufferProvider = new ReformatBufferProvider( + audio_channel_count_from_out_mask(pTrack->channelMask), + pTrack->mFormat, pTrack->mMixerInFormat); + reconfigureBufferProviders(pTrack); + } + return NO_ERROR; } void AudioMixer::reconfigureBufferProviders(track_t* pTrack) @@ -536,6 +572,44 @@ void AudioMixer::disable(int name) } } +/* Sets the volume ramp variables for the AudioMixer. + * + * The volume ramp variables are used to transition between the previous + * volume to the target volume. The duration of the transition is + * set by ramp, which is either 0 for immediate, or typically one state + * framecount period. + * + * @param newFloatValue new volume target in float [0.0, 1.0]. + * @param ramp number of frames to increment over. 
ramp is 0 if the volume + * should be set immediately. + * @param volume reference to the U4.12 target volume, set on return. + * @param prevVolume reference to the U4.27 previous volume, set on return. + * @param volumeInc reference to the increment per output audio frame, set on return. + * @return true if the volume has changed, false if volume is same. + */ +static inline bool setVolumeRampVariables(float newFloatValue, int32_t ramp, + int16_t &volume, int32_t &prevVolume, int32_t &volumeInc) { + int32_t newValue = newFloatValue * AudioMixer::UNITY_GAIN_INT; + if (newValue > AudioMixer::UNITY_GAIN_INT) { + newValue = AudioMixer::UNITY_GAIN_INT; + } else if (newValue < 0) { + ALOGE("negative volume %.7g", newFloatValue); + newValue = 0; // should never happen, but for safety check. + } + if (newValue == volume) { + return false; + } + if (ramp != 0) { + volumeInc = ((newValue - volume) << 16) / ramp; + prevVolume = (volumeInc == 0 ? newValue : volume) << 16; + } else { + volumeInc = 0; + prevVolume = newValue << 16; + } + volume = newValue; + return true; +} + void AudioMixer::setParameter(int name, int target, int param, void *value) { name -= TRACK0; @@ -558,8 +632,15 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) track.channelMask = mask; track.channelCount = channelCount; // the mask has changed, does this track need a downmixer? - initTrackDownmix(&mState.tracks[name], name, mask); + // update to try using our desired format (if we aren't already using it) + track.mMixerInFormat = kUseFloat && kUseNewMixer + ? 
AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + status_t status = initTrackDownmix(&mState.tracks[name], name, mask); + ALOGE_IF(status != OK, + "Invalid channel mask %#x, initTrackDownmix returned %d", + mask, status); ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", mask); + prepareTrackForReformat(&track, name); // format may have changed invalidateState(1 << name); } } break; @@ -583,11 +664,7 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format); track.mFormat = format; ALOGV("setParameter(TRACK, FORMAT, %#x)", format); - //if (track.mFormat != track.mMixerInFormat) - { - ALOGD("Reformatting!"); - prepareTrackForReformat(&track, name); - } + prepareTrackForReformat(&track, name); invalidateState(1 << name); } } break; @@ -637,41 +714,23 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) switch (param) { case VOLUME0: case VOLUME1: - if (track.volume[param-VOLUME0] != valueInt) { - ALOGV("setParameter(VOLUME, VOLUME0/1: %04x)", valueInt); - track.prevVolume[param-VOLUME0] = track.volume[param-VOLUME0] << 16; - track.volume[param-VOLUME0] = valueInt; - if (target == VOLUME) { - track.prevVolume[param-VOLUME0] = valueInt << 16; - track.volumeInc[param-VOLUME0] = 0; - } else { - int32_t d = (valueInt<<16) - track.prevVolume[param-VOLUME0]; - int32_t volInc = d / int32_t(mState.frameCount); - track.volumeInc[param-VOLUME0] = volInc; - if (volInc == 0) { - track.prevVolume[param-VOLUME0] = valueInt << 16; - } - } + if (setVolumeRampVariables(*reinterpret_cast<float*>(value), + target == RAMP_VOLUME ? mState.frameCount : 0, + track.volume[param - VOLUME0], track.prevVolume[param - VOLUME0], + track.volumeInc[param - VOLUME0])) { + ALOGV("setParameter(%s, VOLUME%d: %04x)", + target == VOLUME ? 
"VOLUME" : "RAMP_VOLUME", param - VOLUME0, + track.volume[param - VOLUME0]); invalidateState(1 << name); } break; case AUXLEVEL: //ALOG_ASSERT(0 <= valueInt && valueInt <= MAX_GAIN_INT, "bad aux level %d", valueInt); - if (track.auxLevel != valueInt) { - ALOGV("setParameter(VOLUME, AUXLEVEL: %04x)", valueInt); - track.prevAuxLevel = track.auxLevel << 16; - track.auxLevel = valueInt; - if (target == VOLUME) { - track.prevAuxLevel = valueInt << 16; - track.auxInc = 0; - } else { - int32_t d = (valueInt<<16) - track.prevAuxLevel; - int32_t volInc = d / int32_t(mState.frameCount); - track.auxInc = volInc; - if (volInc == 0) { - track.prevAuxLevel = valueInt << 16; - } - } + if (setVolumeRampVariables(*reinterpret_cast<float*>(value), + target == RAMP_VOLUME ? mState.frameCount : 0, + track.auxLevel, track.prevAuxLevel, track.auxInc)) { + ALOGV("setParameter(%s, AUXLEVEL: %04x)", + target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track.auxLevel); invalidateState(1 << name); } break; @@ -703,7 +762,20 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate) } else { quality = AudioResampler::DEFAULT_QUALITY; } - const int bits = mMixerInFormat == AUDIO_FORMAT_PCM_16_BIT ? 16 : /* FLOAT */ 32; + + int bits; + switch (mMixerInFormat) { + case AUDIO_FORMAT_PCM_16_BIT: + bits = 16; + break; + case AUDIO_FORMAT_PCM_FLOAT: + bits = 32; // 32 bits to the AudioResampler::create() indicates float. 
+ break; + default: + LOG_ALWAYS_FATAL("Invalid mMixerInFormat: %#x", mMixerInFormat); + break; + } + ALOGVV("Creating resampler with %d bits\n", bits); resampler = AudioResampler::create( bits, // the resampler sees the number of channels after the downmixer, if any @@ -828,16 +900,19 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) if (n & NEEDS_RESAMPLE) { all16BitsStereoNoResample = false; resampling = true; - t.hook = track__genericResample; + t.hook = getTrackHook(TRACKTYPE_RESAMPLE, FCC_2, + t.mMixerInFormat, t.mMixerFormat); ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2, "Track %d needs downmix + resample", i); } else { if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){ - t.hook = track__16BitsMono; + t.hook = getTrackHook(TRACKTYPE_NORESAMPLEMONO, FCC_2, + t.mMixerInFormat, t.mMixerFormat); all16BitsStereoNoResample = false; } if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){ - t.hook = track__16BitsStereo; + t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, FCC_2, + t.mMixerInFormat, t.mMixerFormat); ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2, "Track %d needs downmix", i); } @@ -868,7 +943,10 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) state->hook = process__genericNoResampling; if (all16BitsStereoNoResample && !volumeRamp) { if (countActiveTracks == 1) { - state->hook = process__OneTrack16BitsStereoNoResampling; + const int i = 31 - __builtin_clz(state->enabledTracks); + track_t& t = state->tracks[i]; + state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, FCC_2, + t.mMixerInFormat, t.mMixerFormat); } } } @@ -911,6 +989,7 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux) { + ALOGVV("track__genericResample\n"); t->resampler->setSampleRate(t->sampleRate); // ramp gain - resample to temp buffer and scale/mix in 2nd step @@ -918,7 +997,7 @@ 
void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram // always resample with unity gain when sending to auxiliary buffer to be able // to apply send level after resampling // TODO: modify each resampler to support aux channel? - t->resampler->setVolume(UNITY_GAIN, UNITY_GAIN); + t->resampler->setVolume(UNITY_GAIN_INT, UNITY_GAIN_INT); memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t)); t->resampler->resample(temp, outFrameCount, t->bufferProvider); if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) { @@ -928,7 +1007,7 @@ void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram } } else { if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) { - t->resampler->setVolume(UNITY_GAIN, UNITY_GAIN); + t->resampler->setVolume(UNITY_GAIN_INT, UNITY_GAIN_INT); memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t)); t->resampler->resample(temp, outFrameCount, t->bufferProvider); volumeRampStereo(t, out, outFrameCount, temp, aux); @@ -1022,6 +1101,7 @@ void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32 void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux) { + ALOGVV("track__16BitsStereo\n"); const int16_t *in = static_cast<const int16_t *>(t->in); if (CC_UNLIKELY(aux != NULL)) { @@ -1113,6 +1193,7 @@ void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux) { + ALOGVV("track__16BitsMono\n"); const int16_t *in = static_cast<int16_t const *>(t->in); if (CC_UNLIKELY(aux != NULL)) { @@ -1200,6 +1281,7 @@ void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, // no-op case void AudioMixer::process__nop(state_t* state, int64_t pts) { + ALOGVV("process__nop\n"); uint32_t e0 = state->enabledTracks; size_t sampleCount = state->frameCount * 
MAX_NUM_CHANNELS; while (e0) { @@ -1247,6 +1329,7 @@ void AudioMixer::process__nop(state_t* state, int64_t pts) // generic code without resampling void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) { + ALOGVV("process__genericNoResampling\n"); int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32))); // acquire each track's buffer @@ -1329,18 +1412,12 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) } } } - switch (t1.mMixerFormat) { - case AUDIO_FORMAT_PCM_FLOAT: - memcpy_to_float_from_q4_27(reinterpret_cast<float *>(out), outTemp, BLOCKSIZE * 2); - out += BLOCKSIZE * 2; // output is 2 floats/frame. - break; - case AUDIO_FORMAT_PCM_16_BIT: - ditherAndClamp(out, outTemp, BLOCKSIZE); - out += BLOCKSIZE; // output is 1 int32_t (2 int16_t samples)/frame - break; - default: - LOG_ALWAYS_FATAL("bad mixer format: %d", t1.mMixerFormat); - } + + convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat, + BLOCKSIZE * FCC_2); + // TODO: fix ugly casting due to choice of out pointer type + out = reinterpret_cast<int32_t*>((uint8_t*)out + + BLOCKSIZE * FCC_2 * audio_bytes_per_sample(t1.mMixerFormat)); numFrames += BLOCKSIZE; } while (numFrames < state->frameCount); } @@ -1359,6 +1436,7 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) // generic code with resampling void AudioMixer::process__genericResampling(state_t* state, int64_t pts) { + ALOGVV("process__genericResampling\n"); // this const just means that local variable outTemp doesn't change int32_t* const outTemp = state->outputTemp; const size_t size = sizeof(int32_t) * MAX_NUM_CHANNELS * state->frameCount; @@ -1422,16 +1500,7 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) } } } - switch (t1.mMixerFormat) { - case AUDIO_FORMAT_PCM_FLOAT: - memcpy_to_float_from_q4_27(reinterpret_cast<float*>(out), outTemp, numFrames*2); - break; - case AUDIO_FORMAT_PCM_16_BIT: - ditherAndClamp(out, 
outTemp, numFrames); - break; - default: - LOG_ALWAYS_FATAL("bad mixer format: %d", t1.mMixerFormat); - } + convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat, numFrames * FCC_2); } } @@ -1439,6 +1508,7 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, int64_t pts) { + ALOGVV("process__OneTrack16BitsStereoNoResampling\n"); // This method is only called when state->enabledTracks has exactly // one bit set. The asserts below would verify this, but are commented out // since the whole point of this method is to optimize performance. @@ -1450,6 +1520,7 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, AudioBufferProvider::Buffer& b(t.buffer); int32_t* out = t.mainBuffer; + float *fout = reinterpret_cast<float*>(out); size_t numFrames = state->frameCount; const int16_t vl = t.volume[0]; @@ -1463,9 +1534,10 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, // in == NULL can happen if the track was flushed just after having // been enabled for mixing. 
- if (in == NULL || ((unsigned long)in & 3)) { - memset(out, 0, numFrames*MAX_NUM_CHANNELS*sizeof(int16_t)); - ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: " + if (in == NULL || (((uintptr_t)in) & 3)) { + memset(out, 0, numFrames + * MAX_NUM_CHANNELS * audio_bytes_per_sample(t.mMixerFormat)); + ALOGE_IF((((uintptr_t)in) & 3), "process stereo track: input buffer alignment pb: " "buffer %p track %d, channels %d, needs %08x", in, i, t.channelCount, t.needs); return; @@ -1473,8 +1545,7 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, size_t outFrames = b.frameCount; switch (t.mMixerFormat) { - case AUDIO_FORMAT_PCM_FLOAT: { - float *fout = reinterpret_cast<float*>(out); + case AUDIO_FORMAT_PCM_FLOAT: do { uint32_t rl = *reinterpret_cast<const uint32_t *>(in); in += 2; @@ -1485,9 +1556,9 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, // Note: In case of later int16_t sink output, // conversion and clamping is done by memcpy_to_i16_from_float(). } while (--outFrames); - } break; + break; case AUDIO_FORMAT_PCM_16_BIT: - if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) { + if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) { // volume is boosted, so we might need to clamp even though // we process only one track. do { @@ -1662,5 +1733,275 @@ int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); } +/* This process hook is called when there is a single track without + * aux buffer, volume ramp, or resampling. + * TODO: Update the hook selection: this can properly handle aux and ramp. 
+ */ +template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA> +void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts) +{ + ALOGVV("process_NoResampleOneTrack\n"); + // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz. + const int i = 31 - __builtin_clz(state->enabledTracks); + ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled"); + track_t *t = &state->tracks[i]; + TO* out = reinterpret_cast<TO*>(t->mainBuffer); + TA* aux = reinterpret_cast<TA*>(t->auxBuffer); + const bool ramp = t->needsRamp(); + + for (size_t numFrames = state->frameCount; numFrames; ) { + AudioBufferProvider::Buffer& b(t->buffer); + // get input buffer + b.frameCount = numFrames; + const int64_t outputPTS = calculateOutputPTS(*t, pts, state->frameCount - numFrames); + t->bufferProvider->getNextBuffer(&b, outputPTS); + const TI *in = reinterpret_cast<TI*>(b.raw); + + // in == NULL can happen if the track was flushed just after having + // been enabled for mixing. + if (in == NULL || (((uintptr_t)in) & 3)) { + memset(out, 0, numFrames + * NCHAN * audio_bytes_per_sample(t->mMixerFormat)); + ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: " + "buffer %p track %p, channels %d, needs %#x", + in, t, t->channelCount, t->needs); + return; + } + + const size_t outFrames = b.frameCount; + if (ramp) { + volumeRampMulti<MIXTYPE_MULTI_SAVEONLY, NCHAN>(out, outFrames, in, aux, + t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc); + } else { + volumeMulti<MIXTYPE_MULTI_SAVEONLY, NCHAN>(out, outFrames, in, aux, + t->volume, t->auxLevel); + } + out += outFrames * NCHAN; + if (aux != NULL) { + aux += NCHAN; + } + numFrames -= b.frameCount; + + // release buffer + t->bufferProvider->releaseBuffer(&b); + } + if (ramp) { + t->adjustVolumeRamp(aux != NULL); + } +} + +/* This track hook is called to do resampling then mixing, + * pulling from the track's upstream AudioBufferProvider. 
+ */ +template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA> +void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux) +{ + ALOGVV("track__Resample\n"); + t->resampler->setSampleRate(t->sampleRate); + + const bool ramp = t->needsRamp(); + if (ramp || aux != NULL) { + // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step. + // if aux != NULL: resample with unity gain to temp buffer then apply send level. + + t->resampler->setVolume(UNITY_GAIN_INT, UNITY_GAIN_INT); + memset(temp, 0, outFrameCount * NCHAN * sizeof(TO)); + t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider); + if (ramp) { + volumeRampMulti<MIXTYPE_MULTI, NCHAN>(out, outFrameCount, temp, aux, + t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc); + t->adjustVolumeRamp(aux != NULL); + } else { + volumeMulti<MIXTYPE_MULTI, NCHAN>(out, outFrameCount, temp, aux, + t->volume, t->auxLevel); + } + } else { // constant volume gain + t->resampler->setVolume(t->volume[0], t->volume[1]); + t->resampler->resample((int32_t*)out, outFrameCount, t->bufferProvider); + } +} + +/* This track hook is called to mix a track, when no resampling is required. + * The input buffer should be present in t->in. + */ +template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA> +void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux) +{ + ALOGVV("track__NoResample\n"); + const TI *in = static_cast<const TI *>(t->in); + + if (t->needsRamp()) { + volumeRampMulti<MIXTYPE, NCHAN>(out, frameCount, in, aux, + t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc); + t->adjustVolumeRamp(aux != NULL); + } else { + volumeMulti<MIXTYPE, NCHAN>(out, frameCount, in, aux, t->volume, t->auxLevel); + } + // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels. + // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels. 
+ in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * NCHAN; + t->in = in; +} + +/* The Mixer engine generates either int32_t (Q4_27) or float data. + * We use this function to convert the engine buffers + * to the desired mixer output format, either int16_t (Q.15) or float. + */ +void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat, + void *in, audio_format_t mixerInFormat, size_t sampleCount) +{ + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out + break; + case AUDIO_FORMAT_PCM_16_BIT: + memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount); + break; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + case AUDIO_FORMAT_PCM_16_BIT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy_to_float_from_q4_27((float*)out, (int32_t*)in, sampleCount); + break; + case AUDIO_FORMAT_PCM_16_BIT: + // two int16_t are produced per iteration + ditherAndClamp((int32_t*)out, (int32_t*)in, sampleCount >> 1); + break; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } +} + +/* Returns the proper track hook to use for mixing the track into the output buffer. 
+ */ +AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, int channels, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused) +{ + if (!kUseNewMixer && channels == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) { + switch (trackType) { + case TRACKTYPE_NOP: + return track__nop; + case TRACKTYPE_RESAMPLE: + return track__genericResample; + case TRACKTYPE_NORESAMPLEMONO: + return track__16BitsMono; + case TRACKTYPE_NORESAMPLE: + return track__16BitsStereo; + default: + LOG_ALWAYS_FATAL("bad trackType: %d", trackType); + break; + } + } + LOG_ALWAYS_FATAL_IF(channels != FCC_2); // TODO: must be stereo right now + switch (trackType) { + case TRACKTYPE_NOP: + return track__nop; + case TRACKTYPE_RESAMPLE: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__Resample<MIXTYPE_MULTI, 2, float, float, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t)\ + track__Resample<MIXTYPE_MULTI, 2, int32_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + case TRACKTYPE_NORESAMPLEMONO: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MONOEXPAND, 2, float, float, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MONOEXPAND, 2, int32_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + case TRACKTYPE_NORESAMPLE: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MULTI, 2, float, float, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MULTI, 2, int32_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad trackType: %d", trackType); + break; + } + 
return NULL; +} + +/* Returns the proper process hook for mixing tracks. Currently works only for + * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling. + */ +AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, int channels, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat) +{ + if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK + LOG_ALWAYS_FATAL("bad processType: %d", processType); + return NULL; + } + if (!kUseNewMixer && channels == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) { + return process__OneTrack16BitsStereoNoResampling; + } + LOG_ALWAYS_FATAL_IF(channels != FCC_2); // TODO: must be stereo right now + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2, + float, float, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2, + int16_t, float, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + case AUDIO_FORMAT_PCM_16_BIT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2, + float, int16_t, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, 2, + int16_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + return NULL; +} + // ---------------------------------------------------------------------------- }; // namespace android diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h index 573ba96..e6de00c 100644 --- a/services/audioflinger/AudioMixer.h +++ b/services/audioflinger/AudioMixer.h @@ -31,7 +31,7 @@ #include <media/nbaio/NBLog.h> // 
FIXME This is actually unity gain, which might not be max in future, expressed in U.12 -#define MAX_GAIN_INT AudioMixer::UNITY_GAIN +#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT namespace android { @@ -58,7 +58,8 @@ public: // maximum number of channels supported for the content static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = 8; - static const uint16_t UNITY_GAIN = 0x1000; + static const uint16_t UNITY_GAIN_INT = 0x1000; + static const float UNITY_GAIN_FLOAT = 1.0f; enum { // names @@ -220,6 +221,7 @@ private: // 16-byte boundary + bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; } bool setResampler(uint32_t sampleRate, uint32_t devSampleRate); bool doesResample() const { return resampler != NULL; } void resetResampler() { if (resampler != NULL) resampler->reset(); } @@ -228,12 +230,14 @@ private: resampler->getUnreleasedFrames() : 0; }; }; + typedef void (*process_hook_t)(state_t* state, int64_t pts); + // pad to 32-bytes to fill cache line struct state_t { uint32_t enabledTracks; uint32_t needsChanged; size_t frameCount; - void (*hook)(state_t* state, int64_t pts); // one of process__*, never NULL + process_hook_t hook; // one of process__*, never NULL int32_t *outputTemp; int32_t *resampleTemp; NBLog::Writer* mLog; @@ -344,6 +348,38 @@ private: static uint64_t sLocalTimeFreq; static pthread_once_t sOnceControl; static void sInitRoutine(); + + // multi-format process hooks + template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA> + static void process_NoResampleOneTrack(state_t* state, int64_t pts); + + // multi-format track hooks + template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA> + static void track__Resample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux); + template <int MIXTYPE, int NCHAN, typename TO, typename TI, typename TA> + static void track__NoResample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux); + + static void convertMixerFormat(void 
*out, audio_format_t mixerOutFormat, + void *in, audio_format_t mixerInFormat, size_t sampleCount); + + // hook types + enum { + PROCESSTYPE_NORESAMPLEONETRACK, + }; + enum { + TRACKTYPE_NOP, + TRACKTYPE_RESAMPLE, + TRACKTYPE_NORESAMPLE, + TRACKTYPE_NORESAMPLEMONO, + }; + + // functions for determining the proper process and track hooks. + static process_hook_t getProcessHook(int processType, int channels, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat); + static hook_t getTrackHook(int trackType, int channels, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat); }; // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixerOps.h b/services/audioflinger/AudioMixerOps.h new file mode 100644 index 0000000..de92946 --- /dev/null +++ b/services/audioflinger/AudioMixerOps.h @@ -0,0 +1,361 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_MIXER_OPS_H +#define ANDROID_AUDIO_MIXER_OPS_H + +namespace android { + +/* Behavior of is_same<>::value is true if the types are identical, + * false otherwise. Identical to the STL std::is_same. 
+ */ +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + + +/* MixMul is a multiplication operator to scale an audio input signal + * by a volume gain, with the formula: + * + * O(utput) = I(nput) * V(olume) + * + * The output, input, and volume may have different types. + * There are 27 variants, of which 14 are actually defined in an + * explicitly templated class. + * + * The following type variables and the underlying meaning: + * + * Output type TO: int32_t (Q4.27) or int16_t (Q.15) or float [-1,1] + * Input signal type TI: int32_t (Q4.27) or int16_t (Q.15) or float [-1,1] + * Volume type TV: int32_t (U4.28) or int16_t (U4.12) or float [-1,1] + * + * For high precision audio, only the <TO, TI, TV> = <float, float, float> + * needs to be accelerated. This is perhaps the easiest form to do quickly as well. + */ + +template <typename TO, typename TI, typename TV> +inline TO MixMul(TI value, TV volume) { + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(false); + // should not be here :-). + // To avoid mistakes, this template is always specialized. + return value * volume; +} + +template <> +inline int32_t MixMul<int32_t, int16_t, int16_t>(int16_t value, int16_t volume) { + return value * volume; +} + +template <> +inline int32_t MixMul<int32_t, int32_t, int16_t>(int32_t value, int16_t volume) { + return (value >> 12) * volume; +} + +template <> +inline int32_t MixMul<int32_t, int16_t, int32_t>(int16_t value, int32_t volume) { + return value * (volume >> 16); +} + +template <> +inline int32_t MixMul<int32_t, int32_t, int32_t>(int32_t value, int32_t volume) { + return (value >> 12) * (volume >> 16); +} + +template <> +inline float MixMul<float, float, int16_t>(float value, int16_t volume) { + static const float norm = 1. 
/ (1 << 12); + return value * volume * norm; +} + +template <> +inline float MixMul<float, float, int32_t>(float value, int32_t volume) { + static const float norm = 1. / (1 << 28); + return value * volume * norm; +} + +template <> +inline int16_t MixMul<int16_t, float, int16_t>(float value, int16_t volume) { + return clamp16_from_float(MixMul<float, float, int16_t>(value, volume)); +} + +template <> +inline int16_t MixMul<int16_t, float, int32_t>(float value, int32_t volume) { + return clamp16_from_float(MixMul<float, float, int32_t>(value, volume)); +} + +template <> +inline float MixMul<float, int16_t, int16_t>(int16_t value, int16_t volume) { + static const float norm = 1. / (1 << (15 + 12)); + return static_cast<float>(value) * static_cast<float>(volume) * norm; +} + +template <> +inline float MixMul<float, int16_t, int32_t>(int16_t value, int32_t volume) { + static const float norm = 1. / (1ULL << (15 + 28)); + return static_cast<float>(value) * static_cast<float>(volume) * norm; +} + +template <> +inline int16_t MixMul<int16_t, int16_t, int16_t>(int16_t value, int16_t volume) { + return clamp16(MixMul<int32_t, int16_t, int16_t>(value, volume) >> 12); +} + +template <> +inline int16_t MixMul<int16_t, int32_t, int16_t>(int32_t value, int16_t volume) { + return clamp16(MixMul<int32_t, int32_t, int16_t>(value, volume) >> 12); +} + +template <> +inline int16_t MixMul<int16_t, int16_t, int32_t>(int16_t value, int32_t volume) { + return clamp16(MixMul<int32_t, int16_t, int32_t>(value, volume) >> 12); +} + +template <> +inline int16_t MixMul<int16_t, int32_t, int32_t>(int32_t value, int32_t volume) { + return clamp16(MixMul<int32_t, int32_t, int32_t>(value, volume) >> 12); +} + +/* + * MixAccum is used to add into an accumulator register of a possibly different + * type. The TO and TI types are the same as MixMul. 
+ */ + +template <typename TO, typename TI> +inline void MixAccum(TO *auxaccum, TI value) { + if (!is_same<TO, TI>::value) { + LOG_ALWAYS_FATAL("MixAccum type not properly specialized: %d %d\n", + sizeof(TO), sizeof(TI)); + } + *auxaccum += value; +} + +template<> +inline void MixAccum<float, int16_t>(float *auxaccum, int16_t value) { + static const float norm = 1. / (1 << 15); + *auxaccum += norm * value; +} + +template<> +inline void MixAccum<float, int32_t>(float *auxaccum, int32_t value) { + static const float norm = 1. / (1 << 27); + *auxaccum += norm * value; +} + +template<> +inline void MixAccum<int32_t, int16_t>(int32_t *auxaccum, int16_t value) { + *auxaccum += value << 12; +} + +template<> +inline void MixAccum<int32_t, float>(int32_t *auxaccum, float value) { + *auxaccum += clampq4_27_from_float(value); +} + +/* MixMulAux is just like MixMul except it combines with + * an accumulator operation MixAccum. + */ + +template <typename TO, typename TI, typename TV, typename TA> +inline TO MixMulAux(TI value, TV volume, TA *auxaccum) { + MixAccum<TA, TI>(auxaccum, value); + return MixMul<TO, TI, TV>(value, volume); +} + +/* MIXTYPE is used to determine how the samples in the input frame + * are mixed with volume gain into the output frame. + * See the volumeRampMulti functions below for more details. + */ +enum { + MIXTYPE_MULTI, + MIXTYPE_MONOEXPAND, + MIXTYPE_MULTI_SAVEONLY, +}; + +/* + * The volumeRampMulti and volumeRamp functions take a MIXTYPE + * which indicates the per-frame mixing and accumulation strategy. + * + * MIXTYPE_MULTI: + * NCHAN represents number of input and output channels. + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TV: int32_t (U4.28) or int16_t (U4.12) or float + * vol: represents a volume array. + * + * This accumulates into the out pointer. + * + * MIXTYPE_MONOEXPAND: + * Single input channel. NCHAN represents number of output channels. 
+ * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TV: int32_t (U4.28) or int16_t (U4.12) or float + * Input channel count is 1. + * vol: represents volume array. + * + * This accumulates into the out pointer. + * + * MIXTYPE_MULTI_SAVEONLY: + * NCHAN represents number of input and output channels. + * TO: int16_t (Q.15) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TV: int32_t (U4.28) or int16_t (U4.12) or float + * vol: represents a volume array. + * + * MIXTYPE_MULTI_SAVEONLY does not accumulate into the out pointer. + */ + +template <int MIXTYPE, int NCHAN, + typename TO, typename TI, typename TV, typename TA, typename TAV> +inline void volumeRampMulti(TO* out, size_t frameCount, + const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc) +{ +#ifdef ALOGVV + ALOGVV("volumeRampMulti, MIXTYPE:%d\n", MIXTYPE); +#endif + if (aux != NULL) { + do { + TA auxaccum = 0; + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum); + vol[i] += volinc[i]; + } + in++; + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + auxaccum /= NCHAN; + *aux++ += MixMul<TA, TA, TAV>(auxaccum, *vola); + vola[0] += volainc; + } while (--frameCount); + } else { + do { + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in++, vol[i]); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMul<TO, TI, TV>(*in++, vol[i]); + vol[i] += volinc[i]; + } + break; + case 
MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in, vol[i]); + vol[i] += volinc[i]; + } + in++; + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + } while (--frameCount); + } +} + +template <int MIXTYPE, int NCHAN, + typename TO, typename TI, typename TV, typename TA, typename TAV> +inline void volumeMulti(TO* out, size_t frameCount, + const TI* in, TA* aux, const TV *vol, TAV vola) +{ +#ifdef ALOGVV + ALOGVV("volumeMulti MIXTYPE:%d\n", MIXTYPE); +#endif + if (aux != NULL) { + do { + TA auxaccum = 0; + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + } + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum); + } + in++; + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + auxaccum /= NCHAN; + *aux++ += MixMul<TA, TA, TAV>(auxaccum, vola); + } while (--frameCount); + } else { + do { + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in++, vol[i]); + } + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMul<TO, TI, TV>(*in++, vol[i]); + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in, vol[i]); + } + in++; + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + } while (--frameCount); + } +} + +}; + +#endif /* ANDROID_AUDIO_MIXER_OPS_H */ diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp index 562c4ea..b8a0357 100644 --- a/services/audioflinger/AudioResampler.cpp +++ b/services/audioflinger/AudioResampler.cpp @@ -259,13 
+259,14 @@ AudioResampler::AudioResampler(int bitDepth, int inChannelCount, mPhaseFraction(0), mLocalTimeFreq(0), mPTS(AudioBufferProvider::kInvalidPTS), mQuality(quality) { // sanity check on format - if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2)) { - ALOGE("Unsupported sample format, %d bits, %d channels", bitDepth, - inChannelCount); - // ALOG_ASSERT(0); + if ((bitDepth != 16 && (quality < DYN_LOW_QUALITY || bitDepth != 32)) + || inChannelCount < 1 + || inChannelCount > (quality < DYN_LOW_QUALITY ? 2 : 8)) { + LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d bits, %d channels", + quality, bitDepth, inChannelCount); } if (sampleRate <= 0) { - ALOGE("Unsupported sample rate %d Hz", sampleRate); + LOG_ALWAYS_FATAL("Unsupported sample rate %d Hz", sampleRate); } // initialize common members diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp index a4446a4..7ca10c1 100644 --- a/services/audioflinger/AudioResamplerDyn.cpp +++ b/services/audioflinger/AudioResamplerDyn.cpp @@ -38,11 +38,6 @@ namespace android { -// generate a unique resample type compile-time constant (constexpr) -#define RESAMPLETYPE(CHANNELS, LOCKED, STRIDE) \ - ((((CHANNELS)-1)&1) | !!(LOCKED)<<1 \ - | ((STRIDE)==8 ? 1 : (STRIDE)==16 ? 2 : 0)<<2) - /* * InBuffer is a type agnostic input buffer. * @@ -403,12 +398,76 @@ void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate) // determine which resampler to use // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits") int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0; - int stride = (c.mHalfNumCoefs&7)==0 ? 16 : (c.mHalfNumCoefs&3)==0 ? 8 : 2; if (locked) { mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase } - setResampler(RESAMPLETYPE(mChannelCount, locked, stride)); + // stride is the minimum number of filter coefficients processed per loop iteration. 
+ // We currently only allow a stride of 16 to match with SIMD processing. + // This means that the filter length must be a multiple of 16, + // or half the filter length (mHalfNumCoefs) must be a multiple of 8. + // + // Note: A stride of 2 is achieved with non-SIMD processing. + int stride = ((c.mHalfNumCoefs & 7) == 0) ? 16 : 2; + LOG_ALWAYS_FATAL_IF(stride < 16, "Resampler stride must be 16 or more"); + LOG_ALWAYS_FATAL_IF(mChannelCount > 8 || mChannelCount < 1, + "Resampler channels(%d) must be between 1 to 8", mChannelCount); + // stride 16 (falls back to stride 2 for machines that do not support NEON) + if (locked) { + switch (mChannelCount) { + case 1: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>; + break; + case 2: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>; + break; + case 3: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>; + break; + case 4: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>; + break; + case 5: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>; + break; + case 6: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>; + break; + case 7: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>; + break; + case 8: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>; + break; + } + } else { + switch (mChannelCount) { + case 1: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>; + break; + case 2: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>; + break; + case 3: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>; + break; + case 4: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>; + break; + case 5: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>; + break; + case 6: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>; + break; + 
case 7: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>; + break; + case 8: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>; + break; + } + } #ifdef DEBUG_RESAMPLER printf("channels:%d %s stride:%d %s coef:%d shift:%d\n", mChannelCount, locked ? "locked" : "interpolated", @@ -424,34 +483,12 @@ void AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount, } template<typename TC, typename TI, typename TO> -void AudioResamplerDyn<TC, TI, TO>::setResampler(unsigned resampleType) -{ - // stride 16 (falls back to stride 2 for machines that do not support NEON) - switch (resampleType) { - case RESAMPLETYPE(1, true, 16): - mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>; - return; - case RESAMPLETYPE(2, true, 16): - mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>; - return; - case RESAMPLETYPE(1, false, 16): - mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>; - return; - case RESAMPLETYPE(2, false, 16): - mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>; - return; - default: - LOG_ALWAYS_FATAL("Invalid resampler type: %u", resampleType); - mResampleFunc = NULL; - return; - } -} - -template<typename TC, typename TI, typename TO> template<int CHANNELS, bool LOCKED, int STRIDE> void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider) { + // TODO Mono -> Mono is not supported. OUTPUT_CHANNELS reflects minimum of stereo out. + const int OUTPUT_CHANNELS = (CHANNELS < 2) ? 
2 : CHANNELS; const Constants& c(mConstants); const TC* const coefs = mConstants.mFirCoefs; TI* impulse = mInBuffer.getImpulse(); @@ -459,10 +496,16 @@ void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, uint32_t phaseFraction = mPhaseFraction; const uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; - size_t outputSampleCount = outFrameCount * 2; // stereo output - size_t inFrameCount = getInFrameCountRequired(outFrameCount) + (phaseFraction != 0); - ALOG_ASSERT(0 < inFrameCount && inFrameCount < (1U << 31)); + size_t outputSampleCount = outFrameCount * OUTPUT_CHANNELS; const uint32_t phaseWrapLimit = c.mL << c.mShift; + size_t inFrameCount = (phaseIncrement * (uint64_t)outFrameCount + phaseFraction) + / phaseWrapLimit; + // sanity check that inFrameCount is in signed 32 bit integer range. + ALOG_ASSERT(0 <= inFrameCount && inFrameCount < (1U << 31)); + + //ALOGV("inFrameCount:%d outFrameCount:%d" + // " phaseIncrement:%u phaseFraction:%u phaseWrapLimit:%u", + // inFrameCount, outFrameCount, phaseIncrement, phaseFraction, phaseWrapLimit); // NOTE: be very careful when modifying the code here. register // pressure is very high and a small change might cause the compiler @@ -472,12 +515,19 @@ void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, // the following logic is a bit convoluted to keep the main processing loop // as tight as possible with register allocation. while (outputIndex < outputSampleCount) { - // buffer is empty, fetch a new one - while (mBuffer.frameCount == 0) { + //ALOGV("LOOP: inFrameCount:%d outputIndex:%d outFrameCount:%d" + // " phaseFraction:%u phaseWrapLimit:%u", + // inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit); + + // check inputIndex overflow + ALOG_ASSERT(inputIndex <= mBuffer.frameCount, "inputIndex%d > frameCount%d", + inputIndex, mBuffer.frameCount); + // Buffer is empty, fetch a new one if necessary (inFrameCount > 0). 
+ // We may not fetch a new buffer if the existing data is sufficient. + while (mBuffer.frameCount == 0 && inFrameCount > 0) { mBuffer.frameCount = inFrameCount; - ALOG_ASSERT(inFrameCount > 0); provider->getNextBuffer(&mBuffer, - calculateOutputPTS(outputIndex / 2)); + calculateOutputPTS(outputIndex / OUTPUT_CHANNELS)); if (mBuffer.raw == NULL) { goto resample_exit; } @@ -486,9 +536,9 @@ void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, mInBuffer.template readAdvance<CHANNELS>( impulse, c.mHalfNumCoefs, reinterpret_cast<TI*>(mBuffer.raw), inputIndex); + inputIndex++; phaseFraction -= phaseWrapLimit; while (phaseFraction >= phaseWrapLimit) { - inputIndex++; if (inputIndex >= mBuffer.frameCount) { inputIndex = 0; provider->releaseBuffer(&mBuffer); @@ -497,6 +547,7 @@ void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, mInBuffer.template readAdvance<CHANNELS>( impulse, c.mHalfNumCoefs, reinterpret_cast<TI*>(mBuffer.raw), inputIndex); + inputIndex++; phaseFraction -= phaseWrapLimit; } } @@ -507,9 +558,6 @@ void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, const int halfNumCoefs = c.mHalfNumCoefs; const TO* const volumeSimd = mVolumeSimd; - // reread the last input in. - mInBuffer.template readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); - // main processing loop while (CC_LIKELY(outputIndex < outputSampleCount)) { // caution: fir() is inlined and may be large. @@ -518,26 +566,34 @@ void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs] // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs. 
 // + //ALOGV("LOOP2: inFrameCount:%d outputIndex:%d outFrameCount:%d" + // " phaseFraction:%u phaseWrapLimit:%u", + // inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit); + ALOG_ASSERT(phaseFraction < phaseWrapLimit); fir<CHANNELS, LOCKED, STRIDE>( &out[outputIndex], phaseFraction, phaseWrapLimit, coefShift, halfNumCoefs, coefs, impulse, volumeSimd); - outputIndex += 2; + + outputIndex += OUTPUT_CHANNELS; phaseFraction += phaseIncrement; while (phaseFraction >= phaseWrapLimit) { - inputIndex++; if (inputIndex >= frameCount) { goto done; // need a new buffer } mInBuffer.template readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); + inputIndex++; phaseFraction -= phaseWrapLimit; } } done: - // often arrives here when input buffer runs out - if (inputIndex >= frameCount) { + // We arrive here when we're finished or when the input buffer runs out. + // Regardless we need to release the input buffer if we've acquired it. + if (inputIndex > 0) { // we've acquired a buffer (alternatively could check frameCount) + ALOG_ASSERT(inputIndex == frameCount, "inputIndex(%d) != frameCount(%d)", + inputIndex, frameCount); // must have been fully read. inputIndex = 0; provider->releaseBuffer(&mBuffer); ALOG_ASSERT(mBuffer.frameCount == 0); @@ -545,14 +601,12 @@ done: } resample_exit: - // Release frames to avoid the count being inaccurate for pts timing. - // TODO: Avoid this extra check by making fetch count exact. This is tricky - // due to the overfetching mechanism which loads unnecessarily when - // mBuffer.frameCount == 0. - if (inputIndex) { - mBuffer.frameCount = inputIndex; - provider->releaseBuffer(&mBuffer); - } + // inputIndex must be zero in all three cases: + // (1) the buffer was never acquired; (2) the buffer was + // released at "done:"; or (3) getNextBuffer() failed. 
+ ALOG_ASSERT(inputIndex == 0, "Releasing: inputindex:%d frameCount:%d phaseFraction:%u", + inputIndex, mBuffer.frameCount, phaseFraction); + ALOG_ASSERT(mBuffer.frameCount == 0); // there must be no frames in the buffer mInBuffer.setImpulse(impulse); mPhaseFraction = phaseFraction; } diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h index 8c56319..3dced8a 100644 --- a/services/audioflinger/AudioResamplerDyn.h +++ b/services/audioflinger/AudioResamplerDyn.h @@ -110,12 +110,10 @@ private: void createKaiserFir(Constants &c, double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat); - void setResampler(unsigned resampleType); - template<int CHANNELS, bool LOCKED, int STRIDE> void resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider); - // declare a pointer to member function for resample + // define a pointer to member function type for resample typedef void (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out, size_t outFrameCount, AudioBufferProvider* provider); diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h index 76d2d66..bb0f1c9 100644 --- a/services/audioflinger/AudioResamplerFirProcess.h +++ b/services/audioflinger/AudioResamplerFirProcess.h @@ -44,14 +44,14 @@ static inline void mac(float& l, float& r, TC coef, const float* samples) { l += *samples++ * coef; - r += *samples++ * coef; + r += *samples * coef; } template<typename TC> static inline void mac(float& l, TC coef, const float* samples) { - l += *samples++ * coef; + l += *samples * coef; } /* variant for output type TO = int32_t output samples */ @@ -69,62 +69,48 @@ float volumeAdjust(float value, float volume) } /* - * Calculates a single output frame (two samples). - * - * This function computes both the positive half FIR dot product and - * the negative half FIR dot product, accumulates, and then applies the volume. 
+ * Helper template functions for loop unrolling accumulator operations. * - * This is a locked phase filter (it does not compute the interpolation). - * - * Use fir() to compute the proper coefficient pointers for a polyphase - * filter bank. + * Unrolling the loops achieves about 2x gain. + * Using a recursive template rather than an array of TO[] for the accumulator + * values is an additional 10-20% gain. */ -template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO> -static inline -void ProcessL(TO* const out, - int count, - const TC* coefsP, - const TC* coefsN, - const TI* sP, - const TI* sN, - const TO* const volumeLR) +template<int CHANNELS, typename TO> +class Accumulator : public Accumulator<CHANNELS-1, TO> // recursive { - COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS >= 1 && CHANNELS <= 2) - if (CHANNELS == 2) { - TO l = 0; - TO r = 0; - do { - mac(l, r, *coefsP++, sP); - sP -= CHANNELS; - mac(l, r, *coefsN++, sN); - sN += CHANNELS; - } while (--count > 0); - out[0] += volumeAdjust(l, volumeLR[0]); - out[1] += volumeAdjust(r, volumeLR[1]); - } else { /* CHANNELS == 1 */ - TO l = 0; - do { - mac(l, *coefsP++, sP); - sP -= CHANNELS; - mac(l, *coefsN++, sN); - sN += CHANNELS; - } while (--count > 0); - out[0] += volumeAdjust(l, volumeLR[0]); - out[1] += volumeAdjust(l, volumeLR[1]); +public: + inline void clear() { + value = 0; + Accumulator<CHANNELS-1, TO>::clear(); } -} + template<typename TC, typename TI> + inline void acc(TC coef, const TI*& data) { + mac(value, coef, data++); + Accumulator<CHANNELS-1, TO>::acc(coef, data); + } + inline void volume(TO*& out, TO gain) { + *out++ = volumeAdjust(value, gain); + Accumulator<CHANNELS-1, TO>::volume(out, gain); + } + + TO value; // one per recursive inherited base class +}; + +template<typename TO> +class Accumulator<0, TO> { +public: + inline void clear() { + } + template<typename TC, typename TI> + inline void acc(TC coef __unused, const TI*& data __unused) { + } + inline void volume(TO*& out 
__unused, TO gain __unused) { + } +}; /* - * Calculates a single output frame (two samples) interpolating phase. - * - * This function computes both the positive half FIR dot product and - * the negative half FIR dot product, accumulates, and then applies the volume. - * - * This is an interpolated phase filter. - * - * Use fir() to compute the proper coefficient pointers for a polyphase - * filter bank. + * Helper template functions for interpolating filter coefficients. */ template<typename TC, typename T> @@ -159,30 +145,98 @@ int32_t interpolate(int32_t coef_0, int32_t coef_1, uint32_t lerp) return mulAdd(static_cast<int16_t>(lerp), (coef_1-coef_0)<<1, coef_0); } -template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP> +/* class scope for passing in functions into templates */ +struct InterpCompute { + template<typename TC, typename TINTERP> + static inline + TC interpolatep(TC coef_0, TC coef_1, TINTERP lerp) { + return interpolate(coef_0, coef_1, lerp); + } + + template<typename TC, typename TINTERP> + static inline + TC interpolaten(TC coef_0, TC coef_1, TINTERP lerp) { + return interpolate(coef_0, coef_1, lerp); + } +}; + +struct InterpNull { + template<typename TC, typename TINTERP> + static inline + TC interpolatep(TC coef_0, TC coef_1 __unused, TINTERP lerp __unused) { + return coef_0; + } + + template<typename TC, typename TINTERP> + static inline + TC interpolaten(TC coef_0 __unused, TC coef_1, TINTERP lerp __unused) { + return coef_1; + } +}; + +/* + * Calculates a single output frame (two samples). + * + * The Process*() functions compute both the positive half FIR dot product and + * the negative half FIR dot product, accumulates, and then applies the volume. + * + * Use fir() to compute the proper coefficient pointers for a polyphase + * filter bank. + * + * ProcessBase() is the fundamental processing template function. + * + * ProcessL() calls ProcessBase() with TFUNC = InterpNull, for fixed/locked phase. 
+ * Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase. + */ + +template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO, typename TINTERP> static inline -void Process(TO* const out, +void ProcessBase(TO* const out, int count, const TC* coefsP, const TC* coefsN, - const TC* coefsP1 __unused, - const TC* coefsN1 __unused, const TI* sP, const TI* sN, TINTERP lerpP, const TO* const volumeLR) { - COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS >= 1 && CHANNELS <= 2) - adjustLerp<TC, TINTERP>(lerpP); // coefficient type adjustment for interpolation + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS > 0) - if (CHANNELS == 2) { + if (CHANNELS > 2) { + // TO accum[CHANNELS]; + Accumulator<CHANNELS, TO> accum; + + // for (int j = 0; j < CHANNELS; ++j) accum[j] = 0; + accum.clear(); + for (size_t i = 0; i < count; ++i) { + TC c = TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP); + + // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sP + j); + const TI *tmp_data = sP; // tmp_ptr seems to work better + accum.acc(c, tmp_data); + + coefsP++; + sP -= CHANNELS; + c = TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP); + + // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sN + j); + tmp_data = sN; // tmp_ptr seems faster than directly using sN + accum.acc(c, tmp_data); + + coefsN++; + sN += CHANNELS; + } + // for (int j = 0; j < CHANNELS; ++j) out[j] += volumeAdjust(accum[j], volumeLR[0]); + TO *tmp_out = out; // may remove if const out definition changes. 
+ accum.volume(tmp_out, volumeLR[0]); + } else if (CHANNELS == 2) { TO l = 0; TO r = 0; for (size_t i = 0; i < count; ++i) { - mac(l, r, interpolate(coefsP[0], coefsP[count], lerpP), sP); + mac(l, r, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP); coefsP++; sP -= CHANNELS; - mac(l, r, interpolate(coefsN[count], coefsN[0], lerpP), sN); + mac(l, r, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN); coefsN++; sN += CHANNELS; } @@ -191,10 +245,10 @@ void Process(TO* const out, } else { /* CHANNELS == 1 */ TO l = 0; for (size_t i = 0; i < count; ++i) { - mac(l, interpolate(coefsP[0], coefsP[count], lerpP), sP); + mac(l, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP); coefsP++; sP -= CHANNELS; - mac(l, interpolate(coefsN[count], coefsN[0], lerpP), sN); + mac(l, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN); coefsN++; sN += CHANNELS; } @@ -203,6 +257,36 @@ void Process(TO* const out, } } +template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO> +static inline +void ProcessL(TO* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TI* sP, + const TI* sN, + const TO* const volumeLR) +{ + ProcessBase<CHANNELS, STRIDE, InterpNull>(out, count, coefsP, coefsN, sP, sN, 0, volumeLR); +} + +template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP> +static inline +void Process(TO* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TC* coefsP1 __unused, + const TC* coefsN1 __unused, + const TI* sP, + const TI* sN, + TINTERP lerpP, + const TO* const volumeLR) +{ + adjustLerp<TC, TINTERP>(lerpP); // coefficient type adjustment for interpolations + ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR); +} + /* * Calculates a single output frame (two samples) from input sample pointer. 
* diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp index 13b21ec..c486630 100644 --- a/services/audioflinger/FastMixer.cpp +++ b/services/audioflinger/FastMixer.cpp @@ -273,10 +273,9 @@ void FastMixer::onStateChange() ALOG_ASSERT(name >= 0); mixer->setBufferProvider(name, bufferProvider); if (fastTrack->mVolumeProvider == NULL) { - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, - (void *) MAX_GAIN_INT); - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, - (void *) MAX_GAIN_INT); + float f = AudioMixer::UNITY_GAIN_FLOAT; + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f); } mixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::REMOVE, NULL); @@ -336,12 +335,11 @@ void FastMixer::onWork() ALOG_ASSERT(name >= 0); if (fastTrack->mVolumeProvider != NULL) { gain_minifloat_packed_t vlr = fastTrack->mVolumeProvider->getVolumeLR(); - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, - (void *) (uintptr_t) - (float_from_gain(gain_minifloat_unpack_left(vlr)) * MAX_GAIN_INT)); - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, - (void *) (uintptr_t) - (float_from_gain(gain_minifloat_unpack_right(vlr)) * MAX_GAIN_INT)); + float vlf = float_from_gain(gain_minifloat_unpack_left(vlr)); + float vrf = float_from_gain(gain_minifloat_unpack_right(vlr)); + + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf); } // FIXME The current implementation of framesReady() for fast tracks // takes a tryLock, which can block diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp index 96a8127..6d84296 100644 --- a/services/audioflinger/PatchPanel.cpp +++ b/services/audioflinger/PatchPanel.cpp @@ -188,7 +188,7 @@ status_t 
AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *pa } // limit to connections between sinks and sources on same HW module if (patch->sinks[i].ext.mix.hw_module != src_module) { - ALOGW("createAudioPatch() cannot connect source on module %d to" + ALOGW("createAudioPatch() cannot connect source on module %d to " "sink on module %d", src_module, patch->sinks[i].ext.mix.hw_module); return BAD_VALUE; } @@ -235,7 +235,7 @@ status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *pa param.addInt(String8(AudioParameter::keyInputSource), (int)patch->sinks[0].ext.mix.usecase.source); - ALOGW("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s", + ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s", param.toString().string()); status = thread->setParameters(param.toString()); } @@ -354,7 +354,7 @@ status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle } AudioParameter param; param.addInt(String8(AudioParameter::keyRouting), 0); - ALOGW("releaseAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s", + ALOGV("releaseAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s", param.toString().string()); status = thread->setParameters(param.toString()); } diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h index 6f1f293..79bdfe8 100644 --- a/services/audioflinger/PlaybackTracks.h +++ b/services/audioflinger/PlaybackTracks.h @@ -54,6 +54,7 @@ public: return mStreamType; } bool isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; } + bool isDirect() const { return (mFlags & IAudioFlinger::TRACK_DIRECT) != 0; } status_t setParameters(const String8& keyValuePairs); status_t attachAuxEffect(int EffectId); void setAuxBuffer(int EffectId, int32_t *buffer); @@ -157,6 +158,12 @@ private: AudioTrackServerProxy* mAudioTrackServerProxy; bool mResumeToStopping; // track was paused in stopping state. 
bool mFlushHwPending; // track requests for thread flush + + // for last call to getTimestamp + bool mPreviousValid; + uint32_t mPreviousFramesWritten; + AudioTimestamp mPreviousTimestamp; + }; // end of Track class TimedTrack : public Track { diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp index 152455d..8246fef 100644 --- a/services/audioflinger/ServiceUtilities.cpp +++ b/services/audioflinger/ServiceUtilities.cpp @@ -59,6 +59,13 @@ bool settingsAllowed() { return ok; } +bool modifyAudioRoutingAllowed() { + static const String16 sModifyAudioRoutingAllowed("android.permission.MODIFY_AUDIO_ROUTING"); + bool ok = checkCallingPermission(sModifyAudioRoutingAllowed); + if (!ok) ALOGE("android.permission.MODIFY_AUDIO_ROUTING"); + return ok; +} + bool dumpAllowed() { // don't optimize for same pid, since mediaserver never dumps itself static const String16 sDump("android.permission.DUMP"); diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h index 531bc56..df6f6f4 100644 --- a/services/audioflinger/ServiceUtilities.h +++ b/services/audioflinger/ServiceUtilities.h @@ -24,6 +24,7 @@ bool recordingAllowed(); bool captureAudioOutputAllowed(); bool captureHotwordAllowed(); bool settingsAllowed(); +bool modifyAudioRoutingAllowed(); bool dumpAllowed(); } diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp index 742163b..67a0119 100644..100755 --- a/services/audioflinger/Threads.cpp +++ b/services/audioflinger/Threads.cpp @@ -38,6 +38,7 @@ #include <audio_utils/minifloat.h> // NBAIO implementations +#include <media/nbaio/AudioStreamInSource.h> #include <media/nbaio/AudioStreamOutSink.h> #include <media/nbaio/MonoPipe.h> #include <media/nbaio/MonoPipeReader.h> @@ -53,6 +54,7 @@ #include "AudioFlinger.h" #include "AudioMixer.h" #include "FastMixer.h" +#include "FastCapture.h" #include "ServiceUtilities.h" #include "SchedulingPolicyService.h" @@ -131,9 
+133,17 @@ static const enum { // up large writes into smaller ones, and the wrapper would need to deal with scheduler. } kUseFastMixer = FastMixer_Static; +// Whether to use fast capture +static const enum { + FastCapture_Never, // never initialize or use: for debugging only + FastCapture_Always, // always initialize and use, even if not needed: for debugging only + FastCapture_Static, // initialize if needed, then use all the time if initialized +} kUseFastCapture = FastCapture_Static; + // Priorities for requestPriority static const int kPriorityAudioApp = 2; static const int kPriorityFastMixer = 3; +static const int kPriorityFastCapture = 3; // IAudioFlinger::createTrack() reports back to client the total size of shared memory area // for the track. The client then sub-divides this into smaller buffers for its use. @@ -1147,12 +1157,12 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge type_t type) : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type), mNormalFrameCount(0), mSinkBuffer(NULL), - mMixerBufferEnabled(false), + mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision), mMixerBuffer(NULL), mMixerBufferSize(0), mMixerBufferFormat(AUDIO_FORMAT_INVALID), mMixerBufferValid(false), - mEffectBufferEnabled(false), + mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision), mEffectBuffer(NULL), mEffectBufferSize(0), mEffectBufferFormat(AUDIO_FORMAT_INVALID), @@ -1391,9 +1401,10 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac frameCount, mFrameCount); } else { ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d " - "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u " + "mFrameCount=%d format=%#x mFormat=%#x isLinear=%d channelMask=%#x " + "sampleRate=%u mSampleRate=%u " "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x", - isTimed, sharedBuffer.get(), frameCount, mFrameCount, format, + isTimed, sharedBuffer.get(), 
frameCount, mFrameCount, format, mFormat, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask); *flags &= ~IAudioFlinger::TRACK_FAST; @@ -1650,7 +1661,7 @@ bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track) track->mState = TrackBase::STOPPED; if (!trackActive) { removeTrack_l(track); - } else if (track->isFastTrack() || track->isOffloaded()) { + } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) { track->mState = TrackBase::STOPPING_1; } @@ -1799,9 +1810,10 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l() if (!audio_is_valid_format(mFormat)) { LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat); } - if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) { - LOG_ALWAYS_FATAL("HAL format %#x not supported for mixed output; " - "must be AUDIO_FORMAT_PCM_16_BIT", mFormat); + if ((mType == MIXER || mType == DUPLICATING) + && !isValidPcmSinkFormat(mFormat)) { + LOG_FATAL("HAL format %#x not supported for mixed output", + mFormat); } mFrameSize = audio_stream_frame_size(&mOutput->stream->common); mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common); @@ -1858,7 +1870,9 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l() } mNormalFrameCount = multiplier * mFrameCount; // round up to nearest 16 frames to satisfy AudioMixer - mNormalFrameCount = (mNormalFrameCount + 15) & ~15; + if (mType == MIXER || mType == DUPLICATING) { + mNormalFrameCount = (mNormalFrameCount + 15) & ~15; + } ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount, mNormalFrameCount); @@ -2646,7 +2660,7 @@ status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp) if (mNormalSink != 0) { return mNormalSink->getTimestamp(timestamp); } - if (mType == OFFLOAD && mOutput->stream->get_presentation_position) { + if ((mType == OFFLOAD || mType == DIRECT) && 
mOutput->stream->get_presentation_position) { uint64_t position64; int ret = mOutput->stream->get_presentation_position( mOutput->stream, &position64, ×tamp.mTime); @@ -2850,8 +2864,6 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud } #endif - } else { - mFastMixer = NULL; } switch (kUseFastMixer) { @@ -2870,7 +2882,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud AudioFlinger::MixerThread::~MixerThread() { - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (state->mCommand == FastMixerState::COLD_IDLE) { @@ -2892,7 +2904,7 @@ AudioFlinger::MixerThread::~MixerThread() ALOG_ASSERT(fastTrack->mBufferProvider != NULL); delete fastTrack->mBufferProvider; sq->end(false /*didModify*/); - delete mFastMixer; + mFastMixer.clear(); #ifdef AUDIO_WATCHDOG if (mAudioWatchdog != 0) { mAudioWatchdog->requestExit(); @@ -2908,7 +2920,7 @@ AudioFlinger::MixerThread::~MixerThread() uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t latency) const { - if (mFastMixer != NULL) { + if (mFastMixer != 0) { MonoPipe *pipe = (MonoPipe *)mPipeSink.get(); latency += (pipe->getAvgFrames() * 1000) / mSampleRate; } @@ -2925,7 +2937,7 @@ ssize_t AudioFlinger::MixerThread::threadLoop_write() { // FIXME we should only do one push per cycle; confirm this is true // Start the fast mixer if it's not already running - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (state->mCommand != FastMixerState::MIX_WRITE && @@ -2959,7 +2971,7 @@ ssize_t AudioFlinger::MixerThread::threadLoop_write() void AudioFlinger::MixerThread::threadLoop_standby() { // Idle the fast mixer if it's currently running - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (!(state->mCommand & 
FastMixerState::IDLE)) { @@ -3122,7 +3134,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac FastMixerState *state = NULL; bool didModify = false; FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED; - if (mFastMixer != NULL) { + if (mFastMixer != 0) { sq = mFastMixer->sq(); state = sq->begin(); } @@ -3369,9 +3381,11 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac } // compute volume for this track - uint32_t vl, vr, va; + uint32_t vl, vr; // in U8.24 integer format + float vlf, vrf, vaf; // in [0.0, 1.0] float format if (track->isPausing() || mStreamTypes[track->streamType()].mute) { - vl = vr = va = 0; + vl = vr = 0; + vlf = vrf = vaf = 0.; if (track->isPausing()) { track->setPaused(); } @@ -3382,8 +3396,8 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac float v = masterVolume * typeVolume; AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy; gain_minifloat_packed_t vlr = proxy->getVolumeLR(); - float vlf = float_from_gain(gain_minifloat_unpack_left(vlr)); - float vrf = float_from_gain(gain_minifloat_unpack_right(vlr)); + vlf = float_from_gain(gain_minifloat_unpack_left(vlr)); + vrf = float_from_gain(gain_minifloat_unpack_right(vlr)); // track volumes come from shared memory, so can't be trusted and must be clamped if (vlf > GAIN_FLOAT_UNITY) { ALOGV("Track left volume out of range: %.3g", vlf); @@ -3394,26 +3408,31 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac vrf = GAIN_FLOAT_UNITY; } // now apply the master volume and stream type volume - // FIXME we're losing the wonderful dynamic range in the minifloat representation - float v8_24 = v * (MAX_GAIN_INT * MAX_GAIN_INT); - vl = (uint32_t) (v8_24 * vlf); - vr = (uint32_t) (v8_24 * vrf); + vlf *= v; + vrf *= v; // assuming master volume and stream type volume each go up to 1.0, - // vl and vr are now in 8.24 format - + // then derive vl 
and vr as U8.24 versions for the effect chain + const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT; + vl = (uint32_t) (scaleto8_24 * vlf); + vr = (uint32_t) (scaleto8_24 * vrf); + // vl and vr are now in U8.24 format uint16_t sendLevel = proxy->getSendLevel_U4_12(); // send level comes from shared memory and so may be corrupt if (sendLevel > MAX_GAIN_INT) { ALOGV("Track send level out of range: %04X", sendLevel); sendLevel = MAX_GAIN_INT; } - va = (uint32_t)(v * sendLevel); + // vaf is represented as [0.0, 1.0] float by rescaling sendLevel + vaf = v * sendLevel * (1. / MAX_GAIN_INT); } // Delegate volume control to effect in track effect chain if needed if (chain != 0 && chain->setVolume_l(&vl, &vr)) { // Do not ramp volume if volume is controlled by effect param = AudioMixer::VOLUME; + // Update remaining floating point volume levels + vlf = (float)vl / (1 << 24); + vrf = (float)vr / (1 << 24); track->mHasVolumeController = true; } else { // force no volume ramp when volume controller was just disabled or removed @@ -3424,29 +3443,13 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac track->mHasVolumeController = false; } - // FIXME Use float - // Convert volumes from 8.24 to 4.12 format - // This additional clamping is needed in case chain->setVolume_l() overshot - vl = (vl + (1 << 11)) >> 12; - if (vl > MAX_GAIN_INT) { - vl = MAX_GAIN_INT; - } - vr = (vr + (1 << 11)) >> 12; - if (vr > MAX_GAIN_INT) { - vr = MAX_GAIN_INT; - } - - if (va > MAX_GAIN_INT) { - va = MAX_GAIN_INT; // va is uint32_t, so no need to check for - - } - // XXX: these things DON'T need to be done each time mAudioMixer->setBufferProvider(name, track); mAudioMixer->enable(name); - mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)(uintptr_t)vl); - mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)(uintptr_t)vr); - mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)(uintptr_t)va); + 
mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, &vlf); + mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, &vrf); + mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, &vaf); mAudioMixer->setParameter( name, AudioMixer::TRACK, @@ -3674,7 +3677,7 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa // if !&IDLE, holds the FastMixer state to restore after new parameters processed FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE; - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (!(state->mCommand & FastMixerState::IDLE)) { @@ -3779,7 +3782,7 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa } if (!(previousCommand & FastMixerState::IDLE)) { - ALOG_ASSERT(mFastMixer != NULL); + ALOG_ASSERT(mFastMixer != 0); FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE); @@ -3946,14 +3949,16 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep // The first time a track is added we wait // for all its buffers to be filled before processing it uint32_t minFrames; - if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) { + if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()) { minFrames = mNormalFrameCount; } else { minFrames = 1; } - if ((track->framesReady() >= minFrames) && track->isReady() && - !track->isPaused() && !track->isTerminated()) + ALOGI("prepareTracks_l minFrames %d state %d frames ready %d, ", + minFrames, track->mState, track->framesReady()); + if ((track->framesReady() >= minFrames) && track->isReady() && !track->isPaused() && + !track->isStopping_2() && !track->isStopped()) { ALOGVV("track %d s=%08x [OK]", track->name(), cblk->mServer); @@ -3980,17 +3985,26 @@ 
AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep if (!mEffectChains.isEmpty() && last) { mEffectChains[0]->clearInputBuffer(); } - - ALOGVV("track %d s=%08x [NOT READY]", track->name(), cblk->mServer); - if ((track->sharedBuffer() != 0) || track->isTerminated() || - track->isStopped() || track->isPaused()) { + if (track->isStopping_1()) { + track->mState = TrackBase::STOPPING_2; + } + if ((track->sharedBuffer() != 0) || track->isStopped() || + track->isStopping_2() || track->isPaused()) { // We have consumed all the buffers of this track. // Remove it from the list of active tracks. - // TODO: implement behavior for compressed audio - size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; + size_t audioHALFrames; + if (audio_is_linear_pcm(mFormat)) { + audioHALFrames = (latency_l() * mSampleRate) / 1000; + } else { + audioHALFrames = 0; + } + size_t framesWritten = mBytesWritten / mFrameSize; if (mStandby || !last || track->presentationComplete(framesWritten, audioHALFrames)) { + if (track->isStopping_2()) { + track->mState = TrackBase::STOPPED; + } if (track->isStopped()) { track->reset(); } @@ -4760,16 +4774,151 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, #endif , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize, "RecordThreadRO", MemoryHeapBase::READ_ONLY)) + // mFastCapture below + , mFastCaptureFutex(0) + // mInputSource + // mPipeSink + // mPipeSource + , mPipeFramesP2(0) + // mPipeMemory + // mFastCaptureNBLogWriter + , mFastTrackAvail(true) { snprintf(mName, kNameLength, "AudioIn_%X", id); mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName); readInputParameters_l(); + + // create an NBAIO source for the HAL input stream, and negotiate + mInputSource = new AudioStreamInSource(input->stream); + size_t numCounterOffers = 0; + const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)}; + ssize_t index = mInputSource->negotiate(offers, 1, 
NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + + // initialize fast capture depending on configuration + bool initFastCapture; + switch (kUseFastCapture) { + case FastCapture_Never: + initFastCapture = false; + break; + case FastCapture_Always: + initFastCapture = true; + break; + case FastCapture_Static: + uint32_t primaryOutputSampleRate; + { + AutoMutex _l(audioFlinger->mHardwareLock); + primaryOutputSampleRate = audioFlinger->mPrimaryOutputSampleRate; + } + initFastCapture = + // either capture sample rate is same as (a reasonable) primary output sample rate + (((primaryOutputSampleRate == 44100 || primaryOutputSampleRate == 48000) && + (mSampleRate == primaryOutputSampleRate)) || + // or primary output sample rate is unknown, and capture sample rate is reasonable + ((primaryOutputSampleRate == 0) && + ((mSampleRate == 44100 || mSampleRate == 48000)))) && + // and the buffer size is < 10 ms + (mFrameCount * 1000) / mSampleRate < 10; + break; + // case FastCapture_Dynamic: + } + + if (initFastCapture) { + // create a Pipe for FastMixer to write to, and for us and fast tracks to read from + NBAIO_Format format = mInputSource->format(); + size_t pipeFramesP2 = roundup(mFrameCount * 8); + size_t pipeSize = pipeFramesP2 * Format_frameSize(format); + void *pipeBuffer; + const sp<MemoryDealer> roHeap(readOnlyHeap()); + sp<IMemory> pipeMemory; + if ((roHeap == 0) || + (pipeMemory = roHeap->allocate(pipeSize)) == 0 || + (pipeBuffer = pipeMemory->pointer()) == NULL) { + ALOGE("not enough memory for pipe buffer size=%zu", pipeSize); + goto failed; + } + // pipe will be shared directly with fast clients, so clear to avoid leaking old information + memset(pipeBuffer, 0, pipeSize); + Pipe *pipe = new Pipe(pipeFramesP2, format, pipeBuffer); + const NBAIO_Format offers[1] = {format}; + size_t numCounterOffers = 0; + ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mPipeSink = pipe; + PipeReader *pipeReader = new 
PipeReader(*pipe); + numCounterOffers = 0; + index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mPipeSource = pipeReader; + mPipeFramesP2 = pipeFramesP2; + mPipeMemory = pipeMemory; + + // create fast capture + mFastCapture = new FastCapture(); + FastCaptureStateQueue *sq = mFastCapture->sq(); +#ifdef STATE_QUEUE_DUMP + // FIXME +#endif + FastCaptureState *state = sq->begin(); + state->mCblk = NULL; + state->mInputSource = mInputSource.get(); + state->mInputSourceGen++; + state->mPipeSink = pipe; + state->mPipeSinkGen++; + state->mFrameCount = mFrameCount; + state->mCommand = FastCaptureState::COLD_IDLE; + // already done in constructor initialization list + //mFastCaptureFutex = 0; + state->mColdFutexAddr = &mFastCaptureFutex; + state->mColdGen++; + state->mDumpState = &mFastCaptureDumpState; +#ifdef TEE_SINK + // FIXME +#endif + mFastCaptureNBLogWriter = audioFlinger->newWriter_l(kFastCaptureLogSize, "FastCapture"); + state->mNBLogWriter = mFastCaptureNBLogWriter.get(); + sq->end(); + sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED); + + // start the fast capture + mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO); + pid_t tid = mFastCapture->getTid(); + int err = requestPriority(getpid_cached, tid, kPriorityFastMixer); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + kPriorityFastCapture, getpid_cached, tid, err); + } + +#ifdef AUDIO_WATCHDOG + // FIXME +#endif + + } +failed: ; + + // FIXME mNormalSource } AudioFlinger::RecordThread::~RecordThread() { + if (mFastCapture != 0) { + FastCaptureStateQueue *sq = mFastCapture->sq(); + FastCaptureState *state = sq->begin(); + if (state->mCommand == FastCaptureState::COLD_IDLE) { + int32_t old = android_atomic_inc(&mFastCaptureFutex); + if (old == -1) { + (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1); + } + } + state->mCommand = FastCaptureState::EXIT; + sq->end(); + 
sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED); + mFastCapture->join(); + mFastCapture.clear(); + } + mAudioFlinger->unregisterWriter(mFastCaptureNBLogWriter); mAudioFlinger->unregisterWriter(mNBLogWriter); delete[] mRsmpInBuffer; } @@ -4824,6 +4973,8 @@ reacquire_wakelock: // activeTracks accumulates a copy of a subset of mActiveTracks Vector< sp<RecordTrack> > activeTracks; + // reference to the (first and only) fast track + sp<RecordTrack> fastTrack; { // scope for mLock Mutex::Autolock _l(mLock); @@ -4905,6 +5056,11 @@ reacquire_wakelock: activeTracks.add(activeTrack); i++; + if (activeTrack->isFastTrack()) { + ALOG_ASSERT(!mFastTrackAvail); + ALOG_ASSERT(fastTrack == 0); + fastTrack = activeTrack; + } } if (doBroadcast) { mStartStopCond.broadcast(); @@ -4930,6 +5086,36 @@ reacquire_wakelock: effectChains[i]->process_l(); } + // Start the fast capture if it's not already running + if (mFastCapture != 0) { + FastCaptureStateQueue *sq = mFastCapture->sq(); + FastCaptureState *state = sq->begin(); + if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME && + (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) { + if (state->mCommand == FastCaptureState::COLD_IDLE) { + int32_t old = android_atomic_inc(&mFastCaptureFutex); + if (old == -1) { + (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1); + } + } + state->mCommand = FastCaptureState::READ_WRITE; +#if 0 // FIXME + mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ? + FastCaptureDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN); +#endif + state->mCblk = fastTrack != 0 ? fastTrack->cblk() : NULL; + sq->end(); + sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED); +#if 0 + if (kUseFastCapture == FastCapture_Dynamic) { + mNormalSource = mPipeSource; + } +#endif + } else { + sq->end(false /*didModify*/); + } + } + // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one. 
// Only the client(s) that are too slow will overrun. But if even the fastest client is too // slow, then this RecordThread will overrun by not calling HAL read often enough. @@ -4937,26 +5123,49 @@ reacquire_wakelock: // copy to the right place. Permitted because mRsmpInBuffer was over-allocated. int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1); - ssize_t bytesRead = mInput->stream->read(mInput->stream, - &mRsmpInBuffer[rear * mChannelCount], mBufferSize); - if (bytesRead <= 0) { - ALOGE("read failed: bytesRead=%d < %u", bytesRead, mBufferSize); + ssize_t framesRead; + + // If an NBAIO source is present, use it to read the normal capture's data + if (mPipeSource != 0) { + size_t framesToRead = mBufferSize / mFrameSize; + framesRead = mPipeSource->read(&mRsmpInBuffer[rear * mChannelCount], + framesToRead, AudioBufferProvider::kInvalidPTS); + if (framesRead == 0) { + // since pipe is non-blocking, simulate blocking input + sleepUs = (framesToRead * 1000000LL) / mSampleRate; + } + // otherwise use the HAL / AudioStreamIn directly + } else { + ssize_t bytesRead = mInput->stream->read(mInput->stream, + &mRsmpInBuffer[rear * mChannelCount], mBufferSize); + if (bytesRead < 0) { + framesRead = bytesRead; + } else { + framesRead = bytesRead / mFrameSize; + } + } + + if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) { + ALOGE("read failed: framesRead=%d", framesRead); // Force input into standby so that it tries to recover at next read attempt inputStandBy(); sleepUs = kRecordThreadSleepUs; - continue; } - ALOG_ASSERT((size_t) bytesRead <= mBufferSize); - size_t framesRead = bytesRead / mFrameSize; + if (framesRead <= 0) { + goto unlock; + } ALOG_ASSERT(framesRead > 0); + if (mTeeSink != 0) { (void) mTeeSink->write(&mRsmpInBuffer[rear * mChannelCount], framesRead); } // If destination is non-contiguous, we now correct for reading past end of buffer. 
- size_t part1 = mRsmpInFramesP2 - rear; - if (framesRead > part1) { - memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount], - (framesRead - part1) * mFrameSize); + { + size_t part1 = mRsmpInFramesP2 - rear; + if ((size_t) framesRead > part1) { + memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount], + (framesRead - part1) * mFrameSize); + } } rear = mRsmpInRear += framesRead; @@ -4965,6 +5174,11 @@ reacquire_wakelock: for (size_t i = 0; i < size; i++) { activeTrack = activeTracks[i]; + // skip fast tracks, as those are handled directly by FastCapture + if (activeTrack->isFastTrack()) { + continue; + } + enum { OVERRUN_UNKNOWN, OVERRUN_TRUE, @@ -5159,6 +5373,7 @@ reacquire_wakelock: } +unlock: // enable changes in effect chain unlockEffectChains(effectChains); // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end @@ -5193,6 +5408,30 @@ void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby() void AudioFlinger::RecordThread::inputStandBy() { + // Idle the fast capture if it's currently running + if (mFastCapture != 0) { + FastCaptureStateQueue *sq = mFastCapture->sq(); + FastCaptureState *state = sq->begin(); + if (!(state->mCommand & FastCaptureState::IDLE)) { + state->mCommand = FastCaptureState::COLD_IDLE; + state->mColdFutexAddr = &mFastCaptureFutex; + state->mColdGen++; + mFastCaptureFutex = 0; + sq->end(); + // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now + sq->push(FastCaptureStateQueue::BLOCK_UNTIL_ACKED); +#if 0 + if (kUseFastCapture == FastCapture_Dynamic) { + // FIXME + } +#endif +#ifdef AUDIO_WATCHDOG + // FIXME +#endif + } else { + sq->end(false /*didModify*/); + } + } mInput->stream->common.standby(&mInput->stream->common); } @@ -5219,36 +5458,40 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRe // use case: callback handler and frame count is default or at least as large as HAL ( (tid != -1) && - ((frameCount 
== 0) || + ((frameCount == 0) /*|| + // FIXME must be equal to pipe depth, so don't allow it to be specified by client // FIXME not necessarily true, should be native frame count for native SR! - (frameCount >= mFrameCount)) + (frameCount >= mFrameCount)*/) ) && // PCM data audio_is_linear_pcm(format) && + // native format + (format == mFormat) && // mono or stereo ( (channelMask == AUDIO_CHANNEL_IN_MONO) || (channelMask == AUDIO_CHANNEL_IN_STEREO) ) && - // hardware sample rate - // FIXME actually the native hardware sample rate + // native channel mask + (channelMask == mChannelMask) && + // native hardware sample rate (sampleRate == mSampleRate) && // record thread has an associated fast capture - hasFastCapture() - // fast capture does not require slots + hasFastCapture() && + // there are sufficient fast track slots available + mFastTrackAvail ) { - // if frameCount not specified, then it defaults to fast capture (HAL) frame count + // if frameCount not specified, then it defaults to pipe frame count if (frameCount == 0) { - // FIXME wrong mFrameCount - frameCount = mFrameCount * kFastTrackMultiplier; + frameCount = mPipeFramesP2; } ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d", frameCount, mFrameCount); } else { ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d " "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u " - "hasFastCapture=%d tid=%d", + "hasFastCapture=%d tid=%d mFastTrackAvail=%d", frameCount, mFrameCount, format, audio_is_linear_pcm(format), - channelMask, sampleRate, mSampleRate, hasFastCapture(), tid); + channelMask, sampleRate, mSampleRate, hasFastCapture(), tid, mFastTrackAvail); *flags &= ~IAudioFlinger::TRACK_FAST; // FIXME It's not clear that we need to enforce this any more, since we have a pipe. 
// For compatibility with AudioRecord calculation, buffer depth is forced @@ -5477,6 +5720,10 @@ void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track) { mTracks.remove(track); // need anything related to effects here? + if (track->isFastTrack()) { + ALOG_ASSERT(!mFastTrackAvail); + mFastTrackAvail = true; + } } void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args) @@ -5495,6 +5742,7 @@ void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& a } else { dprintf(fd, " No active record clients\n"); } + dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no"); dumpBase(fd, args); } diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h index eeb33d9..3eb1eb9 100644 --- a/services/audioflinger/Threads.h +++ b/services/audioflinger/Threads.h @@ -851,7 +851,7 @@ protected: AudioMixer* mAudioMixer; // normal mixer private: // one-time initialization, no locks required - FastMixer* mFastMixer; // non-NULL if there is also a fast mixer + sp<FastMixer> mFastMixer; // non-0 if there is also a fast mixer sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread // contents are not guaranteed to be consistent, no locks required @@ -867,7 +867,7 @@ private: int32_t mFastMixerFutex; // for cold idle public: - virtual bool hasFastMixer() const { return mFastMixer != NULL; } + virtual bool hasFastMixer() const { return mFastMixer != 0; } virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const { ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks); return mFastMixerDumpState.mTracks[fastIndex].mUnderruns; @@ -1063,6 +1063,8 @@ public: virtual sp<MemoryDealer> readOnlyHeap() const { return mReadOnlyHeap; } + virtual sp<IMemory> pipeMemory() const { return mPipeMemory; } + sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, @@ -1114,7 +1116,7 @@ public: static void 
syncStartEventCallback(const wp<SyncEvent>& event); virtual size_t frameCount() const { return mFrameCount; } - bool hasFastCapture() const { return false; } + bool hasFastCapture() const { return mFastCapture != 0; } private: // Enter standby if not already in standby, and set mStandby flag @@ -1144,4 +1146,40 @@ private: const sp<NBAIO_Sink> mTeeSink; const sp<MemoryDealer> mReadOnlyHeap; + + // one-time initialization, no locks required + sp<FastCapture> mFastCapture; // non-0 if there is also a fast capture + // FIXME audio watchdog thread + + // contents are not guaranteed to be consistent, no locks required + FastCaptureDumpState mFastCaptureDumpState; +#ifdef STATE_QUEUE_DUMP + // FIXME StateQueue observer and mutator dump fields +#endif + // FIXME audio watchdog dump + + // accessible only within the threadLoop(), no locks required + // mFastCapture->sq() // for mutating and pushing state + int32_t mFastCaptureFutex; // for cold idle + + // The HAL input source is treated as non-blocking, + // but current implementation is blocking + sp<NBAIO_Source> mInputSource; + // The source for the normal capture thread to read from: mInputSource or mPipeSource + sp<NBAIO_Source> mNormalSource; + // If a fast capture is present, the non-blocking pipe sink written to by fast capture, + // otherwise clear + sp<NBAIO_Sink> mPipeSink; + // If a fast capture is present, the non-blocking pipe source read by normal thread, + // otherwise clear + sp<NBAIO_Source> mPipeSource; + // Depth of pipe from fast capture to normal thread and fast clients, always power of 2 + size_t mPipeFramesP2; + // If a fast capture is present, the Pipe as IMemory, otherwise clear + sp<IMemory> mPipeMemory; + + static const size_t kFastCaptureLogSize = 4 * 1024; + sp<NBLog::Writer> mFastCaptureNBLogWriter; + + bool mFastTrackAvail; // true if fast track available }; diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp index 7ddc71c..4fbb973 100644 --- 
a/services/audioflinger/Tracks.cpp +++ b/services/audioflinger/Tracks.cpp @@ -223,6 +223,8 @@ AudioFlinger::ThreadBase::TrackBase::~TrackBase() // relying on the automatic clear() at end of scope. mClient.clear(); } + // flush the binder command buffer + IPCThreadState::self()->flushCommands(); } // AudioBufferProvider interface @@ -382,7 +384,10 @@ AudioFlinger::PlaybackThread::Track::Track( mIsInvalid(false), mAudioTrackServerProxy(NULL), mResumeToStopping(false), - mFlushHwPending(false) + mFlushHwPending(false), + mPreviousValid(false), + mPreviousFramesWritten(0) + // mPreviousTimestamp { if (mCblk == NULL) { return; @@ -429,8 +434,6 @@ AudioFlinger::PlaybackThread::Track::~Track() // This prevents that leak. if (mSharedBuffer != 0) { mSharedBuffer.clear(); - // flush the binder command buffer - IPCThreadState::self()->flushCommands(); } } @@ -703,7 +706,7 @@ void AudioFlinger::PlaybackThread::Track::stop() if (playbackThread->mActiveTracks.indexOf(this) < 0) { reset(); mState = STOPPED; - } else if (!isFastTrack() && !isOffloaded()) { + } else if (!isFastTrack() && !isOffloaded() && !isDirect()) { mState = STOPPED; } else { // For fast tracks prepareTracks_l() will set state to STOPPING_2 @@ -847,27 +850,51 @@ status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& times { // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant if (isFastTrack()) { + // FIXME no lock held to set mPreviousValid = false return INVALID_OPERATION; } sp<ThreadBase> thread = mThread.promote(); if (thread == 0) { + // FIXME no lock held to set mPreviousValid = false return INVALID_OPERATION; } Mutex::Autolock _l(thread->mLock); PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - if (!isOffloaded()) { + if (!isOffloaded() && !isDirect()) { if (!playbackThread->mLatchQValid) { + mPreviousValid = false; return INVALID_OPERATION; } uint32_t unpresentedFrames = ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * 
mSampleRate) / playbackThread->mSampleRate; uint32_t framesWritten = mAudioTrackServerProxy->framesReleased(); + bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten; if (framesWritten < unpresentedFrames) { + mPreviousValid = false; return INVALID_OPERATION; } - timestamp.mPosition = framesWritten - unpresentedFrames; - timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime; + mPreviousFramesWritten = framesWritten; + uint32_t position = framesWritten - unpresentedFrames; + struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime; + if (checkPreviousTimestamp) { + if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec || + (time.tv_sec == mPreviousTimestamp.mTime.tv_sec && + time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) { + ALOGW("Time is going backwards"); + } + // position can bobble slightly as an artifact; this hides the bobble + static const uint32_t MINIMUM_POSITION_DELTA = 8u; + if ((position <= mPreviousTimestamp.mPosition) || + (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) { + position = mPreviousTimestamp.mPosition; + time = mPreviousTimestamp.mTime; + } + } + timestamp.mPosition = position; + timestamp.mTime = time; + mPreviousTimestamp = timestamp; + mPreviousValid = true; return NO_ERROR; } @@ -953,8 +980,6 @@ bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWrit } if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) { - ALOGV("presentationComplete() session %d complete: framesWritten %d", - mSessionId, framesWritten); triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); mAudioTrackServerProxy->setStreamEndDone(); return true; @@ -1854,7 +1879,7 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, flags, false /*isOut*/, - (flags & IAudioFlinger::TRACK_FAST) != 0 ? 
ALLOC_READONLY : ALLOC_CBLK), + flags & IAudioFlinger::TRACK_FAST ? ALLOC_PIPE : ALLOC_CBLK), mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0), // See real initialization of mRsmpInFront at RecordThread::start() mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL) @@ -1873,9 +1898,14 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( mResampler = AudioResampler::create(16, thread->mChannelCount, sampleRate); // source SR mResampler->setSampleRate(thread->mSampleRate); - mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); + mResampler->setVolume(AudioMixer::UNITY_GAIN_INT, AudioMixer::UNITY_GAIN_INT); mResamplerBufferProvider = new ResamplerBufferProvider(this); } + + if (flags & IAudioFlinger::TRACK_FAST) { + ALOG_ASSERT(thread->mFastTrackAvail); + thread->mFastTrackAvail = false; + } } AudioFlinger::RecordThread::RecordTrack::~RecordTrack() diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk new file mode 100644 index 0000000..7bba05b --- /dev/null +++ b/services/audioflinger/tests/Android.mk @@ -0,0 +1,73 @@ +# Build the unit tests for audioflinger + +# +# resampler unit test +# +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SHARED_LIBRARIES := \ + liblog \ + libutils \ + libcutils \ + libstlport \ + libaudioutils \ + libaudioresampler + +LOCAL_STATIC_LIBRARIES := \ + libgtest \ + libgtest_main + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ + $(call include-path-for, audio-utils) \ + frameworks/av/services/audioflinger + +LOCAL_SRC_FILES := \ + resampler_tests.cpp + +LOCAL_MODULE := resampler_tests +LOCAL_MODULE_TAGS := tests + +include $(BUILD_EXECUTABLE) + +# +# audio mixer test tool +# +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + test-mixer.cpp \ + ../AudioMixer.cpp.arm \ + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + 
external/stlport/stlport \ + $(call include-path-for, audio-effects) \ + $(call include-path-for, audio-utils) \ + frameworks/av/services/audioflinger + +LOCAL_STATIC_LIBRARIES := \ + libsndfile + +LOCAL_SHARED_LIBRARIES := \ + libstlport \ + libeffects \ + libnbaio \ + libcommon_time_client \ + libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog + +LOCAL_MODULE:= test-mixer + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_EXECUTABLE) diff --git a/services/audioflinger/tests/build_and_run_all_unit_tests.sh b/services/audioflinger/tests/build_and_run_all_unit_tests.sh new file mode 100755 index 0000000..2c453b0 --- /dev/null +++ b/services/audioflinger/tests/build_and_run_all_unit_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +# ensure we have mm +. $ANDROID_BUILD_TOP/build/envsetup.sh + +pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/ +pwd +mm + +echo "waiting for device" +adb root && adb wait-for-device remount +adb push $OUT/system/lib/libaudioresampler.so /system/lib +adb push $OUT/system/bin/resampler_tests /system/bin + +sh $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/tests/run_all_unit_tests.sh + +popd diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh new file mode 100755 index 0000000..93bff47 --- /dev/null +++ b/services/audioflinger/tests/mixer_to_wav_tests.sh @@ -0,0 +1,134 @@ +#!/bin/bash +# +# This script uses test-mixer to generate WAV files +# for evaluation of the AudioMixer component. +# +# Sine and chirp signals are used for input because they +# show up as clear lines, either horizontal or diagonal, +# on a spectrogram. This means easy verification of multiple +# track mixing. 
+# +# After execution, look for created subdirectories like +# mixer_i_i +# mixer_i_f +# mixer_f_f +# +# Recommend using a program such as audacity to evaluate +# the output WAV files, e.g. +# +# cd testdir +# audacity *.wav +# +# Using Audacity: +# +# Under "Waveform" view mode you can zoom into the +# start of the WAV file to verify proper ramping. +# +# Select "Spectrogram" to see verify the lines +# (sine = horizontal, chirp = diagonal) which should +# be clear (except for around the start as the volume +# ramping causes spectral distortion). + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +# ensure we have mm +. $ANDROID_BUILD_TOP/build/envsetup.sh + +pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/ + +# build +pwd +mm + +# send to device +echo "waiting for device" +adb root && adb wait-for-device remount +adb push $OUT/system/lib/libaudioresampler.so /system/lib +adb push $OUT/system/bin/test-mixer /system/bin + +# createwav creates a series of WAV files testing various +# mixer settings +# $1 = flags +# $2 = directory +function createwav() { +# create directory if it doesn't exist + if [ ! 
-d $2 ]; then + mkdir $2 + fi + +# Test: +# process__genericResampling +# track__Resample / track__genericResample + adb shell test-mixer $1 -s 48000 \ + -o /sdcard/tm48000gr.wav \ + sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 + adb pull /sdcard/tm48000gr.wav $2 + +# Test: +# process__genericResample +# track__Resample / track__genericResample +# track__NoResample / track__16BitsStereo / track__16BitsMono +# Aux buffer + adb shell test-mixer $1 -s 9307 \ + -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \ + sine:2,1000,3000 sine:1,2000,9307 chirp:2,9307 + adb pull /sdcard/tm9307gra.wav $2 + adb pull /sdcard/aux9307gra.wav $2 + +# Test: +# process__genericNoResampling +# track__NoResample / track__16BitsStereo / track__16BitsMono + adb shell test-mixer $1 -s 32000 \ + -o /sdcard/tm32000gnr.wav \ + sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000 + adb pull /sdcard/tm32000gnr.wav $2 + +# Test: +# process__genericNoResampling +# track__NoResample / track__16BitsStereo / track__16BitsMono +# Aux buffer + adb shell test-mixer $1 -s 32000 \ + -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \ + sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000 + adb pull /sdcard/tm32000gnra.wav $2 + adb pull /sdcard/aux32000gnra.wav $2 + +# Test: +# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling +# Downmixer + adb shell test-mixer $1 -s 32000 \ + -o /sdcard/tm32000nrot.wav \ + sine:6,1000,32000 + adb pull /sdcard/tm32000nrot.wav $2 + +# Test: +# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling +# Aux buffer + adb shell test-mixer $1 -s 44100 \ + -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \ + sine:2,2000,44100 + adb pull /sdcard/tm44100nrota.wav $2 + adb pull /sdcard/aux44100nrota.wav $2 +} + +# +# Call createwav to generate WAV files in various combinations +# +# i_i = integer input track, integer mixer output +# f_f = float input track, float mixer output +# i_f = integer input track, float_mixer output +# +# If the 
mixer output is float, then the output WAV file is pcm float. +# +# TODO: create a "snr" like "diff" to automatically +# compare files in these directories together. +# + +createwav "" "tests/mixer_i_i" +createwav "-f -m" "tests/mixer_f_f" +createwav "-m" "tests/mixer_i_f" + +popd diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp new file mode 100644 index 0000000..d76c376 --- /dev/null +++ b/services/audioflinger/tests/resampler_tests.cpp @@ -0,0 +1,317 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "audioflinger_resampler_tests" + +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <fcntl.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <errno.h> +#include <time.h> +#include <math.h> +#include <vector> +#include <utility> +#include <cutils/log.h> +#include <gtest/gtest.h> +#include <media/AudioBufferProvider.h> +#include "AudioResampler.h" +#include "test_utils.h" + +void resample(int channels, void *output, + size_t outputFrames, const std::vector<size_t> &outputIncr, + android::AudioBufferProvider *provider, android::AudioResampler *resampler) +{ + for (size_t i = 0, j = 0; i < outputFrames; ) { + size_t thisFrames = outputIncr[j++]; + if (j >= outputIncr.size()) { + j = 0; + } + if (thisFrames == 0 || thisFrames > outputFrames - i) { + thisFrames = outputFrames - i; + } + resampler->resample((int32_t*) output + channels*i, thisFrames, provider); + i += thisFrames; + } +} + +void buffercmp(const void *reference, const void *test, + size_t outputFrameSize, size_t outputFrames) +{ + for (size_t i = 0; i < outputFrames; ++i) { + int check = memcmp((const char*)reference + i * outputFrameSize, + (const char*)test + i * outputFrameSize, outputFrameSize); + if (check) { + ALOGE("Failure at frame %d", i); + ASSERT_EQ(check, 0); /* fails */ + } + } +} + +void testBufferIncrement(size_t channels, bool useFloat, + unsigned inputFreq, unsigned outputFreq, + enum android::AudioResampler::src_quality quality) +{ + const int bits = useFloat ? 
32 : 16; + // create the provider + std::vector<int> inputIncr; + SignalProvider provider; + if (useFloat) { + provider.setChirp<float>(channels, + 0., outputFreq/2., outputFreq, outputFreq/2000.); + } else { + provider.setChirp<int16_t>(channels, + 0., outputFreq/2., outputFreq, outputFreq/2000.); + } + provider.setIncr(inputIncr); + + // calculate the output size + size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq; + size_t outputFrameSize = channels * (useFloat ? sizeof(float) : sizeof(int32_t)); + size_t outputSize = outputFrameSize * outputFrames; + outputSize &= ~7; + + // create the resampler + const int volumePrecision = 12; /* typical unity gain */ + android::AudioResampler* resampler; + + resampler = android::AudioResampler::create(bits, channels, outputFreq, quality); + resampler->setSampleRate(inputFreq); + resampler->setVolume(1 << volumePrecision, 1 << volumePrecision); + + // set up the reference run + std::vector<size_t> refIncr; + refIncr.push_back(outputFrames); + void* reference = malloc(outputSize); + resample(channels, reference, outputFrames, refIncr, &provider, resampler); + + provider.reset(); + +#if 0 + /* this test will fail - API interface issue: reset() does not clear internal buffers */ + resampler->reset(); +#else + delete resampler; + resampler = android::AudioResampler::create(bits, channels, outputFreq, quality); + resampler->setSampleRate(inputFreq); + resampler->setVolume(1 << volumePrecision, 1 << volumePrecision); +#endif + + // set up the test run + std::vector<size_t> outIncr; + outIncr.push_back(1); + outIncr.push_back(2); + outIncr.push_back(3); + void* test = malloc(outputSize); + inputIncr.push_back(1); + inputIncr.push_back(3); + provider.setIncr(inputIncr); + resample(channels, test, outputFrames, outIncr, &provider, resampler); + + // check + buffercmp(reference, test, outputFrameSize, outputFrames); + + free(reference); + free(test); + delete resampler; +} + +template <typename T> 
+inline double sqr(T v) +{ + double dv = static_cast<double>(v); + return dv * dv; +} + +template <typename T> +double signalEnergy(T *start, T *end, unsigned stride) +{ + double accum = 0; + + for (T *p = start; p < end; p += stride) { + accum += sqr(*p); + } + unsigned count = (end - start + stride - 1) / stride; + return accum / count; +} + +void testStopbandDownconversion(size_t channels, + unsigned inputFreq, unsigned outputFreq, + unsigned passband, unsigned stopband, + enum android::AudioResampler::src_quality quality) +{ + // create the provider + std::vector<int> inputIncr; + SignalProvider provider; + provider.setChirp<int16_t>(channels, + 0., inputFreq/2., inputFreq, inputFreq/2000.); + provider.setIncr(inputIncr); + + // calculate the output size + size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq; + size_t outputFrameSize = channels * sizeof(int32_t); + size_t outputSize = outputFrameSize * outputFrames; + outputSize &= ~7; + + // create the resampler + const int volumePrecision = 12; /* typical unity gain */ + android::AudioResampler* resampler; + + resampler = android::AudioResampler::create(16, channels, outputFreq, quality); + resampler->setSampleRate(inputFreq); + resampler->setVolume(1 << volumePrecision, 1 << volumePrecision); + + // set up the reference run + std::vector<size_t> refIncr; + refIncr.push_back(outputFrames); + void* reference = malloc(outputSize); + resample(channels, reference, outputFrames, refIncr, &provider, resampler); + + int32_t *out = reinterpret_cast<int32_t *>(reference); + + // check signal energy in passband + const unsigned passbandFrame = passband * outputFreq / 1000.; + const unsigned stopbandFrame = stopband * outputFreq / 1000.; + + // check each channel separately + for (size_t i = 0; i < channels; ++i) { + double passbandEnergy = signalEnergy(out, out + passbandFrame * channels, channels); + double stopbandEnergy = signalEnergy(out + stopbandFrame * channels, + out + outputFrames 
* channels, channels); + double dbAtten = -10. * log10(stopbandEnergy / passbandEnergy); + ASSERT_GT(dbAtten, 60.); + +#if 0 + // internal verification + printf("if:%d of:%d pbf:%d sbf:%d sbe: %f pbe: %f db: %.2f\n", + provider.getNumFrames(), outputFrames, + passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten); + for (size_t i = 0; i < 10; ++i) { + printf("%d\n", out[i+passbandFrame*channels]); + } + for (size_t i = 0; i < 10; ++i) { + printf("%d\n", out[i+stopbandFrame*channels]); + } +#endif + } + + free(reference); + delete resampler; +} + +/* Buffer increment test + * + * We compare a reference output, where we consume and process the entire + * buffer at a time, and a test output, where we provide small chunks of input + * data and process small chunks of output (which may not be equivalent in size). + * + * Two subtests - fixed phase (3:2 down) and interpolated phase (147:320 up) + */ +TEST(audioflinger_resampler, bufferincrement_fixedphase) { + // all of these work + static const enum android::AudioResampler::src_quality kQualityArray[] = { + android::AudioResampler::LOW_QUALITY, + android::AudioResampler::MED_QUALITY, + android::AudioResampler::HIGH_QUALITY, + android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(2, false, 48000, 32000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, bufferincrement_interpolatedphase) { + // all of these work except low quality + static const enum android::AudioResampler::src_quality kQualityArray[] = { +// android::AudioResampler::LOW_QUALITY, + android::AudioResampler::MED_QUALITY, + android::AudioResampler::HIGH_QUALITY, + android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + 
android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(2, false, 22050, 48000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, bufferincrement_fixedphase_multi) { + // only dynamic quality + static const enum android::AudioResampler::src_quality kQualityArray[] = { + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(4, false, 48000, 32000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, bufferincrement_interpolatedphase_multi_float) { + // only dynamic quality + static const enum android::AudioResampler::src_quality kQualityArray[] = { + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(8, true, 22050, 48000, kQualityArray[i]); + } +} + +/* Simple aliasing test + * + * This checks stopband response of the chirp signal to make sure frequencies + * are properly suppressed. It uses downsampling because the stopband can be + * clearly isolated by input frequencies exceeding the output sample rate (nyquist). + */ +TEST(audioflinger_resampler, stopbandresponse) { + // not all of these may work (old resamplers fail on downsampling) + static const enum android::AudioResampler::src_quality kQualityArray[] = { + //android::AudioResampler::LOW_QUALITY, + //android::AudioResampler::MED_QUALITY, + //android::AudioResampler::HIGH_QUALITY, + //android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + // in this test we assume a maximum transition band between 12kHz and 20kHz. 
+ // there must be at least 60dB relative attenuation between stopband and passband. + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion(2, 48000, 32000, 12000, 20000, kQualityArray[i]); + } + + // in this test we assume a maximum transition band between 7kHz and 15kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + // (the weird ratio triggers interpolative resampling) + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion(2, 48000, 22101, 7000, 15000, kQualityArray[i]); + } +} diff --git a/services/audioflinger/tests/run_all_unit_tests.sh b/services/audioflinger/tests/run_all_unit_tests.sh new file mode 100755 index 0000000..ffae6ae --- /dev/null +++ b/services/audioflinger/tests/run_all_unit_tests.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +echo "waiting for device" +adb root && adb wait-for-device remount + +adb shell /system/bin/resampler_tests diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp new file mode 100644 index 0000000..3940702 --- /dev/null +++ b/services/audioflinger/tests/test-mixer.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <stdio.h> +#include <inttypes.h> +#include <math.h> +#include <vector> +#include <audio_utils/primitives.h> +#include <audio_utils/sndfile.h> +#include <media/AudioBufferProvider.h> +#include "AudioMixer.h" +#include "test_utils.h" + +/* Testing is typically through creation of an output WAV file from several + * source inputs, to be later analyzed by an audio program such as Audacity. + * + * Sine or chirp functions are typically more useful as input to the mixer + * as they show up as straight lines on a spectrogram if successfully mixed. + * + * A sample shell script is provided: mixer_to_wave_tests.sh + */ + +using namespace android; + +static void usage(const char* name) { + fprintf(stderr, "Usage: %s [-f] [-m]" + " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]" + " (<input-file> | <command>)+\n", name); + fprintf(stderr, " -f enable floating point input track\n"); + fprintf(stderr, " -m enable floating point mixer output\n"); + fprintf(stderr, " -s mixer sample-rate\n"); + fprintf(stderr, " -o <output-file> WAV file, pcm16 (or float if -m specified)\n"); + fprintf(stderr, " -a <aux-buffer-file>\n"); + fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n"); + fprintf(stderr, " <input-file> is a WAV file\n"); + fprintf(stderr, " <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n"); + fprintf(stderr, " 'chirp:<channels>,<samplerate>'\n"); +} + +static int writeFile(const char *filename, const void *buffer, + uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) { + if (filename == NULL) { + return 0; // ok to pass in NULL filename + } + // write output to file. + SF_INFO info; + info.frames = 0; + info.samplerate = sampleRate; + info.channels = channels; + info.format = SF_FORMAT_WAV | (isBufferFloat ? 
SF_FORMAT_FLOAT : SF_FORMAT_PCM_16); + printf("saving file:%s channels:%d samplerate:%d frames:%d\n", + filename, info.channels, info.samplerate, frames); + SNDFILE *sf = sf_open(filename, SFM_WRITE, &info); + if (sf == NULL) { + perror(filename); + return EXIT_FAILURE; + } + if (isBufferFloat) { + (void) sf_writef_float(sf, (float*)buffer, frames); + } else { + (void) sf_writef_short(sf, (short*)buffer, frames); + } + sf_close(sf); + return EXIT_SUCCESS; +} + +int main(int argc, char* argv[]) { + const char* const progname = argv[0]; + bool useInputFloat = false; + bool useMixerFloat = false; + bool useRamp = true; + uint32_t outputSampleRate = 48000; + uint32_t outputChannels = 2; // stereo for now + std::vector<int> Pvalues; + const char* outputFilename = NULL; + const char* auxFilename = NULL; + std::vector<int32_t> Names; + std::vector<SignalProvider> Providers; + + for (int ch; (ch = getopt(argc, argv, "fms:o:a:P:")) != -1;) { + switch (ch) { + case 'f': + useInputFloat = true; + break; + case 'm': + useMixerFloat = true; + break; + case 's': + outputSampleRate = atoi(optarg); + break; + case 'o': + outputFilename = optarg; + break; + case 'a': + auxFilename = optarg; + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return EXIT_FAILURE; + } + break; + case '?': + default: + usage(progname); + return EXIT_FAILURE; + } + } + argc -= optind; + argv += optind; + + if (argc == 0) { + usage(progname); + return EXIT_FAILURE; + } + if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) { + fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS); + return EXIT_FAILURE; + } + + size_t outputFrames = 0; + + // create providers for each track + Providers.resize(argc); + for (int i = 0; i < argc; ++i) { + static const char chirp[] = "chirp:"; + static const char sine[] = "sine:"; + static const double kSeconds = 1; + + if (!strncmp(argv[i], chirp, strlen(chirp))) { + std::vector<int> v; + 
+ parseCSV(argv[i] + strlen(chirp), v); + if (v.size() == 2) { + printf("creating chirp(%d %d)\n", v[0], v[1]); + if (useInputFloat) { + Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds); + } else { + Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else if (!strncmp(argv[i], sine, strlen(sine))) { + std::vector<int> v; + + parseCSV(argv[i] + strlen(sine), v); + if (v.size() == 3) { + printf("creating sine(%d %d)\n", v[0], v[1]); + if (useInputFloat) { + Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds); + } else { + Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else { + printf("creating filename(%s)\n", argv[i]); + if (useInputFloat) { + Providers[i].setFile<float>(argv[i]); + } else { + Providers[i].setFile<short>(argv[i]); + } + Providers[i].setIncr(Pvalues); + } + // calculate the number of output frames + size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate + / Providers[i].getSampleRate(); + if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames + outputFrames = nframes; + } + } + + // create the output buffer. + const size_t outputFrameSize = outputChannels + * (useMixerFloat ? sizeof(float) : sizeof(int16_t)); + const size_t outputSize = outputFrames * outputFrameSize; + void *outputAddr = NULL; + (void) posix_memalign(&outputAddr, 32, outputSize); + memset(outputAddr, 0, outputSize); + + // create the aux buffer, if needed. + const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always + const size_t auxSize = outputFrames * auxFrameSize; + void *auxAddr = NULL; + if (auxFilename) { + (void) posix_memalign(&auxAddr, 32, auxSize); + memset(auxAddr, 0, auxSize); + } + + // create the mixer. 
+ const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960 + AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate); + audio_format_t inputFormat = useInputFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + audio_format_t mixerFormat = useMixerFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks + static float f0; // zero + + // set up the tracks. + for (size_t i = 0; i < Providers.size(); ++i) { + //printf("track %d out of %d\n", i, Providers.size()); + uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels()); + int32_t name = mixer->getTrackName(channelMask, + inputFormat, AUDIO_SESSION_OUTPUT_MIX); + ALOG_ASSERT(name >= 0); + Names.push_back(name); + mixer->setBufferProvider(name, &Providers[i]); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (void *) outputAddr); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)mixerFormat); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT, + (void *)(uintptr_t)inputFormat); + mixer->setParameter( + name, + AudioMixer::RESAMPLE, + AudioMixer::SAMPLE_RATE, + (void *)(uintptr_t)Providers[i].getSampleRate()); + if (useRamp) { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f); + } else { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f); + } + if (auxFilename) { + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (void *) auxAddr); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, 
&f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f); + } + mixer->enable(name); + } + + // pump the mixer to process data. + size_t i; + for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) { + for (size_t j = 0; j < Names.size(); ++j) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (char *) outputAddr + i * outputFrameSize); + if (auxFilename) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (char *) auxAddr + i * auxFrameSize); + } + } + mixer->process(AudioBufferProvider::kInvalidPTS); + } + outputFrames = i; // reset output frames to the data actually produced. + + // write to files + writeFile(outputFilename, outputAddr, + outputSampleRate, outputChannels, outputFrames, useMixerFloat); + if (auxFilename) { + // Aux buffer is always in q4_27 format for now. + // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count) + ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1); + writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false); + } + + delete mixer; + free(outputAddr); + free(auxAddr); + return EXIT_SUCCESS; +} diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h new file mode 100644 index 0000000..f954292 --- /dev/null +++ b/services/audioflinger/tests/test_utils.h @@ -0,0 +1,307 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_TEST_UTILS_H +#define ANDROID_AUDIO_TEST_UTILS_H + +#include <audio_utils/sndfile.h> + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#endif + +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + +template<typename T> +static inline T convertValue(double val) +{ + if (is_same<T, int16_t>::value) { + return floor(val * 32767.0 + 0.5); + } else if (is_same<T, int32_t>::value) { + return floor(val * (1UL<<31) + 0.5); + } + return val; // assume float or double +} + +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. +static inline int parseCSV(const char *string, std::vector<int>& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values[0] = atoi(p = string); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values[i++] = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} + +/* Creates a type-independent audio buffer provider from + * a buffer base address, size, framesize, and input increment array. + * + * No allocation or deallocation of the provided buffer is done. 
+ */ +class TestProvider : public android::AudioBufferProvider { +public: + TestProvider(void* addr, size_t frames, size_t frameSize, + const std::vector<int>& inputIncr) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0) + { + } + + TestProvider() + : mAddr(NULL), mNumFrames(0), mFrameSize(0), + mNextFrame(0), mUnrel(0), mNextIdx(0) + { + } + + void setIncr(const std::vector<int>& inputIncr) { + mInputIncr = inputIncr; + mNextIdx = 0; + } + + virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS) + { + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (!mInputIncr.empty()) { + size_t provided = mInputIncr[mNextIdx++]; + ALOGV("getNextBuffer() mValue[%d]=%u not %u", + mNextIdx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextIdx >= mInputIncr.size()) { + mNextIdx = 0; + } + } + ALOGV("getNextBuffer() requested %u frames out of %u frames available" + " and returned %u frames\n", + requestedFrames, mNumFrames - mNextFrame, buffer->frameCount); + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return android::NO_ERROR; + } else { + buffer->raw = NULL; + return android::NOT_ENOUGH_DATA; + } + } + + virtual void releaseBuffer(Buffer* buffer) + { + if (buffer->frameCount > mUnrel) { + ALOGE("releaseBuffer() released %u frames but only %u available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + + ALOGV("releaseBuffer() released %u frames out of %u frames available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; + } + + void reset() + { + mNextFrame 
= 0; + } + + size_t getNumFrames() + { + return mNumFrames; + } + + +protected: + void* mAddr; // base address + size_t mNumFrames; // total frames + int mFrameSize; // frame size (# channels * bytes per sample) + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + std::vector<int> mInputIncr; // number of frames provided per call + size_t mNextIdx; // index of next entry in mInputIncr to use +}; + +/* Creates a buffer filled with a sine wave. + */ +template<typename T> +static void createSine(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double freq) +{ + double tscale = 1. / sampleRate; + T* buffer = reinterpret_cast<T*>(vbuffer); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * freq * t); + T yt = convertValue<T>(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / (j + 1); + } + } +} + +/* Creates a buffer filled with a chirp signal (a sine wave sweep). + * + * When creating the Chirp, note that the frequency is the true sinusoidal + * frequency not the sampling rate. + * + * http://en.wikipedia.org/wiki/Chirp + */ +template<typename T> +static void createChirp(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double minfreq, double maxfreq) +{ + double tscale = 1. / sampleRate; + T *buffer = reinterpret_cast<T*>(vbuffer); + // note the chirp constant k has a divide-by-two. + double k = (maxfreq - minfreq) / (2. * tscale * frames); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * (k * t + minfreq) * t); + T yt = convertValue<T>(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / (j + 1); + } + } +} + +/* This derived class creates a buffer provider of datatype T, + * consisting of an input signal, e.g. from createChirp(). + * The number of frames can be obtained from the base class + * TestProvider::getNumFrames(). 
+ */ + +class SignalProvider : public TestProvider { +public: + SignalProvider() + : mSampleRate(0), + mChannels(0) + { + } + + virtual ~SignalProvider() + { + free(mAddr); + mAddr = NULL; + } + + template <typename T> + void setChirp(size_t channels, double minfreq, double maxfreq, double sampleRate, double time) + { + createBufferByFrames<T>(channels, sampleRate, sampleRate*time); + createChirp<T>(mAddr, mNumFrames, mChannels, mSampleRate, minfreq, maxfreq); + } + + template <typename T> + void setSine(size_t channels, + double freq, double sampleRate, double time) + { + createBufferByFrames<T>(channels, sampleRate, sampleRate*time); + createSine<T>(mAddr, mNumFrames, mChannels, mSampleRate, freq); + } + + template <typename T> + void setFile(const char *file_in) + { + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return; + } + createBufferByFrames<T>(info.channels, info.samplerate, info.frames); + if (is_same<T, float>::value) { + (void) sf_readf_float(sf, (float *) mAddr, mNumFrames); + } else if (is_same<T, short>::value) { + (void) sf_readf_short(sf, (short *) mAddr, mNumFrames); + } + sf_close(sf); + } + + template <typename T> + void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames) + { + mNumFrames = frames; + mChannels = channels; + mFrameSize = mChannels * sizeof(T); + free(mAddr); + mAddr = malloc(mFrameSize * mNumFrames); + mSampleRate = sampleRate; + } + + uint32_t getSampleRate() const { + return mSampleRate; + } + + uint32_t getNumChannels() const { + return mChannels; + } + +protected: + uint32_t mSampleRate; + uint32_t mChannels; +}; + +#endif // ANDROID_AUDIO_TEST_UTILS_H diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk index a22ad9d..f3be42d 100644..100755 --- a/services/audiopolicy/Android.mk +++ b/services/audiopolicy/Android.mk @@ -3,7 +3,8 @@ LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ 
- AudioPolicyService.cpp + AudioPolicyService.cpp \ + AudioPolicyEffects.cpp ifeq ($(USE_LEGACY_AUDIO_POLICY), 1) LOCAL_SRC_FILES += \ @@ -46,8 +47,8 @@ LOCAL_CFLAGS += -fvisibility=hidden include $(BUILD_SHARED_LIBRARY) + ifneq ($(USE_LEGACY_AUDIO_POLICY), 1) -ifneq ($(USE_CUSTOM_AUDIO_POLICY), 1) include $(CLEAR_VARS) @@ -62,6 +63,20 @@ LOCAL_SHARED_LIBRARIES := \ LOCAL_STATIC_LIBRARIES := \ libmedia_helper +LOCAL_MODULE:= libaudiopolicymanagerdefault + +include $(BUILD_SHARED_LIBRARY) + +ifneq ($(USE_CUSTOM_AUDIO_POLICY), 1) + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + AudioPolicyFactory.cpp + +LOCAL_SHARED_LIBRARIES := \ + libaudiopolicymanagerdefault + LOCAL_MODULE:= libaudiopolicymanager include $(BUILD_SHARED_LIBRARY) diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp new file mode 100755 index 0000000..185e1cc --- /dev/null +++ b/services/audiopolicy/AudioPolicyEffects.cpp @@ -0,0 +1,638 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioPolicyEffects" +#define LOG_NDEBUG 0 + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <cutils/misc.h> +#include <media/AudioEffect.h> +#include <system/audio.h> +#include <hardware/audio_effect.h> +#include <audio_effects/audio_effects_conf.h> +#include <utils/Vector.h> +#include <utils/SortedVector.h> +#include <cutils/config_utils.h> +#include "AudioPolicyEffects.h" +#include "ServiceUtilities.h" + +namespace android { + +// ---------------------------------------------------------------------------- +// AudioPolicyEffects Implementation +// ---------------------------------------------------------------------------- + +AudioPolicyEffects::AudioPolicyEffects() +{ + // load automatic audio effect modules + if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) { + loadAudioEffectConfig(AUDIO_EFFECT_VENDOR_CONFIG_FILE); + } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) { + loadAudioEffectConfig(AUDIO_EFFECT_DEFAULT_CONFIG_FILE); + } +} + + +AudioPolicyEffects::~AudioPolicyEffects() +{ + size_t i = 0; + // release audio input processing resources + for (i = 0; i < mInputSources.size(); i++) { + delete mInputSources.valueAt(i); + } + mInputSources.clear(); + + for (i = 0; i < mInputs.size(); i++) { + mInputs.valueAt(i)->mEffects.clear(); + delete mInputs.valueAt(i); + } + mInputs.clear(); + + // release audio output processing resources + for (i = 0; i < mOutputStreams.size(); i++) { + delete mOutputStreams.valueAt(i); + } + mOutputStreams.clear(); + + for (i = 0; i < mOutputSessions.size(); i++) { + mOutputSessions.valueAt(i)->mEffects.clear(); + delete mOutputSessions.valueAt(i); + } + mOutputSessions.clear(); +} + + +status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input, + audio_source_t inputSource, + int audioSession) +{ + status_t status = NO_ERROR; + + // create audio pre processors according to input source + audio_source_t aliasSource = (inputSource == 
AUDIO_SOURCE_HOTWORD) ? + AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; + + ssize_t index = mInputSources.indexOfKey(aliasSource); + if (index < 0) { + ALOGV("addInputEffects(): no processing needs to be attached to this source"); + return status; + } + ssize_t idx = mInputs.indexOfKey(input); + EffectVector *inputDesc; + if (idx < 0) { + inputDesc = new EffectVector(audioSession); + mInputs.add(input, inputDesc); + } else { + inputDesc = mInputs.valueAt(idx); + } + + Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; + for (size_t i = 0; i < effects.size(); i++) { + EffectDesc *effect = effects[i]; + sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); + status_t status = fx->initCheck(); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("addInputEffects(): failed to create Fx %s on source %d", + effect->mName, (int32_t)aliasSource); + // fx goes out of scope and strong ref on AudioEffect is released + continue; + } + for (size_t j = 0; j < effect->mParams.size(); j++) { + fx->setParameter(effect->mParams[j]); + } + ALOGV("addInputEffects(): added Fx %s on source: %d", effect->mName, (int32_t)aliasSource); + inputDesc->mEffects.add(fx); + } + setProcessorEnabled(inputDesc, true); + + return status; +} + + +status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input) +{ + status_t status = NO_ERROR; + + ssize_t index = mInputs.indexOfKey(input); + if (index < 0) { + return status; + } + EffectVector *inputDesc = mInputs.valueAt(index); + setProcessorEnabled(inputDesc, false); + delete inputDesc; + mInputs.removeItemsAt(index); + ALOGV("releaseInputEffects(): all effects released"); + return status; +} + +status_t AudioPolicyEffects::queryDefaultInputEffects(int audioSession, + effect_descriptor_t *descriptors, + uint32_t *count) +{ + status_t status = NO_ERROR; + + size_t index; + for (index = 0; index < mInputs.size(); index++) { + if (mInputs.valueAt(index)->mSessionId == 
audioSession) { + break; + } + } + if (index == mInputs.size()) { + *count = 0; + return BAD_VALUE; + } + Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; + + for (size_t i = 0; i < effects.size(); i++) { + effect_descriptor_t desc = effects[i]->descriptor(); + if (i < *count) { + descriptors[i] = desc; + } + } + if (effects.size() > *count) { + status = NO_MEMORY; + } + *count = effects.size(); + return status; +} + + +status_t AudioPolicyEffects::queryDefaultOutputSessionEffects(int audioSession, + effect_descriptor_t *descriptors, + uint32_t *count) +{ + status_t status = NO_ERROR; + + size_t index; + for (index = 0; index < mOutputSessions.size(); index++) { + if (mOutputSessions.valueAt(index)->mSessionId == audioSession) { + break; + } + } + if (index == mOutputSessions.size()) { + *count = 0; + return BAD_VALUE; + } + Vector< sp<AudioEffect> > effects = mOutputSessions.valueAt(index)->mEffects; + + for (size_t i = 0; i < effects.size(); i++) { + effect_descriptor_t desc = effects[i]->descriptor(); + if (i < *count) { + descriptors[i] = desc; + } + } + if (effects.size() > *count) { + status = NO_MEMORY; + } + *count = effects.size(); + return status; +} + + +status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output, + audio_stream_type_t stream, + int audioSession) +{ + status_t status = NO_ERROR; + + // create audio processors according to stream + ssize_t index = mOutputStreams.indexOfKey(stream); + if (index < 0) { + ALOGV("addOutputSessionEffects(): no output processing needed for this stream"); + return NO_ERROR; + } + + ssize_t idx = mOutputSessions.indexOfKey(audioSession); + EffectVector *procDesc; + if (idx < 0) { + procDesc = new EffectVector(audioSession); + mOutputSessions.add(audioSession, procDesc); + } else { + procDesc = mOutputSessions.valueAt(idx); + } + + Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects; + for (size_t i = 0; i < effects.size(); i++) { + EffectDesc *effect = 
effects[i]; + sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0, audioSession, output); + status_t status = fx->initCheck(); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGE("addOutputSessionEffects(): failed to create Fx %s on session %d", + effect->mName, audioSession); + // fx goes out of scope and strong ref on AudioEffect is released + continue; + } + ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d", + effect->mName, audioSession, (int32_t)stream); + procDesc->mEffects.add(fx); + } + + setProcessorEnabled(procDesc, true); + + return status; +} + +status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t output, + audio_stream_type_t stream, + int audioSession) +{ + status_t status = NO_ERROR; + (void) output; // argument not used for now + (void) stream; // argument not used for now + + ssize_t index = mOutputSessions.indexOfKey(audioSession); + if (index < 0) { + ALOGV("releaseOutputSessionEffects: no output processing was attached to this stream"); + return NO_ERROR; + } + + EffectVector *procDesc = mOutputSessions.valueAt(index); + setProcessorEnabled(procDesc, false); + procDesc->mEffects.clear(); + delete procDesc; + mOutputSessions.removeItemsAt(index); + ALOGV("releaseOutputSessionEffects(): output processing released from session: %d", + audioSession); + return status; +} + + +void AudioPolicyEffects::setProcessorEnabled(const EffectVector *effectVector, bool enabled) +{ + const Vector<sp<AudioEffect> > &fxVector = effectVector->mEffects; + for (size_t i = 0; i < fxVector.size(); i++) { + fxVector.itemAt(i)->setEnabled(enabled); + } +} + + +// ---------------------------------------------------------------------------- +// Audio processing configuration +// ---------------------------------------------------------------------------- + +/*static*/ const char * const AudioPolicyEffects::kInputSourceNames[AUDIO_SOURCE_CNT -1] = { + MIC_SRC_TAG, + VOICE_UL_SRC_TAG, + 
VOICE_DL_SRC_TAG, + VOICE_CALL_SRC_TAG, + CAMCORDER_SRC_TAG, + VOICE_REC_SRC_TAG, + VOICE_COMM_SRC_TAG +}; + +// returns the audio_source_t enum corresponding to the input source name or +// AUDIO_SOURCE_CNT is no match found +audio_source_t AudioPolicyEffects::inputSourceNameToEnum(const char *name) +{ + int i; + for (i = AUDIO_SOURCE_MIC; i < AUDIO_SOURCE_CNT; i++) { + if (strcmp(name, kInputSourceNames[i - AUDIO_SOURCE_MIC]) == 0) { + ALOGV("inputSourceNameToEnum found source %s %d", name, i); + break; + } + } + return (audio_source_t)i; +} + +const char *AudioPolicyEffects::kStreamNames[AUDIO_STREAM_CNT+1] = { + AUDIO_STREAM_DEFAULT_TAG, + AUDIO_STREAM_VOICE_CALL_TAG, + AUDIO_STREAM_SYSTEM_TAG, + AUDIO_STREAM_RING_TAG, + AUDIO_STREAM_MUSIC_TAG, + AUDIO_STREAM_ALARM_TAG, + AUDIO_STREAM_NOTIFICATION_TAG, + AUDIO_STREAM_BLUETOOTH_SCO_TAG, + AUDIO_STREAM_ENFORCED_AUDIBLE_TAG, + AUDIO_STREAM_DTMF_TAG, + AUDIO_STREAM_TTS_TAG +}; + +// returns the audio_stream_t enum corresponding to the output stream name or +// AUDIO_STREAM_CNT is no match found +audio_stream_type_t AudioPolicyEffects::streamNameToEnum(const char *name) +{ + int i; + for (i = AUDIO_STREAM_DEFAULT; i < AUDIO_STREAM_CNT; i++) { + if (strcmp(name, kStreamNames[i - AUDIO_STREAM_DEFAULT]) == 0) { + ALOGV("streamNameToEnum found stream %s %d", name, i); + break; + } + } + return (audio_stream_type_t)i; +} + +// ---------------------------------------------------------------------------- +// Audio Effect Config parser +// ---------------------------------------------------------------------------- + +size_t AudioPolicyEffects::growParamSize(char *param, + size_t size, + size_t *curSize, + size_t *totSize) +{ + // *curSize is at least sizeof(effect_param_t) + 2 * sizeof(int) + size_t pos = ((*curSize - 1 ) / size + 1) * size; + + if (pos + size > *totSize) { + while (pos + size > *totSize) { + *totSize += ((*totSize + 7) / 8) * 4; + } + param = (char *)realloc(param, *totSize); + } + *curSize = pos + size; 
+ return pos; +} + +size_t AudioPolicyEffects::readParamValue(cnode *node, + char *param, + size_t *curSize, + size_t *totSize) +{ + if (strncmp(node->name, SHORT_TAG, sizeof(SHORT_TAG) + 1) == 0) { + size_t pos = growParamSize(param, sizeof(short), curSize, totSize); + *(short *)((char *)param + pos) = (short)atoi(node->value); + ALOGV("readParamValue() reading short %d", *(short *)((char *)param + pos)); + return sizeof(short); + } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) { + size_t pos = growParamSize(param, sizeof(int), curSize, totSize); + *(int *)((char *)param + pos) = atoi(node->value); + ALOGV("readParamValue() reading int %d", *(int *)((char *)param + pos)); + return sizeof(int); + } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) { + size_t pos = growParamSize(param, sizeof(float), curSize, totSize); + *(float *)((char *)param + pos) = (float)atof(node->value); + ALOGV("readParamValue() reading float %f",*(float *)((char *)param + pos)); + return sizeof(float); + } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) { + size_t pos = growParamSize(param, sizeof(bool), curSize, totSize); + if (strncmp(node->value, "false", strlen("false") + 1) == 0) { + *(bool *)((char *)param + pos) = false; + } else { + *(bool *)((char *)param + pos) = true; + } + ALOGV("readParamValue() reading bool %s",*(bool *)((char *)param + pos) ? 
"true" : "false"); + return sizeof(bool); + } else if (strncmp(node->name, STRING_TAG, sizeof(STRING_TAG) + 1) == 0) { + size_t len = strnlen(node->value, EFFECT_STRING_LEN_MAX); + if (*curSize + len + 1 > *totSize) { + *totSize = *curSize + len + 1; + param = (char *)realloc(param, *totSize); + } + strncpy(param + *curSize, node->value, len); + *curSize += len; + param[*curSize] = '\0'; + ALOGV("readParamValue() reading string %s", param + *curSize - len); + return len; + } + ALOGW("readParamValue() unknown param type %s", node->name); + return 0; +} + +effect_param_t *AudioPolicyEffects::loadEffectParameter(cnode *root) +{ + cnode *param; + cnode *value; + size_t curSize = sizeof(effect_param_t); + size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int); + effect_param_t *fx_param = (effect_param_t *)malloc(totSize); + + param = config_find(root, PARAM_TAG); + value = config_find(root, VALUE_TAG); + if (param == NULL && value == NULL) { + // try to parse simple parameter form {int int} + param = root->first_child; + if (param != NULL) { + // Note: that a pair of random strings is read as 0 0 + int *ptr = (int *)fx_param->data; + int *ptr2 = (int *)((char *)param + sizeof(effect_param_t)); + ALOGW("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2); + *ptr++ = atoi(param->name); + *ptr = atoi(param->value); + fx_param->psize = sizeof(int); + fx_param->vsize = sizeof(int); + return fx_param; + } + } + if (param == NULL || value == NULL) { + ALOGW("loadEffectParameter() invalid parameter description %s", root->name); + goto error; + } + + fx_param->psize = 0; + param = param->first_child; + while (param) { + ALOGV("loadEffectParameter() reading param of type %s", param->name); + size_t size = readParamValue(param, (char *)fx_param, &curSize, &totSize); + if (size == 0) { + goto error; + } + fx_param->psize += size; + param = param->next; + } + + // align start of value field on 32 bit boundary + curSize = ((curSize - 1 ) / sizeof(int) + 1) * sizeof(int); + + 
fx_param->vsize = 0; + value = value->first_child; + while (value) { + ALOGV("loadEffectParameter() reading value of type %s", value->name); + size_t size = readParamValue(value, (char *)fx_param, &curSize, &totSize); + if (size == 0) { + goto error; + } + fx_param->vsize += size; + value = value->next; + } + + return fx_param; + +error: + delete fx_param; + return NULL; +} + +void AudioPolicyEffects::loadEffectParameters(cnode *root, Vector <effect_param_t *>& params) +{ + cnode *node = root->first_child; + while (node) { + ALOGV("loadEffectParameters() loading param %s", node->name); + effect_param_t *param = loadEffectParameter(node); + if (param == NULL) { + node = node->next; + continue; + } + params.add(param); + node = node->next; + } +} + + +AudioPolicyEffects::EffectDescVector *AudioPolicyEffects::loadEffectConfig( + cnode *root, + const Vector <EffectDesc *>& effects) +{ + cnode *node = root->first_child; + if (node == NULL) { + ALOGW("loadInputSource() empty element %s", root->name); + return NULL; + } + EffectDescVector *desc = new EffectDescVector(); + while (node) { + size_t i; + for (i = 0; i < effects.size(); i++) { + if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) { + ALOGV("loadEffectConfig() found effect %s in list", node->name); + break; + } + } + if (i == effects.size()) { + ALOGV("loadEffectConfig() effect %s not in list", node->name); + node = node->next; + continue; + } + EffectDesc *effect = new EffectDesc(*effects[i]); // deep copy + loadEffectParameters(node, effect->mParams); + ALOGV("loadEffectConfig() adding effect %s uuid %08x", + effect->mName, effect->mUuid.timeLow); + desc->mEffects.add(effect); + node = node->next; + } + if (desc->mEffects.size() == 0) { + ALOGW("loadEffectConfig() no valid effects found in config %s", root->name); + delete desc; + return NULL; + } + return desc; +} + +status_t AudioPolicyEffects::loadInputEffectConfigurations(cnode *root, + const Vector <EffectDesc *>& effects) +{ + cnode 
*node = config_find(root, PREPROCESSING_TAG); + if (node == NULL) { + return -ENOENT; + } + node = node->first_child; + while (node) { + audio_source_t source = inputSourceNameToEnum(node->name); + if (source == AUDIO_SOURCE_CNT) { + ALOGW("loadInputSources() invalid input source %s", node->name); + node = node->next; + continue; + } + ALOGV("loadInputSources() loading input source %s", node->name); + EffectDescVector *desc = loadEffectConfig(node, effects); + if (desc == NULL) { + node = node->next; + continue; + } + mInputSources.add(source, desc); + node = node->next; + } + return NO_ERROR; +} + +status_t AudioPolicyEffects::loadStreamEffectConfigurations(cnode *root, + const Vector <EffectDesc *>& effects) +{ + cnode *node = config_find(root, OUTPUT_SESSION_PROCESSING_TAG); + if (node == NULL) { + return -ENOENT; + } + node = node->first_child; + while (node) { + audio_stream_type_t stream = streamNameToEnum(node->name); + if (stream == AUDIO_STREAM_CNT) { + ALOGW("loadStreamEffectConfigurations() invalid output stream %s", node->name); + node = node->next; + continue; + } + ALOGV("loadStreamEffectConfigurations() loading output stream %s", node->name); + EffectDescVector *desc = loadEffectConfig(node, effects); + if (desc == NULL) { + node = node->next; + continue; + } + mOutputStreams.add(stream, desc); + node = node->next; + } + return NO_ERROR; +} + +AudioPolicyEffects::EffectDesc *AudioPolicyEffects::loadEffect(cnode *root) +{ + cnode *node = config_find(root, UUID_TAG); + if (node == NULL) { + return NULL; + } + effect_uuid_t uuid; + if (AudioEffect::stringToGuid(node->value, &uuid) != NO_ERROR) { + ALOGW("loadEffect() invalid uuid %s", node->value); + return NULL; + } + return new EffectDesc(root->name, uuid); +} + +status_t AudioPolicyEffects::loadEffects(cnode *root, Vector <EffectDesc *>& effects) +{ + cnode *node = config_find(root, EFFECTS_TAG); + if (node == NULL) { + return -ENOENT; + } + node = node->first_child; + while (node) { + 
ALOGV("loadEffects() loading effect %s", node->name); + EffectDesc *effect = loadEffect(node); + if (effect == NULL) { + node = node->next; + continue; + } + effects.add(effect); + node = node->next; + } + return NO_ERROR; +} + +status_t AudioPolicyEffects::loadAudioEffectConfig(const char *path) +{ + cnode *root; + char *data; + + data = (char *)load_file(path, NULL); + if (data == NULL) { + return -ENODEV; + } + root = config_node("", ""); + config_load(root, data); + + Vector <EffectDesc *> effects; + loadEffects(root, effects); + loadInputEffectConfigurations(root, effects); + loadStreamEffectConfigurations(root, effects); + + config_free(root); + free(root); + free(data); + + return NO_ERROR; +} + + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h new file mode 100755 index 0000000..351cb1a --- /dev/null +++ b/services/audiopolicy/AudioPolicyEffects.h @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIOPOLICYEFFECTS_H +#define ANDROID_AUDIOPOLICYEFFECTS_H + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <cutils/misc.h> +#include <media/AudioEffect.h> +#include <system/audio.h> +#include <hardware/audio_effect.h> +#include <utils/Vector.h> +#include <utils/SortedVector.h> + +namespace android { + +// ---------------------------------------------------------------------------- + +// AudioPolicyEffects class +// This class will manage all effects attached to input and output streams in +// AudioPolicyService as configured in audio_effects.conf. +class AudioPolicyEffects : public RefBase +{ + +public: + + // The constructor will parse audio_effects.conf + // First it will look whether vendor specific file exists, + // otherwise it will parse the system default file. + AudioPolicyEffects(); + virtual ~AudioPolicyEffects(); + + // Return a list of effect descriptors for default input effects + // associated with audioSession + status_t queryDefaultInputEffects(int audioSession, + effect_descriptor_t *descriptors, + uint32_t *count); + + // Add all input effects associated with this input + // Effects are attached depending on the audio_source_t + status_t addInputEffects(audio_io_handle_t input, + audio_source_t inputSource, + int audioSession); + + // Add all input effects associated to this input + status_t releaseInputEffects(audio_io_handle_t input); + + + // Return a list of effect descriptors for default output effects + // associated with audioSession + status_t queryDefaultOutputSessionEffects(int audioSession, + effect_descriptor_t *descriptors, + uint32_t *count); + + // Add all output effects associated to this output + // Effects are attached depending on the audio_stream_type_t + status_t addOutputSessionEffects(audio_io_handle_t output, + audio_stream_type_t stream, + int audioSession); + + // release all output effects associated with this output stream and audiosession + status_t 
releaseOutputSessionEffects(audio_io_handle_t output, + audio_stream_type_t stream, + int audioSession); + +private: + + // class to store the description of an effects and its parameters + // as defined in audio_effects.conf + class EffectDesc { + public: + EffectDesc(const char *name, const effect_uuid_t& uuid) : + mName(strdup(name)), + mUuid(uuid) { } + EffectDesc(const EffectDesc& orig) : + mName(strdup(orig.mName)), + mUuid(orig.mUuid) { + // deep copy mParams + for (size_t k = 0; k < orig.mParams.size(); k++) { + effect_param_t *origParam = orig.mParams[k]; + // psize and vsize are rounded up to an int boundary for allocation + size_t origSize = sizeof(effect_param_t) + + ((origParam->psize + 3) & ~3) + + ((origParam->vsize + 3) & ~3); + effect_param_t *dupParam = (effect_param_t *) malloc(origSize); + memcpy(dupParam, origParam, origSize); + // This works because the param buffer allocation is also done by + // multiples of 4 bytes originally. In theory we should memcpy only + // the actual param size, that is without rounding vsize. 
+ mParams.add(dupParam); + } + } + /*virtual*/ ~EffectDesc() { + free(mName); + for (size_t k = 0; k < mParams.size(); k++) { + free(mParams[k]); + } + } + char *mName; + effect_uuid_t mUuid; + Vector <effect_param_t *> mParams; + }; + + // class to store voctor of EffectDesc + class EffectDescVector { + public: + EffectDescVector() {} + /*virtual*/ ~EffectDescVector() { + for (size_t j = 0; j < mEffects.size(); j++) { + delete mEffects[j]; + } + } + Vector <EffectDesc *> mEffects; + }; + + // class to store voctor of AudioEffects + class EffectVector { + public: + EffectVector(int session) : mSessionId(session) {} + /*virtual*/ ~EffectVector() {} + const int mSessionId; + Vector< sp<AudioEffect> >mEffects; + }; + + + static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1]; + audio_source_t inputSourceNameToEnum(const char *name); + + static const char *kStreamNames[AUDIO_STREAM_CNT+1]; //+1 required as streams start from -1 + audio_stream_type_t streamNameToEnum(const char *name); + + // Enable or disable all effects in effect vector + void setProcessorEnabled(const EffectVector *effectVector, bool enabled); + + // Parse audio_effects.conf + status_t loadAudioEffectConfig(const char *path); + + // Load all effects descriptors in configuration file + status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects); + EffectDesc *loadEffect(cnode *root); + + // Load all automatic effect configurations + status_t loadInputEffectConfigurations(cnode *root, const Vector <EffectDesc *>& effects); + status_t loadStreamEffectConfigurations(cnode *root, const Vector <EffectDesc *>& effects); + EffectDescVector *loadEffectConfig(cnode *root, const Vector <EffectDesc *>& effects); + + // Load all automatic effect parameters + void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params); + effect_param_t *loadEffectParameter(cnode *root); + size_t readParamValue(cnode *node, + char *param, + size_t *curSize, + size_t *totSize); + size_t 
growParamSize(char *param, + size_t size, + size_t *curSize, + size_t *totSize); + + // Automatic input effects are configured per audio_source_t + KeyedVector< audio_source_t, EffectDescVector* > mInputSources; + // Automatic input effects are unique for audio_io_handle_t + KeyedVector< audio_io_handle_t, EffectVector* > mInputs; + + // Automatic output effects are organized per audio_stream_type_t + KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams; + // Automatic output effects are unique for audiosession ID + KeyedVector< int32_t, EffectVector* > mOutputSessions; +}; + +}; // namespace android + +#endif // ANDROID_AUDIOPOLICYEFFECTS_H diff --git a/services/audiopolicy/AudioPolicyFactory.cpp b/services/audiopolicy/AudioPolicyFactory.cpp new file mode 100644 index 0000000..2ae7bc1 --- /dev/null +++ b/services/audiopolicy/AudioPolicyFactory.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "AudioPolicyManager.h" + +namespace android { + +extern "C" AudioPolicyInterface* createAudioPolicyManager( + AudioPolicyClientInterface *clientInterface) +{ + return new AudioPolicyManager(clientInterface); +} + +extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface) +{ + delete interface; +} + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h index c025a45..33e4397 100644 --- a/services/audiopolicy/AudioPolicyInterface.h +++ b/services/audiopolicy/AudioPolicyInterface.h @@ -90,6 +90,12 @@ public: audio_channel_mask_t channelMask, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo) = 0; + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) = 0; // indicates to the audio policy manager that the output starts being used by corresponding stream. 
virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp index 2b33703..5a13ac2 100644..100755 --- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp +++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp @@ -131,6 +131,22 @@ audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, format, channelMask, flags, offloadInfo); } +audio_io_handle_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + ALOGV("getOutput()"); + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->getOutputForAttr(attr, samplingRate, + format, channelMask, flags, offloadInfo); +} + status_t AudioPolicyService::startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session) @@ -140,6 +156,13 @@ status_t AudioPolicyService::startOutput(audio_io_handle_t output, } ALOGV("startOutput()"); Mutex::Autolock _l(mLock); + + // create audio processors according to stream + status_t status = mAudioPolicyEffects->addOutputSessionEffects(output, stream, session); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to add effects on session %d", session); + } + return mAudioPolicyManager->startOutput(output, stream, session); } @@ -161,6 +184,13 @@ status_t AudioPolicyService::doStopOutput(audio_io_handle_t output, { ALOGV("doStopOutput from tid %d", gettid()); Mutex::Autolock _l(mLock); + + // release audio processors from the stream + status_t status = mAudioPolicyEffects->releaseOutputSessionEffects(output, stream, session); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to release effects on session %d", session); + } + return 
mAudioPolicyManager->stopOutput(output, stream, session); } @@ -206,39 +236,13 @@ audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, if (input == 0) { return input; } - // create audio pre processors according to input source - audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ? - AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; - ssize_t index = mInputSources.indexOfKey(aliasSource); - if (index < 0) { - return input; + // create audio pre processors according to input source + status_t status = mAudioPolicyEffects->addInputEffects(input, inputSource, audioSession); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to add effects on input %d", input); } - ssize_t idx = mInputs.indexOfKey(input); - InputDesc *inputDesc; - if (idx < 0) { - inputDesc = new InputDesc(audioSession); - mInputs.add(input, inputDesc); - } else { - inputDesc = mInputs.valueAt(idx); - } - - Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; - for (size_t i = 0; i < effects.size(); i++) { - EffectDesc *effect = effects[i]; - sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); - status_t status = fx->initCheck(); - if (status != NO_ERROR && status != ALREADY_EXISTS) { - ALOGW("Failed to create Fx %s on input %d", effect->mName, input); - // fx goes out of scope and strong ref on AudioEffect is released - continue; - } - for (size_t j = 0; j < effect->mParams.size(); j++) { - fx->setParameter(effect->mParams[j]); - } - inputDesc->mEffects.add(fx); - } - setPreProcessorEnabled(inputDesc, true); + return input; } @@ -270,14 +274,11 @@ void AudioPolicyService::releaseInput(audio_io_handle_t input) Mutex::Autolock _l(mLock); mAudioPolicyManager->releaseInput(input); - ssize_t index = mInputs.indexOfKey(input); - if (index < 0) { - return; + // release audio processors from the input + status_t status = mAudioPolicyEffects->releaseInputEffects(input); + if(status != NO_ERROR) { + 
ALOGW("Failed to release effects on input %d", input); } - InputDesc *inputDesc = mInputs.valueAt(index); - setPreProcessorEnabled(inputDesc, false); - delete inputDesc; - mInputs.removeItemsAt(index); } status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream, @@ -420,37 +421,13 @@ status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession, effect_descriptor_t *descriptors, uint32_t *count) { - if (mAudioPolicyManager == NULL) { *count = 0; return NO_INIT; } Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - size_t index; - for (index = 0; index < mInputs.size(); index++) { - if (mInputs.valueAt(index)->mSessionId == audioSession) { - break; - } - } - if (index == mInputs.size()) { - *count = 0; - return BAD_VALUE; - } - Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; - - for (size_t i = 0; i < effects.size(); i++) { - effect_descriptor_t desc = effects[i]->descriptor(); - if (i < *count) { - descriptors[i] = desc; - } - } - if (effects.size() > *count) { - status = NO_MEMORY; - } - *count = effects.size(); - return status; + return mAudioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count); } bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info) @@ -470,6 +447,9 @@ status_t AudioPolicyService::listAudioPorts(audio_port_role_t role, unsigned int *generation) { Mutex::Autolock _l(mLock); + if(!modifyAudioRoutingAllowed()) { + return PERMISSION_DENIED; + } if (mAudioPolicyManager == NULL) { return NO_INIT; } @@ -480,6 +460,9 @@ status_t AudioPolicyService::listAudioPorts(audio_port_role_t role, status_t AudioPolicyService::getAudioPort(struct audio_port *port) { Mutex::Autolock _l(mLock); + if(!modifyAudioRoutingAllowed()) { + return PERMISSION_DENIED; + } if (mAudioPolicyManager == NULL) { return NO_INIT; } @@ -491,6 +474,9 @@ status_t AudioPolicyService::createAudioPatch(const struct audio_patch *patch, audio_patch_handle_t *handle) { Mutex::Autolock _l(mLock); 
+ if(!modifyAudioRoutingAllowed()) { + return PERMISSION_DENIED; + } if (mAudioPolicyManager == NULL) { return NO_INIT; } @@ -501,6 +487,9 @@ status_t AudioPolicyService::createAudioPatch(const struct audio_patch *patch, status_t AudioPolicyService::releaseAudioPatch(audio_patch_handle_t handle) { Mutex::Autolock _l(mLock); + if(!modifyAudioRoutingAllowed()) { + return PERMISSION_DENIED; + } if (mAudioPolicyManager == NULL) { return NO_INIT; } @@ -514,6 +503,9 @@ status_t AudioPolicyService::listAudioPatches(unsigned int *num_patches, unsigned int *generation) { Mutex::Autolock _l(mLock); + if(!modifyAudioRoutingAllowed()) { + return PERMISSION_DENIED; + } if (mAudioPolicyManager == NULL) { return NO_INIT; } @@ -524,6 +516,9 @@ status_t AudioPolicyService::listAudioPatches(unsigned int *num_patches, status_t AudioPolicyService::setAudioPortConfig(const struct audio_port_config *config) { Mutex::Autolock _l(mLock); + if(!modifyAudioRoutingAllowed()) { + return PERMISSION_DENIED; + } if (mAudioPolicyManager == NULL) { return NO_INIT; } diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp index 0bf4982..406988c 100644..100755 --- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp +++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp @@ -144,6 +144,13 @@ status_t AudioPolicyService::startOutput(audio_io_handle_t output, } ALOGV("startOutput()"); Mutex::Autolock _l(mLock); + + // create audio processors according to stream + status_t status = mAudioPolicyEffects->addOutputSessionEffects(output, stream, session); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to add effects on session %d", session); + } + return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session); } @@ -165,6 +172,13 @@ status_t AudioPolicyService::doStopOutput(audio_io_handle_t output, { ALOGV("doStopOutput from tid %d", gettid()); Mutex::Autolock _l(mLock); + + // release 
audio processors from the stream + status_t status = mAudioPolicyEffects->releaseOutputSessionEffects(output, stream, session); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to release effects on session %d", session); + } + return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session); } @@ -210,39 +224,13 @@ audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, if (input == 0) { return input; } - // create audio pre processors according to input source - audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ? - AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; - ssize_t index = mInputSources.indexOfKey(aliasSource); - if (index < 0) { - return input; + // create audio pre processors according to input source + status_t status = mAudioPolicyEffects->addInputEffects(input, inputSource, audioSession); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to add effects on input %d", input); } - ssize_t idx = mInputs.indexOfKey(input); - InputDesc *inputDesc; - if (idx < 0) { - inputDesc = new InputDesc(audioSession); - mInputs.add(input, inputDesc); - } else { - inputDesc = mInputs.valueAt(idx); - } - - Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; - for (size_t i = 0; i < effects.size(); i++) { - EffectDesc *effect = effects[i]; - sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); - status_t status = fx->initCheck(); - if (status != NO_ERROR && status != ALREADY_EXISTS) { - ALOGW("Failed to create Fx %s on input %d", effect->mName, input); - // fx goes out of scope and strong ref on AudioEffect is released - continue; - } - for (size_t j = 0; j < effect->mParams.size(); j++) { - fx->setParameter(effect->mParams[j]); - } - inputDesc->mEffects.add(fx); - } - setPreProcessorEnabled(inputDesc, true); + return input; } @@ -274,14 +262,11 @@ void AudioPolicyService::releaseInput(audio_io_handle_t input) Mutex::Autolock 
_l(mLock); mpAudioPolicy->release_input(mpAudioPolicy, input); - ssize_t index = mInputs.indexOfKey(input); - if (index < 0) { - return; + // release audio processors from the input + status_t status = mAudioPolicyEffects->releaseInputEffects(input); + if(status != NO_ERROR) { + ALOGW("Failed to release effects on input %d", input); } - InputDesc *inputDesc = mInputs.valueAt(index); - setPreProcessorEnabled(inputDesc, false); - delete inputDesc; - mInputs.removeItemsAt(index); } status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream, @@ -437,37 +422,13 @@ status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession, effect_descriptor_t *descriptors, uint32_t *count) { - if (mpAudioPolicy == NULL) { *count = 0; return NO_INIT; } Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - - size_t index; - for (index = 0; index < mInputs.size(); index++) { - if (mInputs.valueAt(index)->mSessionId == audioSession) { - break; - } - } - if (index == mInputs.size()) { - *count = 0; - return BAD_VALUE; - } - Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; - for (size_t i = 0; i < effects.size(); i++) { - effect_descriptor_t desc = effects[i]->descriptor(); - if (i < *count) { - descriptors[i] = desc; - } - } - if (effects.size() > *count) { - status = NO_MEMORY; - } - *count = effects.size(); - return status; + return mAudioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count); } bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info) diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp index bf5b9a8..95179b7 100644 --- a/services/audiopolicy/AudioPolicyManager.cpp +++ b/services/audiopolicy/AudioPolicyManager.cpp @@ -122,6 +122,11 @@ const StringToEnum sFormatNameToEnumTable[] = { STRING_TO_ENUM(AUDIO_FORMAT_MP3), STRING_TO_ENUM(AUDIO_FORMAT_AAC), STRING_TO_ENUM(AUDIO_FORMAT_VORBIS), + STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V1), + 
STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V2), + STRING_TO_ENUM(AUDIO_FORMAT_OPUS), + STRING_TO_ENUM(AUDIO_FORMAT_AC3), + STRING_TO_ENUM(AUDIO_FORMAT_E_AC3), }; const StringToEnum sOutChannelsNameToEnumTable[] = { @@ -224,7 +229,7 @@ status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device, index = mAvailableOutputDevices.add(devDesc); if (index >= 0) { mAvailableOutputDevices[index]->mId = nextUniqueId(); - HwModule *module = getModuleForDevice(device); + sp<HwModule> module = getModuleForDevice(device); ALOG_ASSERT(module != NULL, "setDeviceConnectionState():" "could not find HW module for device %08x", device); mAvailableOutputDevices[index]->mModule = module; @@ -261,7 +266,7 @@ status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device, // outputs must be closed after checkOutputForAllStrategies() is executed if (!outputs.isEmpty()) { for (size_t i = 0; i < outputs.size(); i++) { - AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]); + sp<AudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]); // close unused outputs after device disconnection or direct outputs that have been // opened by checkOutputsForDevice() to query dynamic parameters if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) || @@ -304,7 +309,7 @@ status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device, ALOGW("setDeviceConnectionState() device already connected: %d", device); return INVALID_OPERATION; } - HwModule *module = getModuleForDevice(device); + sp<HwModule> module = getModuleForDevice(device); if (module == NULL) { ALOGW("setDeviceConnectionState(): could not find HW module for device %08x", device); @@ -435,7 +440,7 @@ void AudioPolicyManager::setPhoneState(audio_mode_t state) checkOutputForAllStrategies(); updateDevicesAndOutputs(); - AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mPrimaryOutput); + sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput); // force routing command to audio 
hardware when ending call // even if no device change is needed @@ -447,7 +452,7 @@ void AudioPolicyManager::setPhoneState(audio_mode_t state) if (isStateInCall(state)) { nsecs_t sysTime = systemTime(); for (size_t i = 0; i < mOutputs.size(); i++) { - AudioOutputDescriptor *desc = mOutputs.valueAt(i); + sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); // mute media and sonification strategies and delay device switch by the largest // latency of any output where either strategy is active. // This avoid sending the ring tone or music tail into the earpiece or headset. @@ -511,7 +516,10 @@ void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage, config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY && config != AUDIO_POLICY_FORCE_ANALOG_DOCK && config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE && - config != AUDIO_POLICY_FORCE_NO_BT_A2DP) { + config != AUDIO_POLICY_FORCE_NO_BT_A2DP && + config != AUDIO_POLICY_FORCE_SYSTEM_AUDIO_HDMI_ARC && + config != AUDIO_POLICY_FORCE_SYSTEM_AUDIO_SPDIF && + config != AUDIO_POLICY_FORCE_SYSTEM_AUDIO_LINE) { ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config); return; } @@ -623,13 +631,53 @@ audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo) { - audio_io_handle_t output = 0; - uint32_t latency = 0; + routing_strategy strategy = getStrategy(stream); audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/); ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x", device, stream, samplingRate, format, channelMask, flags); + return getOutputForDevice(device, stream, samplingRate,format, channelMask, flags, + offloadInfo); +} + +audio_io_handle_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const 
audio_offload_info_t *offloadInfo) +{ + if (attr == NULL) { + ALOGE("getOutputForAttr() called with NULL audio attributes"); + return 0; + } + ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s", + attr->usage, attr->content_type, attr->tags); + + // TODO this is where filtering for custom policies (rerouting, dynamic sources) will go + routing_strategy strategy = (routing_strategy) getStrategyForAttr(attr); + audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/); + ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x", + device, samplingRate, format, channelMask, flags); + + audio_stream_type_t stream = streamTypefromAttributesInt(attr); + return getOutputForDevice(device, stream, samplingRate, format, channelMask, flags, + offloadInfo); +} + +audio_io_handle_t AudioPolicyManager::getOutputForDevice( + audio_devices_t device, + audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + audio_io_handle_t output = 0; + uint32_t latency = 0; + #ifdef AUDIO_POLICY_TEST if (mCurOutput != 0) { ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d", @@ -637,7 +685,7 @@ audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream, if (mTestOutputs[mCurOutput] == 0) { ALOGV("getOutput() opening test output"); - AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(NULL); + sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL); outputDesc->mDevice = mTestDevice; outputDesc->mSamplingRate = mTestSamplingRate; outputDesc->mFormat = mTestFormat; @@ -689,10 +737,10 @@ audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream, } if (profile != 0) { - AudioOutputDescriptor *outputDesc = NULL; + sp<AudioOutputDescriptor> outputDesc = NULL; for (size_t i = 0; i < mOutputs.size(); 
i++) { - AudioOutputDescriptor *desc = mOutputs.valueAt(i); + sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); if (!desc->isDuplicated() && (profile == desc->mProfile)) { outputDesc = desc; // reuse direct output if currently open and configured with same parameters @@ -740,7 +788,6 @@ audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream, if (output != 0) { mpClientInterface->closeOutput(output); } - delete outputDesc; return 0; } audio_io_handle_t srcOutput = getOutputForEffect(); @@ -797,7 +844,7 @@ audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_h audio_io_handle_t outputPrimary = 0; for (size_t i = 0; i < outputs.size(); i++) { - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(outputs[i]); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]); if (!outputDesc->isDuplicated()) { int commonFlags = popcount(outputDesc->mProfile->mFlags & flags); if (commonFlags > maxCommonFlags) { @@ -832,7 +879,7 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, return BAD_VALUE; } - AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); // increment usage count for this stream on the requested output: // NOTE that the usage count is the same for duplicated output and hardware output which is @@ -847,7 +894,7 @@ status_t AudioPolicyManager::startOutput(audio_io_handle_t output, uint32_t waitMs = 0; bool force = false; for (size_t i = 0; i < mOutputs.size(); i++) { - AudioOutputDescriptor *desc = mOutputs.valueAt(i); + sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); if (desc != outputDesc) { // force a device change if any other output is managed by the same hw // module and has a current device selection that differs from selected device. 
@@ -900,7 +947,7 @@ status_t AudioPolicyManager::stopOutput(audio_io_handle_t output, return BAD_VALUE; } - AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); // handle special case for sonification while in call if (isInCall()) { @@ -925,7 +972,7 @@ status_t AudioPolicyManager::stopOutput(audio_io_handle_t output, // one being selected for this output for (size_t i = 0; i < mOutputs.size(); i++) { audio_io_handle_t curOutput = mOutputs.keyAt(i); - AudioOutputDescriptor *desc = mOutputs.valueAt(i); + sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i); if (curOutput != output && desc->isActive() && outputDesc->sharesHwModuleWith(desc) && @@ -958,10 +1005,9 @@ void AudioPolicyManager::releaseOutput(audio_io_handle_t output) #ifdef AUDIO_POLICY_TEST int testIndex = testOutputIndex(output); if (testIndex != 0) { - AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index); if (outputDesc->isActive()) { mpClientInterface->closeOutput(output); - delete mOutputs.valueAt(index); mOutputs.removeItem(output); mTestOutputs[testIndex] = 0; } @@ -969,7 +1015,7 @@ void AudioPolicyManager::releaseOutput(audio_io_handle_t output) } #endif //AUDIO_POLICY_TEST - AudioOutputDescriptor *desc = mOutputs.valueAt(index); + sp<AudioOutputDescriptor> desc = mOutputs.valueAt(index); if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { if (desc->mDirectOpenCount <= 0) { ALOGW("releaseOutput() invalid open count %d for output %d", @@ -1038,7 +1084,7 @@ audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource, return 0; } - AudioInputDescriptor *inputDesc = new AudioInputDescriptor(profile); + sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile); inputDesc->mInputSource = inputSource; inputDesc->mDevice = device; @@ -1062,7 +1108,6 @@ audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource, if (input != 
0) { mpClientInterface->closeInput(input); } - delete inputDesc; return 0; } addInput(input, inputDesc); @@ -1078,7 +1123,7 @@ status_t AudioPolicyManager::startInput(audio_io_handle_t input) ALOGW("startInput() unknown input %d", input); return BAD_VALUE; } - AudioInputDescriptor *inputDesc = mInputs.valueAt(index); + sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index); #ifdef AUDIO_POLICY_TEST if (mTestInput == 0) @@ -1088,7 +1133,7 @@ status_t AudioPolicyManager::startInput(audio_io_handle_t input) // uses AUDIO_SOURCE_HOTWORD in which case it is closed. audio_io_handle_t activeInput = getActiveInput(); if (!isVirtualInputDevice(inputDesc->mDevice) && activeInput != 0) { - AudioInputDescriptor *activeDesc = mInputs.valueFor(activeInput); + sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput); if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) { ALOGW("startInput() preempting already started low-priority input %d", activeInput); stopInput(activeInput); @@ -1122,7 +1167,7 @@ status_t AudioPolicyManager::stopInput(audio_io_handle_t input) ALOGW("stopInput() unknown input %d", input); return BAD_VALUE; } - AudioInputDescriptor *inputDesc = mInputs.valueAt(index); + sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index); if (inputDesc->mRefCount == 0) { ALOGW("stopInput() input %d already stopped", input); @@ -1149,7 +1194,6 @@ void AudioPolicyManager::releaseInput(audio_io_handle_t input) return; } mpClientInterface->closeInput(input); - delete mInputs.valueAt(index); mInputs.removeItem(input); nextAudioPortGeneration(); mpClientInterface->onAudioPortListUpdate(); @@ -1258,7 +1302,7 @@ audio_io_handle_t AudioPolicyManager::selectOutputForEffects( audio_io_handle_t outputDeepBuffer = 0; for (size_t i = 0; i < outputs.size(); i++) { - AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]); + sp<AudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]); ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags); if 
((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { outputOffloaded = outputs[i]; @@ -1320,14 +1364,14 @@ status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc, desc->name, io, strategy, session, id); ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory); - EffectDescriptor *pDesc = new EffectDescriptor(); - memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t)); - pDesc->mIo = io; - pDesc->mStrategy = (routing_strategy)strategy; - pDesc->mSession = session; - pDesc->mEnabled = false; + sp<EffectDescriptor> effectDesc = new EffectDescriptor(); + memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t)); + effectDesc->mIo = io; + effectDesc->mStrategy = (routing_strategy)strategy; + effectDesc->mSession = session; + effectDesc->mEnabled = false; - mEffects.add(id, pDesc); + mEffects.add(id, effectDesc); return NO_ERROR; } @@ -1340,21 +1384,20 @@ status_t AudioPolicyManager::unregisterEffect(int id) return INVALID_OPERATION; } - EffectDescriptor *pDesc = mEffects.valueAt(index); + sp<EffectDescriptor> effectDesc = mEffects.valueAt(index); - setEffectEnabled(pDesc, false); + setEffectEnabled(effectDesc, false); - if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) { + if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) { ALOGW("unregisterEffect() memory %d too big for total %d", - pDesc->mDesc.memoryUsage, mTotalEffectsMemory); - pDesc->mDesc.memoryUsage = mTotalEffectsMemory; + effectDesc->mDesc.memoryUsage, mTotalEffectsMemory); + effectDesc->mDesc.memoryUsage = mTotalEffectsMemory; } - mTotalEffectsMemory -= pDesc->mDesc.memoryUsage; + mTotalEffectsMemory -= effectDesc->mDesc.memoryUsage; ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d", - pDesc->mDesc.name, id, pDesc->mDesc.memoryUsage, mTotalEffectsMemory); + effectDesc->mDesc.name, id, effectDesc->mDesc.memoryUsage, mTotalEffectsMemory); mEffects.removeItem(id); - delete pDesc; return NO_ERROR; } @@ -1370,43 
+1413,43 @@ status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled) return setEffectEnabled(mEffects.valueAt(index), enabled); } -status_t AudioPolicyManager::setEffectEnabled(EffectDescriptor *pDesc, bool enabled) +status_t AudioPolicyManager::setEffectEnabled(const sp<EffectDescriptor>& effectDesc, bool enabled) { - if (enabled == pDesc->mEnabled) { + if (enabled == effectDesc->mEnabled) { ALOGV("setEffectEnabled(%s) effect already %s", enabled?"true":"false", enabled?"enabled":"disabled"); return INVALID_OPERATION; } if (enabled) { - if (mTotalEffectsCpuLoad + pDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) { + if (mTotalEffectsCpuLoad + effectDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) { ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS", - pDesc->mDesc.name, (float)pDesc->mDesc.cpuLoad/10); + effectDesc->mDesc.name, (float)effectDesc->mDesc.cpuLoad/10); return INVALID_OPERATION; } - mTotalEffectsCpuLoad += pDesc->mDesc.cpuLoad; + mTotalEffectsCpuLoad += effectDesc->mDesc.cpuLoad; ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad); } else { - if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) { + if (mTotalEffectsCpuLoad < effectDesc->mDesc.cpuLoad) { ALOGW("setEffectEnabled(false) CPU load %d too high for total %d", - pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad); - pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad; + effectDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad); + effectDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad; } - mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad; + mTotalEffectsCpuLoad -= effectDesc->mDesc.cpuLoad; ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad); } - pDesc->mEnabled = enabled; + effectDesc->mEnabled = enabled; return NO_ERROR; } bool AudioPolicyManager::isNonOffloadableEffectEnabled() { for (size_t i = 0; i < mEffects.size(); i++) { - const EffectDescriptor * const pDesc = mEffects.valueAt(i); - if (pDesc->mEnabled && (pDesc->mStrategy == STRATEGY_MEDIA) && - 
((pDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) { + sp<EffectDescriptor> effectDesc = mEffects.valueAt(i); + if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) && + ((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) { ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d", - pDesc->mDesc.name, pDesc->mSession); + effectDesc->mDesc.name, effectDesc->mSession); return true; } } @@ -1417,7 +1460,7 @@ bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inP { nsecs_t sysTime = systemTime(); for (size_t i = 0; i < mOutputs.size(); i++) { - const AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i); + const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i); if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) { return true; } @@ -1430,7 +1473,7 @@ bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, { nsecs_t sysTime = systemTime(); for (size_t i = 0; i < mOutputs.size(); i++) { - const AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i); + const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i); if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) && outputDesc->isStreamActive(stream, inPastMs, sysTime)) { return true; @@ -1442,7 +1485,7 @@ bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, bool AudioPolicyManager::isSourceActive(audio_source_t source) const { for (size_t i = 0; i < mInputs.size(); i++) { - const AudioInputDescriptor * inputDescriptor = mInputs.valueAt(i); + const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i); if ((inputDescriptor->mInputSource == (int)source || (source == AUDIO_SOURCE_VOICE_RECOGNITION && inputDescriptor->mInputSource == AUDIO_SOURCE_HOTWORD)) @@ -1520,7 +1563,7 @@ status_t AudioPolicyManager::dump(int fd) snprintf(buffer, SIZE, " Stream Can be muted Index Min Index Max Index Cur [device : index]...\n"); write(fd, buffer, 
strlen(buffer)); - for (int i = 0; i < AUDIO_STREAM_CNT; i++) { + for (size_t i = 0; i < AUDIO_STREAM_CNT; i++) { snprintf(buffer, SIZE, " %02zu ", i); write(fd, buffer, strlen(buffer)); mStreams[i].dump(fd); @@ -1651,14 +1694,20 @@ status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role, *num_ports += mInputs.size(); } if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) { - for (size_t i = 0; i < mOutputs.size() && portsWritten < portsMax; i++) { - mOutputs[i]->toAudioPort(&ports[portsWritten++]); + size_t numOutputs = 0; + for (size_t i = 0; i < mOutputs.size(); i++) { + if (!mOutputs[i]->isDuplicated()) { + numOutputs++; + if (portsWritten < portsMax) { + mOutputs[i]->toAudioPort(&ports[portsWritten++]); + } + } } - *num_ports += mOutputs.size(); + *num_ports += numOutputs; } } *generation = curAudioPortGeneration(); - ALOGV("listAudioPorts() got %d ports needed %d", portsWritten, *num_ports); + ALOGV("listAudioPorts() got %zu ports needed %d", portsWritten, *num_ports); return NO_ERROR; } @@ -1667,10 +1716,10 @@ status_t AudioPolicyManager::getAudioPort(struct audio_port *port __unused) return NO_ERROR; } -AudioPolicyManager::AudioOutputDescriptor *AudioPolicyManager::getOutputFromId( +sp<AudioPolicyManager::AudioOutputDescriptor> AudioPolicyManager::getOutputFromId( audio_port_handle_t id) const { - AudioOutputDescriptor *outputDesc = NULL; + sp<AudioOutputDescriptor> outputDesc = NULL; for (size_t i = 0; i < mOutputs.size(); i++) { outputDesc = mOutputs.valueAt(i); if (outputDesc->mId == id) { @@ -1680,10 +1729,10 @@ AudioPolicyManager::AudioOutputDescriptor *AudioPolicyManager::getOutputFromId( return outputDesc; } -AudioPolicyManager::AudioInputDescriptor *AudioPolicyManager::getInputFromId( +sp<AudioPolicyManager::AudioInputDescriptor> AudioPolicyManager::getInputFromId( audio_port_handle_t id) const { - AudioInputDescriptor *inputDesc = NULL; + sp<AudioInputDescriptor> inputDesc = NULL; for (size_t i = 0; i < mInputs.size(); 
i++) { inputDesc = mInputs.valueAt(i); if (inputDesc->mId == id) { @@ -1693,8 +1742,11 @@ AudioPolicyManager::AudioInputDescriptor *AudioPolicyManager::getInputFromId( return inputDesc; } -AudioPolicyManager::HwModule *AudioPolicyManager::getModuleForDevice(audio_devices_t device) const +sp <AudioPolicyManager::HwModule> AudioPolicyManager::getModuleForDevice( + audio_devices_t device) const { + sp <HwModule> module; + for (size_t i = 0; i < mHwModules.size(); i++) { if (mHwModules[i]->mHandle == 0) { continue; @@ -1715,18 +1767,20 @@ AudioPolicyManager::HwModule *AudioPolicyManager::getModuleForDevice(audio_devic } } } - return NULL; + return module; } -AudioPolicyManager::HwModule *AudioPolicyManager::getModuleFromName(const char *name) const +sp <AudioPolicyManager::HwModule> AudioPolicyManager::getModuleFromName(const char *name) const { + sp <HwModule> module; + for (size_t i = 0; i < mHwModules.size(); i++) { if (strcmp(mHwModules[i]->mName, name) == 0) { return mHwModules[i]; } } - return NULL; + return module; } @@ -1776,11 +1830,13 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, return BAD_VALUE; } // output mix to output device connection - AudioOutputDescriptor *outputDesc = getOutputFromId(patch->sources[0].id); + sp<AudioOutputDescriptor> outputDesc = getOutputFromId(patch->sources[0].id); if (outputDesc == NULL) { ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id); return BAD_VALUE; } + ALOG_ASSERT(!outputDesc->isDuplicated(),"duplicated output %d in source in ports", + outputDesc->mIoHandle); if (patchDesc != 0) { if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) { ALOGV("createAudioPatch() source id differs for patch current id %d new id %d", @@ -1795,7 +1851,7 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, return BAD_VALUE; } - if (!outputDesc->mProfile->isCompatibleProfile(devDesc->mType, + if 
(!outputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType, patch->sources[0].sample_rate, patch->sources[0].format, patch->sources[0].channel_mask, @@ -1804,9 +1860,9 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, } // TODO: reconfigure output format and channels here ALOGV("createAudioPatch() setting device %08x on output %d", - devDesc->mType, outputDesc->mIoHandle); + devDesc->mDeviceType, outputDesc->mIoHandle); setOutputDevice(outputDesc->mIoHandle, - devDesc->mType, + devDesc->mDeviceType, true, 0, handle); @@ -1825,7 +1881,7 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) { if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) { // input device to input mix connection - AudioInputDescriptor *inputDesc = getInputFromId(patch->sinks[0].id); + sp<AudioInputDescriptor> inputDesc = getInputFromId(patch->sinks[0].id); if (inputDesc == NULL) { return BAD_VALUE; } @@ -1840,7 +1896,7 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, return BAD_VALUE; } - if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mType, + if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType, patch->sinks[0].sample_rate, patch->sinks[0].format, patch->sinks[0].channel_mask, @@ -1849,9 +1905,9 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, } // TODO: reconfigure output format and channels here ALOGV("createAudioPatch() setting device %08x on output %d", - devDesc->mType, inputDesc->mIoHandle); + devDesc->mDeviceType, inputDesc->mIoHandle); setInputDevice(inputDesc->mIoHandle, - devDesc->mType, + devDesc->mDeviceType, true, handle); index = mAudioPatches.indexOfKey(*handle); @@ -1950,7 +2006,7 @@ status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle, struct audio_patch *patch = &patchDesc->mPatch; patchDesc->mUid = mUidCached; if (patch->sources[0].type == 
AUDIO_PORT_TYPE_MIX) { - AudioOutputDescriptor *outputDesc = getOutputFromId(patch->sources[0].id); + sp<AudioOutputDescriptor> outputDesc = getOutputFromId(patch->sources[0].id); if (outputDesc == NULL) { ALOGV("releaseAudioPatch() output not found for id %d", patch->sources[0].id); return BAD_VALUE; @@ -1963,7 +2019,7 @@ status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle, NULL); } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) { if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) { - AudioInputDescriptor *inputDesc = getInputFromId(patch->sinks[0].id); + sp<AudioInputDescriptor> inputDesc = getInputFromId(patch->sinks[0].id); if (inputDesc == NULL) { ALOGV("releaseAudioPatch() input not found for id %d", patch->sinks[0].id); return BAD_VALUE; @@ -1997,7 +2053,7 @@ status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches, generation == NULL) { return BAD_VALUE; } - ALOGV("listAudioPatches() num_patches %d patches %p available patches %d", + ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu", *num_patches, patches, mAudioPatches.size()); if (patches == NULL) { *num_patches = 0; @@ -2009,13 +2065,13 @@ status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches, i < mAudioPatches.size() && patchesWritten < patchesMax; i++) { patches[patchesWritten] = mAudioPatches[i]->mPatch; patches[patchesWritten++].id = mAudioPatches[i]->mHandle; - ALOGV("listAudioPatches() patch %d num_sources %d num_sinks %d", + ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d", i, mAudioPatches[i]->mPatch.num_sources, mAudioPatches[i]->mPatch.num_sinks); } *num_patches = mAudioPatches.size(); *generation = curAudioPortGeneration(); - ALOGV("listAudioPatches() got %d patches needed %d", patchesWritten, *num_patches); + ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches); return NO_ERROR; } @@ -2028,27 +2084,27 @@ status_t AudioPolicyManager::setAudioPortConfig(const 
struct audio_port_config * } ALOGV("setAudioPortConfig() on port handle %d", config->id); // Only support gain configuration for now - if (config->config_mask != AUDIO_PORT_CONFIG_GAIN || config->gain.index < 0) { - return BAD_VALUE; + if (config->config_mask != AUDIO_PORT_CONFIG_GAIN) { + return INVALID_OPERATION; } - sp<AudioPort> portDesc; - struct audio_port_config portConfig; + sp<AudioPortConfig> audioPortConfig; if (config->type == AUDIO_PORT_TYPE_MIX) { if (config->role == AUDIO_PORT_ROLE_SOURCE) { - AudioOutputDescriptor *outputDesc = getOutputFromId(config->id); + sp<AudioOutputDescriptor> outputDesc = getOutputFromId(config->id); if (outputDesc == NULL) { return BAD_VALUE; } - portDesc = outputDesc->mProfile; - outputDesc->toAudioPortConfig(&portConfig); + ALOG_ASSERT(!outputDesc->isDuplicated(), + "setAudioPortConfig() called on duplicated output %d", + outputDesc->mIoHandle); + audioPortConfig = outputDesc; } else if (config->role == AUDIO_PORT_ROLE_SINK) { - AudioInputDescriptor *inputDesc = getInputFromId(config->id); + sp<AudioInputDescriptor> inputDesc = getInputFromId(config->id); if (inputDesc == NULL) { return BAD_VALUE; } - portDesc = inputDesc->mProfile; - inputDesc->toAudioPortConfig(&portConfig); + audioPortConfig = inputDesc; } else { return BAD_VALUE; } @@ -2064,46 +2120,21 @@ status_t AudioPolicyManager::setAudioPortConfig(const struct audio_port_config * if (deviceDesc == NULL) { return BAD_VALUE; } - portDesc = deviceDesc; - deviceDesc->toAudioPortConfig(&portConfig); + audioPortConfig = deviceDesc; } else { return BAD_VALUE; } - if ((size_t)config->gain.index >= portDesc->mGains.size()) { - return INVALID_OPERATION; - } - const struct audio_gain *gain = &portDesc->mGains[config->gain.index]->mGain; - if ((config->gain.mode & ~gain->mode) != 0) { - return BAD_VALUE; + struct audio_port_config backupConfig; + status_t status = audioPortConfig->applyAudioPortConfig(config, &backupConfig); + if (status == NO_ERROR) { + struct 
audio_port_config newConfig; + audioPortConfig->toAudioPortConfig(&newConfig, config); + status = mpClientInterface->setAudioPortConfig(&newConfig, 0); } - if ((config->gain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) { - if ((config->gain.values[0] < gain->min_value) || - (config->gain.values[0] > gain->max_value)) { - return BAD_VALUE; - } - } else { - if ((config->gain.channel_mask & ~gain->channel_mask) != 0) { - return BAD_VALUE; - } - size_t numValues = popcount(config->gain.channel_mask); - for (size_t i = 0; i < numValues; i++) { - if ((config->gain.values[i] < gain->min_value) || - (config->gain.values[i] > gain->max_value)) { - return BAD_VALUE; - } - } + if (status != NO_ERROR) { + audioPortConfig->applyAudioPortConfig(&backupConfig); } - if ((config->gain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) { - if ((config->gain.ramp_duration_ms < gain->min_ramp_ms) || - (config->gain.ramp_duration_ms > gain->max_ramp_ms)) { - return BAD_VALUE; - } - } - - portConfig.gain = config->gain; - - status_t status = mpClientInterface->setAudioPortConfig(&portConfig, 0); return status; } @@ -2223,7 +2254,7 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa audio_devices_t profileTypes = outProfile->mSupportedDevices.types(); if ((profileTypes & outputDeviceTypes) && ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) { - AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(outProfile); + sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile); outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice->mDeviceType & profileTypes); audio_io_handle_t output = mpClientInterface->openOutput( @@ -2238,7 +2269,6 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa ALOGW("Cannot open output stream for device %08x on hw module %s", outputDesc->mDevice, mHwModules[i]->mName); - delete outputDesc; } else { for (size_t k = 0; k < 
outProfile->mSupportedDevices.size(); k++) { audio_devices_t type = outProfile->mSupportedDevices[k]->mDeviceType; @@ -2275,7 +2305,7 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa audio_devices_t profileTypes = inProfile->mSupportedDevices.types(); if (profileTypes & inputDeviceTypes) { - AudioInputDescriptor *inputDesc = new AudioInputDescriptor(inProfile); + sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile); inputDesc->mInputSource = AUDIO_SOURCE_MIC; inputDesc->mDevice = inProfile->mSupportedDevices[0]->mDeviceType; @@ -2303,7 +2333,6 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa inputDesc->mDevice, mHwModules[i]->mName); } - delete inputDesc; } } } @@ -2365,17 +2394,15 @@ AudioPolicyManager::~AudioPolicyManager() #endif //AUDIO_POLICY_TEST for (size_t i = 0; i < mOutputs.size(); i++) { mpClientInterface->closeOutput(mOutputs.keyAt(i)); - delete mOutputs.valueAt(i); } for (size_t i = 0; i < mInputs.size(); i++) { mpClientInterface->closeInput(mInputs.keyAt(i)); - delete mInputs.valueAt(i); - } - for (size_t i = 0; i < mHwModules.size(); i++) { - delete mHwModules[i]; } mAvailableOutputDevices.clear(); mAvailableInputDevices.clear(); + mOutputs.clear(); + mInputs.clear(); + mHwModules.clear(); } status_t AudioPolicyManager::initCheck() @@ -2479,15 +2506,14 @@ bool AudioPolicyManager::threadLoop() if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) { param.remove(String8("test_cmd_policy_reopen")); - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mPrimaryOutput); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput); mpClientInterface->closeOutput(mPrimaryOutput); audio_module_handle_t moduleHandle = outputDesc->mModule->mHandle; - delete mOutputs.valueFor(mPrimaryOutput); mOutputs.removeItem(mPrimaryOutput); - AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(NULL); + sp<AudioOutputDescriptor> outputDesc = new 
AudioOutputDescriptor(NULL); outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER; mPrimaryOutput = mpClientInterface->openOutput(moduleHandle, &outputDesc->mDevice, @@ -2535,7 +2561,7 @@ int AudioPolicyManager::testOutputIndex(audio_io_handle_t output) // --- -void AudioPolicyManager::addOutput(audio_io_handle_t output, AudioOutputDescriptor *outputDesc) +void AudioPolicyManager::addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc) { outputDesc->mIoHandle = output; outputDesc->mId = nextUniqueId(); @@ -2543,7 +2569,7 @@ void AudioPolicyManager::addOutput(audio_io_handle_t output, AudioOutputDescript nextAudioPortGeneration(); } -void AudioPolicyManager::addInput(audio_io_handle_t input, AudioInputDescriptor *inputDesc) +void AudioPolicyManager::addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc) { inputDesc->mIoHandle = input; inputDesc->mId = nextUniqueId(); @@ -2564,7 +2590,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device, SortedVector<audio_io_handle_t>& outputs, const String8 address) { - AudioOutputDescriptor *desc; + sp<AudioOutputDescriptor> desc; if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) { // first list already open outputs that can be routed to this device @@ -2707,7 +2733,7 @@ status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device, mPrimaryOutput); if (duplicatedOutput != 0) { // add duplicated output descriptor - AudioOutputDescriptor *dupOutputDesc = new AudioOutputDescriptor(NULL); + sp<AudioOutputDescriptor> dupOutputDesc = new AudioOutputDescriptor(NULL); dupOutputDesc->mOutput1 = mOutputs.valueFor(mPrimaryOutput); dupOutputDesc->mOutput2 = mOutputs.valueFor(output); dupOutputDesc->mSamplingRate = desc->mSamplingRate; @@ -2729,7 +2755,6 @@ status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device, } if (output == 0) { ALOGW("checkOutputsForDevice() could not open output for device %x", device); - delete desc; profiles.removeAt(profile_index); 
profile_index--; } else { @@ -2789,7 +2814,7 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, SortedVector<audio_io_handle_t>& inputs, const String8 address) { - AudioInputDescriptor *desc; + sp<AudioInputDescriptor> desc; if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) { // first list already open inputs that can be routed to this device for (size_t input_index = 0; input_index < mInputs.size(); input_index++) { @@ -2813,7 +2838,7 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, { if (mHwModules[module_idx]->mInputProfiles[profile_index]->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) { - ALOGV("checkInputsForDevice(): adding profile %d from module %d", + ALOGV("checkInputsForDevice(): adding profile %zu from module %zu", profile_index, module_idx); profiles.add(mHwModules[module_idx]->mInputProfiles[profile_index]); } @@ -2904,7 +2929,6 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, if (input == 0) { ALOGW("checkInputsForDevice() could not open input for device 0x%X", device); - delete desc; profiles.removeAt(profile_index); profile_index--; } else { @@ -2938,7 +2962,7 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, profile_index++) { sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index]; if (profile->mSupportedDevices.types() & device) { - ALOGV("checkInputsForDevice(): clearing direct input profile %d on module %d", + ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu", profile_index, module_index); if (profile->mSamplingRates[0] == 0) { profile->mSamplingRates.clear(); @@ -2965,7 +2989,7 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output) { ALOGV("closeOutput(%d)", output); - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); if (outputDesc == NULL) { ALOGW("closeOutput() unknown output 
%d", output); return; @@ -2973,11 +2997,11 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output) // look for duplicated outputs connected to the output being removed. for (size_t i = 0; i < mOutputs.size(); i++) { - AudioOutputDescriptor *dupOutputDesc = mOutputs.valueAt(i); + sp<AudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i); if (dupOutputDesc->isDuplicated() && (dupOutputDesc->mOutput1 == outputDesc || dupOutputDesc->mOutput2 == outputDesc)) { - AudioOutputDescriptor *outputDesc2; + sp<AudioOutputDescriptor> outputDesc2; if (dupOutputDesc->mOutput1 == outputDesc) { outputDesc2 = dupOutputDesc->mOutput2; } else { @@ -2995,7 +3019,6 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output) ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput); mpClientInterface->closeOutput(duplicatedOutput); - delete mOutputs.valueFor(duplicatedOutput); mOutputs.removeItem(duplicatedOutput); } } @@ -3005,14 +3028,13 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output) mpClientInterface->setParameters(output, param.toString()); mpClientInterface->closeOutput(output); - delete outputDesc; mOutputs.removeItem(output); mPreviousOutputs = mOutputs; nextAudioPortGeneration(); } SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device, - DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> openOutputs) + DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > openOutputs) { SortedVector<audio_io_handle_t> outputs; @@ -3054,7 +3076,7 @@ void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy) strategy, srcOutputs[0], dstOutputs[0]); // mute strategy while moving tracks from one output to another for (size_t i = 0; i < srcOutputs.size(); i++) { - AudioOutputDescriptor *desc = mOutputs.valueFor(srcOutputs[i]); + sp<AudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]); if (desc->isStrategyActive(strategy)) { setStrategyMute(strategy, true, 
srcOutputs[i]); setStrategyMute(strategy, false, srcOutputs[i], MUTE_TIME_MS, newDevice); @@ -3066,17 +3088,17 @@ void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy) audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs); SortedVector<audio_io_handle_t> moved; for (size_t i = 0; i < mEffects.size(); i++) { - EffectDescriptor *desc = mEffects.valueAt(i); - if (desc->mSession == AUDIO_SESSION_OUTPUT_MIX && - desc->mIo != fxOutput) { - if (moved.indexOf(desc->mIo) < 0) { + sp<EffectDescriptor> effectDesc = mEffects.valueAt(i); + if (effectDesc->mSession == AUDIO_SESSION_OUTPUT_MIX && + effectDesc->mIo != fxOutput) { + if (moved.indexOf(effectDesc->mIo) < 0) { ALOGV("checkOutputForStrategy() moving effect %d to output %d", mEffects.keyAt(i), fxOutput); - mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, desc->mIo, + mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, effectDesc->mIo, fxOutput); - moved.add(desc->mIo); + moved.add(effectDesc->mIo); } - desc->mIo = fxOutput; + effectDesc->mIo = fxOutput; } } } @@ -3102,7 +3124,7 @@ void AudioPolicyManager::checkOutputForAllStrategies() audio_io_handle_t AudioPolicyManager::getA2dpOutput() { for (size_t i = 0; i < mOutputs.size(); i++) { - AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i); if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) { return mOutputs.keyAt(i); } @@ -3160,7 +3182,7 @@ audio_devices_t AudioPolicyManager::getNewOutputDevice(audio_io_handle_t output, { audio_devices_t device = AUDIO_DEVICE_NONE; - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle); if (index >= 0) { @@ -3206,7 +3228,7 @@ audio_devices_t AudioPolicyManager::getNewOutputDevice(audio_io_handle_t output, audio_devices_t 
AudioPolicyManager::getNewInputDevice(audio_io_handle_t input) { - AudioInputDescriptor *inputDesc = mInputs.valueFor(input); + sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input); ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle); if (index >= 0) { @@ -3240,7 +3262,7 @@ audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stre devices = getDeviceForStrategy(strategy, true /*fromCache*/); SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(devices, mOutputs); for (size_t i = 0; i < outputs.size(); i++) { - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(outputs[i]); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]); if (outputDesc->isStrategyActive(strategy)) { devices = outputDesc->device(); break; @@ -3276,6 +3298,44 @@ AudioPolicyManager::routing_strategy AudioPolicyManager::getStrategy( } } +uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) { + // flags to strategy mapping + if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) { + return (uint32_t) STRATEGY_ENFORCED_AUDIBLE; + } + + // usage to strategy mapping + switch (attr->usage) { + case AUDIO_USAGE_MEDIA: + case AUDIO_USAGE_GAME: + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: + case AUDIO_USAGE_ASSISTANCE_SONIFICATION: + return (uint32_t) STRATEGY_MEDIA; + + case AUDIO_USAGE_VOICE_COMMUNICATION: + return (uint32_t) STRATEGY_PHONE; + + case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING: + return (uint32_t) STRATEGY_DTMF; + + case AUDIO_USAGE_ALARM: + case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE: + return (uint32_t) STRATEGY_SONIFICATION; + + case AUDIO_USAGE_NOTIFICATION: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED: + case AUDIO_USAGE_NOTIFICATION_EVENT: + return (uint32_t) 
STRATEGY_SONIFICATION_RESPECTFUL; + + case AUDIO_USAGE_UNKNOWN: + default: + return (uint32_t) STRATEGY_MEDIA; + } +} + void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) { switch(stream) { case AUDIO_STREAM_MUSIC: @@ -3479,10 +3539,32 @@ audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strate if (device2 == AUDIO_DEVICE_NONE) { device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER; } + int device3 = AUDIO_DEVICE_NONE; + if (strategy == STRATEGY_MEDIA) { + // Hdmi system audio should use manually configured device type. + if (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] + == AUDIO_POLICY_FORCE_SYSTEM_AUDIO_HDMI_ARC) { + device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_HDMI_ARC; + } else if (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] + == AUDIO_POLICY_FORCE_SYSTEM_AUDIO_SPDIF) { + device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPDIF; + } else if (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] + == AUDIO_POLICY_FORCE_SYSTEM_AUDIO_LINE) { + device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE; + } + } + // Merge hdmi cec system audio and existing device for media. If system audio is on, + // internal speaker will be muted but others are not. + device2 |= device3; // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise device |= device2; + + // If system audio mode is on and proper audio out is set, remove speaker from device. 
+ if (device3 != AUDIO_DEVICE_NONE) { + device &= ~AUDIO_DEVICE_OUT_SPEAKER; + } if (device) break; device = mDefaultOutputDevice->mDeviceType; if (device == AUDIO_DEVICE_NONE) { @@ -3507,7 +3589,7 @@ void AudioPolicyManager::updateDevicesAndOutputs() mPreviousOutputs = mOutputs; } -uint32_t AudioPolicyManager::checkDeviceMuteStrategies(AudioOutputDescriptor *outputDesc, +uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc, audio_devices_t prevDevice, uint32_t delayMs) { @@ -3536,7 +3618,7 @@ uint32_t AudioPolicyManager::checkDeviceMuteStrategies(AudioOutputDescriptor *ou } if (doMute) { for (size_t j = 0; j < mOutputs.size(); j++) { - AudioOutputDescriptor *desc = mOutputs.valueAt(j); + sp<AudioOutputDescriptor> desc = mOutputs.valueAt(j); // skip output if it does not share any device with current output if ((desc->supportedDevices() & outputDesc->supportedDevices()) == AUDIO_DEVICE_NONE) { @@ -3594,7 +3676,7 @@ uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output, audio_patch_handle_t *patchHandle) { ALOGV("setOutputDevice() output %d device %04x delayMs %d", output, device, delayMs); - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); AudioParameter param; uint32_t muteWaitMs; @@ -3696,7 +3778,7 @@ status_t AudioPolicyManager::resetOutputDevice(audio_io_handle_t output, int delayMs, audio_patch_handle_t *patchHandle) { - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); ssize_t index; if (patchHandle) { index = mAudioPatches.indexOfKey(*patchHandle); @@ -3723,7 +3805,7 @@ status_t AudioPolicyManager::setInputDevice(audio_io_handle_t input, { status_t status = NO_ERROR; - AudioInputDescriptor *inputDesc = mInputs.valueFor(input); + sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input); if ((device != AUDIO_DEVICE_NONE) && ((device != 
inputDesc->mDevice) || force)) { inputDesc->mDevice = device; @@ -3778,7 +3860,7 @@ status_t AudioPolicyManager::setInputDevice(audio_io_handle_t input, status_t AudioPolicyManager::resetInputDevice(audio_io_handle_t input, audio_patch_handle_t *patchHandle) { - AudioInputDescriptor *inputDesc = mInputs.valueFor(input); + sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input); ssize_t index; if (patchHandle) { index = mAudioPatches.indexOfKey(*patchHandle); @@ -3898,7 +3980,7 @@ bool AudioPolicyManager::isVirtualInputDevice(audio_devices_t device) audio_io_handle_t AudioPolicyManager::getActiveInput(bool ignoreVirtualInputs) { for (size_t i = 0; i < mInputs.size(); i++) { - const AudioInputDescriptor * input_descriptor = mInputs.valueAt(i); + const sp<AudioInputDescriptor> input_descriptor = mInputs.valueAt(i); if ((input_descriptor->mRefCount > 0) && (!ignoreVirtualInputs || !isVirtualInputDevice(input_descriptor->mDevice))) { return mInputs.keyAt(i); @@ -4021,6 +4103,11 @@ const AudioPolicyManager::VolumeCurvePoint }; const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sSpeakerMediaVolumeCurveDrc[AudioPolicyManager::VOLCNT] = { + {1, -56.0f}, {20, -34.0f}, {86, -10.0f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint AudioPolicyManager::sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT] = { {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f} }; @@ -4134,6 +4221,8 @@ void AudioPolicyManager::initializeVolumeCurves() sSpeakerSonificationVolumeCurveDrc; mStreams[AUDIO_STREAM_NOTIFICATION].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] = sSpeakerSonificationVolumeCurveDrc; + mStreams[AUDIO_STREAM_MUSIC].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] = + sSpeakerMediaVolumeCurveDrc; } } @@ -4143,7 +4232,7 @@ float AudioPolicyManager::computeVolume(audio_stream_type_t stream, audio_devices_t device) { float volume = 1.0; - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + sp<AudioOutputDescriptor> outputDesc = 
mOutputs.valueFor(output); StreamDescriptor &streamDesc = mStreams[stream]; if (device == AUDIO_DEVICE_NONE) { @@ -4154,9 +4243,7 @@ float AudioPolicyManager::computeVolume(audio_stream_type_t stream, if (stream == AUDIO_STREAM_MUSIC && index != mStreams[stream].mIndexMin && (device == AUDIO_DEVICE_OUT_AUX_DIGITAL || - device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET || - device == AUDIO_DEVICE_OUT_USB_ACCESSORY || - device == AUDIO_DEVICE_OUT_USB_DEVICE)) { + device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET)) { return 1.0; } @@ -4299,7 +4386,7 @@ void AudioPolicyManager::setStreamMute(audio_stream_type_t stream, audio_devices_t device) { StreamDescriptor &streamDesc = mStreams[stream]; - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); if (device == AUDIO_DEVICE_NONE) { device = outputDesc->device(); } @@ -4344,7 +4431,7 @@ void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream, const routing_strategy stream_strategy = getStrategy(stream); if ((stream_strategy == STRATEGY_SONIFICATION) || ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) { - AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mPrimaryOutput); + sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput); ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d", stream, starting, outputDesc->mDevice, stateChange); if (outputDesc->mRefCount[stream]) { @@ -4402,8 +4489,7 @@ uint32_t AudioPolicyManager::getMaxEffectsMemory() AudioPolicyManager::AudioOutputDescriptor::AudioOutputDescriptor( const sp<IOProfile>& profile) - : mId(0), mIoHandle(0), mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), - mChannelMask(0), mLatency(0), + : mId(0), mIoHandle(0), mLatency(0), mFlags((audio_output_flags_t)0), mDevice(AUDIO_DEVICE_NONE), mPatchHandle(0), mOutput1(0), mOutput2(0), mProfile(profile), mDirectOpenCount(0) { @@ -4418,9 +4504,13 @@ 
AudioPolicyManager::AudioOutputDescriptor::AudioOutputDescriptor( mStrategyMutedByDevice[i] = false; } if (profile != NULL) { + mAudioPort = profile; mSamplingRate = profile->mSamplingRates[0]; mFormat = profile->mFormats[0]; mChannelMask = profile->mChannelMasks[0]; + if (profile->mGains.size() > 0) { + profile->mGains[0]->getDefaultConfig(&mGain); + } mFlags = profile->mFlags; } } @@ -4444,7 +4534,7 @@ uint32_t AudioPolicyManager::AudioOutputDescriptor::latency() } bool AudioPolicyManager::AudioOutputDescriptor::sharesHwModuleWith( - const AudioOutputDescriptor *outputDesc) + const sp<AudioOutputDescriptor> outputDesc) { if (isDuplicated()) { return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc); @@ -4527,31 +4617,18 @@ void AudioPolicyManager::AudioOutputDescriptor::toAudioPortConfig( struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig) const { - dstConfig->id = mId; - dstConfig->role = AUDIO_PORT_ROLE_SOURCE; - dstConfig->type = AUDIO_PORT_TYPE_MIX; - dstConfig->sample_rate = mSamplingRate; - dstConfig->channel_mask = mChannelMask; - dstConfig->format = mFormat; - dstConfig->gain.index = -1; + ALOG_ASSERT(!isDuplicated(), "toAudioPortConfig() called on duplicated output %d", mIoHandle); + dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK| - AUDIO_PORT_CONFIG_FORMAT; - // use supplied variable configuration parameters if any + AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN; if (srcConfig != NULL) { - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) { - dstConfig->sample_rate = srcConfig->sample_rate; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) { - dstConfig->channel_mask = srcConfig->channel_mask; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) { - dstConfig->format = srcConfig->format; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) { - dstConfig->gain = srcConfig->gain; - dstConfig->config_mask |= 
AUDIO_PORT_CONFIG_GAIN; - } + dstConfig->config_mask |= srcConfig->config_mask; } + AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig); + + dstConfig->id = mId; + dstConfig->role = AUDIO_PORT_ROLE_SOURCE; + dstConfig->type = AUDIO_PORT_TYPE_MIX; dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle; dstConfig->ext.mix.handle = mIoHandle; dstConfig->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT; @@ -4560,6 +4637,7 @@ void AudioPolicyManager::AudioOutputDescriptor::toAudioPortConfig( void AudioPolicyManager::AudioOutputDescriptor::toAudioPort( struct audio_port *port) const { + ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle); mProfile->toAudioPort(port); port->id = mId; toAudioPortConfig(&port->active_config); @@ -4602,15 +4680,22 @@ status_t AudioPolicyManager::AudioOutputDescriptor::dump(int fd) // --- AudioInputDescriptor class implementation AudioPolicyManager::AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile) - : mId(0), mIoHandle(0), mSamplingRate(0), - mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(0), + : mId(0), mIoHandle(0), mDevice(AUDIO_DEVICE_NONE), mPatchHandle(0), mRefCount(0), mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile) { if (profile != NULL) { + mAudioPort = profile; mSamplingRate = profile->mSamplingRates[0]; mFormat = profile->mFormats[0]; mChannelMask = profile->mChannelMasks[0]; + if (profile->mGains.size() > 0) { + profile->mGains[0]->getDefaultConfig(&mGain); + } + } else { + mSamplingRate = 0; + mFormat = AUDIO_FORMAT_DEFAULT; + mChannelMask = 0; } } @@ -4618,36 +4703,29 @@ void AudioPolicyManager::AudioInputDescriptor::toAudioPortConfig( struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig) const { - dstConfig->id = mId; - dstConfig->role = AUDIO_PORT_ROLE_SINK; - dstConfig->type = AUDIO_PORT_TYPE_MIX; - dstConfig->sample_rate = mSamplingRate; - dstConfig->channel_mask = mChannelMask; - dstConfig->format = mFormat; - dstConfig->gain.index 
= -1; + ALOG_ASSERT(mProfile != 0, + "toAudioPortConfig() called on input with null profile %d", mIoHandle); dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK| - AUDIO_PORT_CONFIG_FORMAT; - // use supplied variable configuration parameters if any + AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN; if (srcConfig != NULL) { - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) { - dstConfig->sample_rate = srcConfig->sample_rate; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) { - dstConfig->channel_mask = srcConfig->channel_mask; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) { - dstConfig->format = srcConfig->format; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) { - dstConfig->gain = srcConfig->gain; - dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN; - } + dstConfig->config_mask |= srcConfig->config_mask; } + + AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig); + + dstConfig->id = mId; + dstConfig->role = AUDIO_PORT_ROLE_SINK; + dstConfig->type = AUDIO_PORT_TYPE_MIX; + dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle; + dstConfig->ext.mix.handle = mIoHandle; + dstConfig->ext.mix.usecase.source = mInputSource; } void AudioPolicyManager::AudioInputDescriptor::toAudioPort( struct audio_port *port) const { + ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle); + mProfile->toAudioPort(port); port->id = mId; toAudioPortConfig(&port->active_config); @@ -4741,7 +4819,8 @@ status_t AudioPolicyManager::EffectDescriptor::dump(int fd) // --- HwModule class implementation AudioPolicyManager::HwModule::HwModule(const char *name) - : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)), mHandle(0) + : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)), + mHalVersion(AUDIO_DEVICE_API_VERSION_MIN), mHandle(0) { } @@ -4900,6 +4979,8 @@ void AudioPolicyManager::HwModule::dump(int fd) result.append(buffer); snprintf(buffer, SIZE, " - 
handle: %d\n", mHandle); result.append(buffer); + snprintf(buffer, SIZE, " - version: %u.%u\n", mHalVersion >> 8, mHalVersion & 0xFF); + result.append(buffer); write(fd, result.string(), result.size()); if (mOutputProfiles.size()) { write(fd, " - outputs:\n", strlen(" - outputs:\n")); @@ -4927,6 +5008,15 @@ void AudioPolicyManager::HwModule::dump(int fd) // --- AudioPort class implementation + +AudioPolicyManager::AudioPort::AudioPort(const String8& name, audio_port_type_t type, + audio_port_role_t role, const sp<HwModule>& module) : + mName(name), mType(type), mRole(role), mModule(module) +{ + mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) || + ((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK)); +} + void AudioPolicyManager::AudioPort::toAudioPort(struct audio_port *port) const { port->role = mRole; @@ -4945,7 +5035,7 @@ void AudioPolicyManager::AudioPort::toAudioPort(struct audio_port *port) const } port->num_formats = i; - ALOGV("AudioPort::toAudioPort() num gains %d", mGains.size()); + ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size()); for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) { port->gains[i] = mGains[i]->mGain; @@ -5062,18 +5152,17 @@ audio_gain_mode_t AudioPolicyManager::AudioPort::loadGainMode(char *name) return mode; } -void AudioPolicyManager::AudioPort::loadGain(cnode *root) +void AudioPolicyManager::AudioPort::loadGain(cnode *root, int index) { cnode *node = root->first_child; - sp<AudioGain> gain = new AudioGain(); + sp<AudioGain> gain = new AudioGain(index, mUseInChannelMask); while (node) { if (strcmp(node->name, GAIN_MODE) == 0) { gain->mGain.mode = loadGainMode((char *)node->value); } else if (strcmp(node->name, GAIN_CHANNELS) == 0) { - if ((mType == AUDIO_PORT_TYPE_DEVICE && mRole == AUDIO_PORT_ROLE_SOURCE) || - (mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK)) { + if (mUseInChannelMask) { gain->mGain.channel_mask = 
(audio_channel_mask_t)stringToEnum(sInChannelsNameToEnumTable, ARRAY_SIZE(sInChannelsNameToEnumTable), @@ -5112,13 +5201,53 @@ void AudioPolicyManager::AudioPort::loadGain(cnode *root) void AudioPolicyManager::AudioPort::loadGains(cnode *root) { cnode *node = root->first_child; + int index = 0; while (node) { ALOGV("loadGains() loading gain %s", node->name); - loadGain(node); + loadGain(node, index++); node = node->next; } } +status_t AudioPolicyManager::AudioPort::checkSamplingRate(uint32_t samplingRate) const +{ + for (size_t i = 0; i < mSamplingRates.size(); i ++) { + if (mSamplingRates[i] == samplingRate) { + return NO_ERROR; + } + } + return BAD_VALUE; +} + +status_t AudioPolicyManager::AudioPort::checkChannelMask(audio_channel_mask_t channelMask) const +{ + for (size_t i = 0; i < mChannelMasks.size(); i ++) { + if (mChannelMasks[i] == channelMask) { + return NO_ERROR; + } + } + return BAD_VALUE; +} + +status_t AudioPolicyManager::AudioPort::checkFormat(audio_format_t format) const +{ + for (size_t i = 0; i < mFormats.size(); i ++) { + if (mFormats[i] == format) { + return NO_ERROR; + } + } + return BAD_VALUE; +} + +status_t AudioPolicyManager::AudioPort::checkGain(const struct audio_gain_config *gainConfig, + int index) const +{ + if (index < 0 || (size_t)index >= mGains.size()) { + return BAD_VALUE; + } + return mGains[index]->checkConfig(gainConfig); +} + void AudioPolicyManager::AudioPort::dump(int fd, int spaces) const { const size_t SIZE = 256; @@ -5177,11 +5306,72 @@ void AudioPolicyManager::AudioPort::dump(int fd, int spaces) const // --- AudioGain class implementation -AudioPolicyManager::AudioGain::AudioGain() +AudioPolicyManager::AudioGain::AudioGain(int index, bool useInChannelMask) { + mIndex = index; + mUseInChannelMask = useInChannelMask; memset(&mGain, 0, sizeof(struct audio_gain)); } +void AudioPolicyManager::AudioGain::getDefaultConfig(struct audio_gain_config *config) +{ + config->index = mIndex; + config->mode = mGain.mode; + 
config->channel_mask = mGain.channel_mask; + if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) { + config->values[0] = mGain.default_value; + } else { + uint32_t numValues; + if (mUseInChannelMask) { + numValues = audio_channel_count_from_in_mask(mGain.channel_mask); + } else { + numValues = audio_channel_count_from_out_mask(mGain.channel_mask); + } + for (size_t i = 0; i < numValues; i++) { + config->values[i] = mGain.default_value; + } + } + if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) { + config->ramp_duration_ms = mGain.min_ramp_ms; + } +} + +status_t AudioPolicyManager::AudioGain::checkConfig(const struct audio_gain_config *config) +{ + if ((config->mode & ~mGain.mode) != 0) { + return BAD_VALUE; + } + if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) { + if ((config->values[0] < mGain.min_value) || + (config->values[0] > mGain.max_value)) { + return BAD_VALUE; + } + } else { + if ((config->channel_mask & ~mGain.channel_mask) != 0) { + return BAD_VALUE; + } + uint32_t numValues; + if (mUseInChannelMask) { + numValues = audio_channel_count_from_in_mask(config->channel_mask); + } else { + numValues = audio_channel_count_from_out_mask(config->channel_mask); + } + for (size_t i = 0; i < numValues; i++) { + if ((config->values[i] < mGain.min_value) || + (config->values[i] > mGain.max_value)) { + return BAD_VALUE; + } + } + } + if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) { + if ((config->ramp_duration_ms < mGain.min_ramp_ms) || + (config->ramp_duration_ms > mGain.max_ramp_ms)) { + return BAD_VALUE; + } + } + return NO_ERROR; +} + void AudioPolicyManager::AudioGain::dump(int fd, int spaces, int index) const { const size_t SIZE = 256; @@ -5210,10 +5400,116 @@ void AudioPolicyManager::AudioGain::dump(int fd, int spaces, int index) const write(fd, result.string(), result.size()); } +// --- AudioPortConfig class implementation + +AudioPolicyManager::AudioPortConfig::AudioPortConfig() +{ + 
mSamplingRate = 0; + mChannelMask = AUDIO_CHANNEL_NONE; + mFormat = AUDIO_FORMAT_INVALID; + mGain.index = -1; +} + +status_t AudioPolicyManager::AudioPortConfig::applyAudioPortConfig( + const struct audio_port_config *config, + struct audio_port_config *backupConfig) +{ + struct audio_port_config localBackupConfig; + status_t status = NO_ERROR; + + localBackupConfig.config_mask = config->config_mask; + toAudioPortConfig(&localBackupConfig); + + if (mAudioPort == 0) { + status = NO_INIT; + goto exit; + } + if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) { + status = mAudioPort->checkSamplingRate(config->sample_rate); + if (status != NO_ERROR) { + goto exit; + } + mSamplingRate = config->sample_rate; + } + if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) { + status = mAudioPort->checkChannelMask(config->channel_mask); + if (status != NO_ERROR) { + goto exit; + } + mChannelMask = config->channel_mask; + } + if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) { + status = mAudioPort->checkFormat(config->format); + if (status != NO_ERROR) { + goto exit; + } + mFormat = config->format; + } + if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) { + status = mAudioPort->checkGain(&config->gain, config->gain.index); + if (status != NO_ERROR) { + goto exit; + } + mGain = config->gain; + } + +exit: + if (status != NO_ERROR) { + applyAudioPortConfig(&localBackupConfig); + } + if (backupConfig != NULL) { + *backupConfig = localBackupConfig; + } + return status; +} + +void AudioPolicyManager::AudioPortConfig::toAudioPortConfig( + struct audio_port_config *dstConfig, + const struct audio_port_config *srcConfig) const +{ + if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) { + dstConfig->sample_rate = mSamplingRate; + if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) { + dstConfig->sample_rate = srcConfig->sample_rate; + } + } else { + dstConfig->sample_rate = 0; + } + if (dstConfig->config_mask & 
AUDIO_PORT_CONFIG_CHANNEL_MASK) { + dstConfig->channel_mask = mChannelMask; + if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) { + dstConfig->channel_mask = srcConfig->channel_mask; + } + } else { + dstConfig->channel_mask = AUDIO_CHANNEL_NONE; + } + if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) { + dstConfig->format = mFormat; + if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) { + dstConfig->format = srcConfig->format; + } + } else { + dstConfig->format = AUDIO_FORMAT_INVALID; + } + if (dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) { + dstConfig->gain = mGain; + if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)) { + dstConfig->gain = srcConfig->gain; + } + } else { + dstConfig->gain.index = -1; + } + if (dstConfig->gain.index != -1) { + dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN; + } else { + dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN; + } +} + // --- IOProfile class implementation AudioPolicyManager::IOProfile::IOProfile(const String8& name, audio_port_role_t role, - HwModule *module) + const sp<HwModule>& module) : AudioPort(name, AUDIO_PORT_TYPE_MIX, role, module), mFlags((audio_output_flags_t)0) { } @@ -5241,32 +5537,13 @@ bool AudioPolicyManager::IOProfile::isCompatibleProfile(audio_devices_t device, if ((mFlags & flags) != flags) { return false; } - size_t i; - for (i = 0; i < mSamplingRates.size(); i++) - { - if (mSamplingRates[i] == samplingRate) { - break; - } - } - if (i == mSamplingRates.size()) { + if (checkSamplingRate(samplingRate) != NO_ERROR) { return false; } - for (i = 0; i < mFormats.size(); i++) - { - if (mFormats[i] == format) { - break; - } - } - if (i == mFormats.size()) { + if (checkChannelMask(channelMask) != NO_ERROR) { return false; } - for (i = 0; i < mChannelMasks.size(); i++) - { - if (mChannelMasks[i] == channelMask) { - break; - } - } - if (i == mChannelMasks.size()) { + if (checkFormat(format) != NO_ERROR) { 
return false; } return true; @@ -5318,6 +5595,21 @@ void AudioPolicyManager::IOProfile::log() // --- DeviceDescriptor implementation + +AudioPolicyManager::DeviceDescriptor::DeviceDescriptor(const String8& name, audio_devices_t type) : + AudioPort(name, AUDIO_PORT_TYPE_DEVICE, + audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK : + AUDIO_PORT_ROLE_SOURCE, + NULL), + mDeviceType(type), mAddress(""), + mChannelMask(AUDIO_CHANNEL_NONE), mId(0) +{ + mAudioPort = this; + if (mGains.size() > 0) { + mGains[0]->getDefaultConfig(&mGain); + } +} + bool AudioPolicyManager::DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const { // Devices are considered equal if they: @@ -5441,7 +5733,7 @@ sp<AudioPolicyManager::DeviceDescriptor> AudioPolicyManager::DeviceVector::getDe { sp<DeviceDescriptor> device; for (size_t i = 0; i < size(); i++) { - ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%d)->mId %d", id, i, itemAt(i)->mId); + ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%zu)->mId %d", id, i, itemAt(i)->mId); if (itemAt(i)->mId == id) { device = itemAt(i); break; @@ -5482,23 +5774,17 @@ void AudioPolicyManager::DeviceDescriptor::toAudioPortConfig( struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig) const { + dstConfig->config_mask = AUDIO_PORT_CONFIG_CHANNEL_MASK|AUDIO_PORT_CONFIG_GAIN; + if (srcConfig != NULL) { + dstConfig->config_mask |= srcConfig->config_mask; + } + + AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig); + dstConfig->id = mId; dstConfig->role = audio_is_output_device(mDeviceType) ? 
AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE; dstConfig->type = AUDIO_PORT_TYPE_DEVICE; - dstConfig->channel_mask = mChannelMask; - dstConfig->gain.index = -1; - dstConfig->config_mask = AUDIO_PORT_CONFIG_CHANNEL_MASK; - // use supplied variable configuration parameters if any - if (srcConfig != NULL) { - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) { - dstConfig->channel_mask = srcConfig->channel_mask; - } - if (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) { - dstConfig->gain = srcConfig->gain; - dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN; - } - } dstConfig->ext.device.type = mDeviceType; dstConfig->ext.device.hw_module = mModule->mHandle; strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN); @@ -5594,7 +5880,7 @@ void AudioPolicyManager::loadHwModule(cnode *root) { status_t status = NAME_NOT_FOUND; cnode *node; - HwModule *module = new HwModule(root->name); + sp<HwModule> module = new HwModule(root->name); node = config_find(root, DEVICES_TAG); if (node != NULL) { @@ -5636,8 +5922,6 @@ void AudioPolicyManager::loadHwModule(cnode *root) if (status == NO_ERROR) { mHwModules.add(module); - } else { - delete module; } } @@ -5656,9 +5940,10 @@ void AudioPolicyManager::loadHwModules(cnode *root) } } -void AudioPolicyManager::loadGlobalConfig(cnode *root, HwModule *module) +void AudioPolicyManager::loadGlobalConfig(cnode *root, const sp<HwModule>& module) { cnode *node = config_find(root, GLOBAL_CONFIG_TAG); + if (node == NULL) { return; } @@ -5691,6 +5976,12 @@ void AudioPolicyManager::loadGlobalConfig(cnode *root, HwModule *module) } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) { mSpeakerDrcEnabled = stringToBool((char *)node->value); ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", mSpeakerDrcEnabled); + } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) { + uint32_t major, minor; + sscanf((char *)node->value, "%u.%u", &major, &minor); + module->mHalVersion = 
HARDWARE_DEVICE_API_VERSION(major, minor); + ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u", + module->mHalVersion, major, minor); } node = node->next; } @@ -5722,9 +6013,10 @@ status_t AudioPolicyManager::loadAudioPolicyConfig(const char *path) void AudioPolicyManager::defaultAudioPolicyConfig(void) { - HwModule *module; + sp<HwModule> module; sp<IOProfile> profile; - sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(String8(""), AUDIO_DEVICE_IN_BUILTIN_MIC); + sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(String8(""), + AUDIO_DEVICE_IN_BUILTIN_MIC); mAvailableOutputDevices.add(mDefaultOutputDevice); mAvailableInputDevices.add(defaultInputDevice); @@ -5748,4 +6040,46 @@ void AudioPolicyManager::defaultAudioPolicyConfig(void) mHwModules.add(module); } +audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr) +{ + // flags to stream type mapping + if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) { + return AUDIO_STREAM_ENFORCED_AUDIBLE; + } + if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) { + return AUDIO_STREAM_BLUETOOTH_SCO; + } + + // usage to stream type mapping + switch (attr->usage) { + case AUDIO_USAGE_MEDIA: + case AUDIO_USAGE_GAME: + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: + return AUDIO_STREAM_MUSIC; + case AUDIO_USAGE_ASSISTANCE_SONIFICATION: + return AUDIO_STREAM_SYSTEM; + case AUDIO_USAGE_VOICE_COMMUNICATION: + return AUDIO_STREAM_VOICE_CALL; + + case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING: + return AUDIO_STREAM_DTMF; + + case AUDIO_USAGE_ALARM: + return AUDIO_STREAM_ALARM; + case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE: + return AUDIO_STREAM_RING; + + case AUDIO_USAGE_NOTIFICATION: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED: + case 
AUDIO_USAGE_NOTIFICATION_EVENT: + return AUDIO_STREAM_NOTIFICATION; + + case AUDIO_USAGE_UNKNOWN: + default: + return AUDIO_STREAM_MUSIC; + } +} }; // namespace android diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h index e012d63..c23d994 100644 --- a/services/audiopolicy/AudioPolicyManager.h +++ b/services/audiopolicy/AudioPolicyManager.h @@ -84,6 +84,12 @@ public: audio_channel_mask_t channelMask, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo); + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo); virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0); @@ -116,6 +122,8 @@ public: // return the strategy corresponding to a given stream type virtual uint32_t getStrategyForStream(audio_stream_type_t stream); + // return the strategy corresponding to the given audio attributes + virtual uint32_t getStrategyForAttr(const audio_attributes_t *attr); // return the enabled output devices for the given stream type virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream); @@ -195,20 +203,23 @@ protected: class AudioGain: public RefBase { public: - AudioGain(); + AudioGain(int index, bool useInChannelMask); virtual ~AudioGain() {} void dump(int fd, int spaces, int index) const; + void getDefaultConfig(struct audio_gain_config *config); + status_t checkConfig(const struct audio_gain_config *config); + int mIndex; struct audio_gain mGain; + bool mUseInChannelMask; }; - class AudioPort: public RefBase + class AudioPort: public virtual RefBase { public: AudioPort(const String8& name, audio_port_type_t type, - audio_port_role_t role, HwModule *module) : - mName(name), mType(type), mRole(role), mModule(module) {} + audio_port_role_t role, const sp<HwModule>& module); 
virtual ~AudioPort() {} virtual void toAudioPort(struct audio_port *port) const; @@ -219,14 +230,20 @@ protected: void loadInChannels(char *name); audio_gain_mode_t loadGainMode(char *name); - void loadGain(cnode *root); + void loadGain(cnode *root, int index); void loadGains(cnode *root); + status_t checkSamplingRate(uint32_t samplingRate) const; + status_t checkChannelMask(audio_channel_mask_t channelMask) const; + status_t checkFormat(audio_format_t format) const; + status_t checkGain(const struct audio_gain_config *gainConfig, int index) const; + void dump(int fd, int spaces) const; String8 mName; audio_port_type_t mType; audio_port_role_t mRole; + bool mUseInChannelMask; // by convention, "0' in the first entry in mSamplingRates, mChannelMasks or mFormats // indicates the supported parameters should be read from the output stream // after it is opened for the first time @@ -234,9 +251,27 @@ protected: Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks Vector <audio_format_t> mFormats; // supported audio formats Vector < sp<AudioGain> > mGains; // gain controllers - HwModule *mModule; // audio HW module exposing this I/O stream + sp<HwModule> mModule; // audio HW module exposing this I/O stream }; + class AudioPortConfig: public virtual RefBase + { + public: + AudioPortConfig(); + virtual ~AudioPortConfig() {} + + status_t applyAudioPortConfig(const struct audio_port_config *config, + struct audio_port_config *backupConfig = NULL); + virtual void toAudioPortConfig(struct audio_port_config *dstConfig, + const struct audio_port_config *srcConfig = NULL) const = 0; + sp<AudioPort> mAudioPort; + uint32_t mSamplingRate; + audio_format_t mFormat; + audio_channel_mask_t mChannelMask; + struct audio_gain_config mGain; + }; + + class AudioPatch: public RefBase { public: @@ -250,29 +285,15 @@ protected: audio_patch_handle_t mAfPatchHandle; }; - class DeviceDescriptor: public AudioPort + class DeviceDescriptor: public AudioPort, public AudioPortConfig 
{ public: - DeviceDescriptor(const String8& name, audio_devices_t type, String8 address, - audio_channel_mask_t channelMask) : - AudioPort(name, AUDIO_PORT_TYPE_DEVICE, - audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK : - AUDIO_PORT_ROLE_SOURCE, - NULL), - mDeviceType(type), mAddress(address), - mChannelMask(channelMask), mId(0) {} - - DeviceDescriptor(String8 name, audio_devices_t type) : - AudioPort(name, AUDIO_PORT_TYPE_DEVICE, - audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK : - AUDIO_PORT_ROLE_SOURCE, - NULL), - mDeviceType(type), mAddress(""), - mChannelMask(0), mId(0) {} + DeviceDescriptor(const String8& name, audio_devices_t type); + virtual ~DeviceDescriptor() {} bool equals(const sp<DeviceDescriptor>& other) const; - void toAudioPortConfig(struct audio_port_config *dstConfig, + virtual void toAudioPortConfig(struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig = NULL) const; virtual void toAudioPort(struct audio_port *port) const; @@ -317,7 +338,7 @@ protected: class IOProfile : public AudioPort { public: - IOProfile(const String8& name, audio_port_role_t role, HwModule *module); + IOProfile(const String8& name, audio_port_role_t role, const sp<HwModule>& module); virtual ~IOProfile(); bool isCompatibleProfile(audio_devices_t device, @@ -335,7 +356,7 @@ protected: // direct output...). For outputs only. }; - class HwModule { + class HwModule : public RefBase{ public: HwModule(const char *name); ~HwModule(); @@ -346,8 +367,9 @@ protected: void dump(int fd); - const char *const mName; // base name of the audio HW module (primary, a2dp ...) - audio_module_handle_t mHandle; + const char *const mName; // base name of the audio HW module (primary, a2dp ...) 
+ uint32_t mHalVersion; // audio HAL API version + audio_module_handle_t mHandle; Vector < sp<IOProfile> > mOutputProfiles; // output profiles exposed by this module Vector < sp<IOProfile> > mInputProfiles; // input profiles exposed by this module DeviceVector mDeclaredDevices; // devices declared in audio_policy.conf @@ -360,6 +382,7 @@ protected: static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT]; // volume curve for media strategy on speakers static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[AudioPolicyManager::VOLCNT]; // volume curve for sonification strategy on speakers static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT]; static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT]; @@ -373,7 +396,7 @@ protected: // descriptor for audio outputs. Used to maintain current configuration of each opened audio output // and keep track of the usage of this output by each audio stream type. 
- class AudioOutputDescriptor + class AudioOutputDescriptor: public AudioPortConfig { public: AudioOutputDescriptor(const sp<IOProfile>& profile); @@ -386,7 +409,7 @@ protected: bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); } audio_devices_t supportedDevices(); uint32_t latency(); - bool sharesHwModuleWith(const AudioOutputDescriptor *outputDesc); + bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc); bool isActive(uint32_t inPastMs = 0) const; bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0, @@ -395,23 +418,20 @@ protected: uint32_t inPastMs = 0, nsecs_t sysTime = 0) const; - void toAudioPortConfig(struct audio_port_config *dstConfig, + virtual void toAudioPortConfig(struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig = NULL) const; void toAudioPort(struct audio_port *port) const; audio_port_handle_t mId; audio_io_handle_t mIoHandle; // output handle - uint32_t mSamplingRate; // - audio_format_t mFormat; // - audio_channel_mask_t mChannelMask; // output configuration uint32_t mLatency; // audio_output_flags_t mFlags; // audio_devices_t mDevice; // current device this output is routed to audio_patch_handle_t mPatchHandle; uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output nsecs_t mStopTime[AUDIO_STREAM_CNT]; - AudioOutputDescriptor *mOutput1; // used by duplicated outputs: first output - AudioOutputDescriptor *mOutput2; // used by duplicated outputs: second output + sp<AudioOutputDescriptor> mOutput1; // used by duplicated outputs: first output + sp<AudioOutputDescriptor> mOutput2; // used by duplicated outputs: second output float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume int mMuteCount[AUDIO_STREAM_CNT]; // mute request counter const sp<IOProfile> mProfile; // I/O profile this output derives from @@ -422,7 +442,7 @@ protected: // descriptor for audio inputs. 
Used to maintain current configuration of each opened audio input // and keep track of the usage of this input. - class AudioInputDescriptor + class AudioInputDescriptor: public AudioPortConfig { public: AudioInputDescriptor(const sp<IOProfile>& profile); @@ -431,16 +451,13 @@ protected: audio_port_handle_t mId; audio_io_handle_t mIoHandle; // input handle - uint32_t mSamplingRate; // - audio_format_t mFormat; // input configuration - audio_channel_mask_t mChannelMask; // audio_devices_t mDevice; // current device this input is routed to audio_patch_handle_t mPatchHandle; uint32_t mRefCount; // number of AudioRecord clients using this output audio_source_t mInputSource; // input source selected by application (mediarecorder.h) const sp<IOProfile> mProfile; // I/O profile this output derives from - void toAudioPortConfig(struct audio_port_config *dstConfig, + virtual void toAudioPortConfig(struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig = NULL) const; void toAudioPort(struct audio_port *port) const; }; @@ -463,7 +480,7 @@ protected: }; // stream descriptor used for volume control - class EffectDescriptor + class EffectDescriptor : public RefBase { public: @@ -476,8 +493,8 @@ protected: bool mEnabled; // enabled state: CPU load being used or not }; - void addOutput(audio_io_handle_t output, AudioOutputDescriptor *outputDesc); - void addInput(audio_io_handle_t input, AudioInputDescriptor *inputDesc); + void addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc); + void addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc); // return the strategy corresponding to a given stream type static routing_strategy getStrategy(audio_stream_type_t stream); @@ -618,7 +635,7 @@ protected: int testOutputIndex(audio_io_handle_t output); #endif //AUDIO_POLICY_TEST - status_t setEffectEnabled(EffectDescriptor *pDesc, bool enabled); + status_t setEffectEnabled(const sp<EffectDescriptor>& effectDesc, bool enabled); // 
returns the category the device belongs to with regard to volume curve management static device_category getDeviceCategory(audio_devices_t device); @@ -627,7 +644,7 @@ protected: static audio_devices_t getDeviceForVolume(audio_devices_t device); SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device, - DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> openOutputs); + DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > openOutputs); bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1, SortedVector<audio_io_handle_t>& outputs2); @@ -635,7 +652,7 @@ protected: // if muting, wait for the audio in pcm buffer to be drained before proceeding // if unmuting, unmute only after the specified delay // Returns the number of ms waited - uint32_t checkDeviceMuteStrategies(AudioOutputDescriptor *outputDesc, + uint32_t checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc, audio_devices_t prevDevice, uint32_t delayMs); @@ -659,10 +676,10 @@ protected: const sp<AudioPatch>& patch); status_t removeAudioPatch(audio_patch_handle_t handle); - AudioOutputDescriptor *getOutputFromId(audio_port_handle_t id) const; - AudioInputDescriptor *getInputFromId(audio_port_handle_t id) const; - HwModule *getModuleForDevice(audio_devices_t device) const; - HwModule *getModuleFromName(const char *name) const; + sp<AudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const; + sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const; + sp<HwModule> getModuleForDevice(audio_devices_t device) const; + sp<HwModule> getModuleFromName(const char *name) const; // // Audio policy configuration file parsing (audio_policy.conf) // @@ -677,7 +694,7 @@ protected: static audio_devices_t parseDeviceNames(char *name); void loadHwModule(cnode *root); void loadHwModules(cnode *root); - void loadGlobalConfig(cnode *root, HwModule *module); + void loadGlobalConfig(cnode *root, const sp<HwModule>& module); status_t 
loadAudioPolicyConfig(const char *path); void defaultAudioPolicyConfig(void); @@ -686,11 +703,11 @@ protected: AudioPolicyClientInterface *mpClientInterface; // audio policy client interface audio_io_handle_t mPrimaryOutput; // primary output handle // list of descriptors for outputs currently opened - DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mOutputs; + DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > mOutputs; // copy of mOutputs before setDeviceConnectionState() opens new outputs // reset to mOutputs when updateDevicesAndOutputs() is called. - DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mPreviousOutputs; - DefaultKeyedVector<audio_io_handle_t, AudioInputDescriptor *> mInputs; // list of input descriptors + DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > mPreviousOutputs; + DefaultKeyedVector<audio_io_handle_t, sp<AudioInputDescriptor> > mInputs; // list of input descriptors DeviceVector mAvailableOutputDevices; // all available output devices DeviceVector mAvailableInputDevices; // all available input devices int mPhoneState; // current phone state @@ -707,13 +724,13 @@ protected: static const uint32_t MAX_EFFECTS_MEMORY = 512; uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects uint32_t mTotalEffectsMemory; // current memory used by effects - KeyedVector<int, EffectDescriptor *> mEffects; // list of registered audio effects + KeyedVector<int, sp<EffectDescriptor> > mEffects; // list of registered audio effects bool mA2dpSuspended; // true if A2DP output is suspended sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path // to boost soft sounds, used to adjust volume curves accordingly - Vector <HwModule *> mHwModules; + Vector < sp<HwModule> > mHwModules; volatile int32_t mNextUniqueId; volatile int32_t mAudioPortGeneration; @@ -746,6 +763,17 
@@ private: uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; } // converts device address to string sent to audio HAL via setParameters static String8 addressToParameter(audio_devices_t device, const String8 address); + // internal method to return the output handle for the given device and format + audio_io_handle_t getOutputForDevice( + audio_devices_t device, + audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo); + // internal function to derive a stream type value from audio attributes + audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr); }; }; diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp index a2a0461..9435797 100644..100755 --- a/services/audiopolicy/AudioPolicyService.cpp +++ b/services/audiopolicy/AudioPolicyService.cpp @@ -40,8 +40,6 @@ #include <system/audio.h> #include <system/audio_policy.h> #include <hardware/audio_policy.h> -#include <audio_effects/audio_effects_conf.h> -#include <media/AudioParameter.h> namespace android { @@ -108,15 +106,11 @@ AudioPolicyService::AudioPolicyService() ALOGI("AudioPolicyService CSTOR in new mode"); mAudioPolicyClient = new AudioPolicyClient(this); - mAudioPolicyManager = new AudioPolicyManager(mAudioPolicyClient); + mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient); #endif - // load audio pre processing modules - if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) { - loadPreProcessorConfig(AUDIO_EFFECT_VENDOR_CONFIG_FILE); - } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) { - loadPreProcessorConfig(AUDIO_EFFECT_DEFAULT_CONFIG_FILE); - } + // load audio processing modules + mAudioPolicyEffects = new AudioPolicyEffects(); } AudioPolicyService::~AudioPolicyService() @@ -125,18 +119,6 @@ AudioPolicyService::~AudioPolicyService() 
mAudioCommandThread->exit(); mOutputCommandThread->exit(); - // release audio pre processing resources - for (size_t i = 0; i < mInputSources.size(); i++) { - delete mInputSources.valueAt(i); - } - mInputSources.clear(); - - for (size_t i = 0; i < mInputs.size(); i++) { - mInputs.valueAt(i)->mEffects.clear(); - delete mInputs.valueAt(i); - } - mInputs.clear(); - #ifdef USE_LEGACY_AUDIO_POLICY if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) { mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy); @@ -145,11 +127,12 @@ AudioPolicyService::~AudioPolicyService() audio_policy_dev_close(mpAudioPolicyDev); } #else - delete mAudioPolicyManager; + destroyAudioPolicyManager(mAudioPolicyManager); delete mAudioPolicyClient; #endif mNotificationClients.clear(); + mAudioPolicyEffects.clear(); } // A notification client is always registered by AudioSystem when the client process @@ -353,14 +336,6 @@ status_t AudioPolicyService::dumpPermissionDenial(int fd) return NO_ERROR; } -void AudioPolicyService::setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled) -{ - const Vector<sp<AudioEffect> > &fxVector = inputDesc->mEffects; - for (size_t i = 0; i < fxVector.size(); i++) { - fxVector.itemAt(i)->setEnabled(enabled); - } -} - status_t AudioPolicyService::onTransact( uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) { @@ -399,7 +374,8 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() mLock.lock(); while (!exitPending()) { - while (!mAudioCommands.isEmpty()) { + sp<AudioPolicyService> svc; + while (!mAudioCommands.isEmpty() && !exitPending()) { nsecs_t curTime = systemTime(); // commands are sorted by increasing time stamp: execute them from index 0 and up if (mAudioCommands[0]->mTime <= curTime) { @@ -452,7 +428,7 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() StopOutputData *data = (StopOutputData *)command->mParam.get(); ALOGV("AudioCommandThread() processing stop output %d", data->mIO); - 
sp<AudioPolicyService> svc = mService.promote(); + svc = mService.promote(); if (svc == 0) { break; } @@ -464,7 +440,7 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get(); ALOGV("AudioCommandThread() processing release output %d", data->mIO); - sp<AudioPolicyService> svc = mService.promote(); + svc = mService.promote(); if (svc == 0) { break; } @@ -494,7 +470,7 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() } break; case UPDATE_AUDIOPORT_LIST: { ALOGV("AudioCommandThread() processing update audio port list"); - sp<AudioPolicyService> svc = mService.promote(); + svc = mService.promote(); if (svc == 0) { break; } @@ -504,7 +480,7 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() }break; case UPDATE_AUDIOPATCH_LIST: { ALOGV("AudioCommandThread() processing update audio patch list"); - sp<AudioPolicyService> svc = mService.promote(); + svc = mService.promote(); if (svc == 0) { break; } @@ -542,9 +518,16 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() if (mAudioCommands.isEmpty()) { release_wake_lock(mName.string()); } - ALOGV("AudioCommandThread() going to sleep"); - mWaitWorkCV.waitRelative(mLock, waitTime); - ALOGV("AudioCommandThread() waking up"); + // release mLock before releasing strong reference on the service as + // AudioPolicyService destructor calls AudioCommandThread::exit() which acquires mLock. 
+ mLock.unlock(); + svc.clear(); + mLock.lock(); + if (!exitPending()) { + ALOGV("AudioCommandThread() going to sleep"); + mWaitWorkCV.waitRelative(mLock, waitTime); + ALOGV("AudioCommandThread() waking up"); + } } mLock.unlock(); return false; @@ -928,304 +911,6 @@ int AudioPolicyService::setVoiceVolume(float volume, int delayMs) return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs); } -// ---------------------------------------------------------------------------- -// Audio pre-processing configuration -// ---------------------------------------------------------------------------- - -/*static*/ const char * const AudioPolicyService::kInputSourceNames[AUDIO_SOURCE_CNT -1] = { - MIC_SRC_TAG, - VOICE_UL_SRC_TAG, - VOICE_DL_SRC_TAG, - VOICE_CALL_SRC_TAG, - CAMCORDER_SRC_TAG, - VOICE_REC_SRC_TAG, - VOICE_COMM_SRC_TAG -}; - -// returns the audio_source_t enum corresponding to the input source name or -// AUDIO_SOURCE_CNT is no match found -audio_source_t AudioPolicyService::inputSourceNameToEnum(const char *name) -{ - int i; - for (i = AUDIO_SOURCE_MIC; i < AUDIO_SOURCE_CNT; i++) { - if (strcmp(name, kInputSourceNames[i - AUDIO_SOURCE_MIC]) == 0) { - ALOGV("inputSourceNameToEnum found source %s %d", name, i); - break; - } - } - return (audio_source_t)i; -} - -size_t AudioPolicyService::growParamSize(char *param, - size_t size, - size_t *curSize, - size_t *totSize) -{ - // *curSize is at least sizeof(effect_param_t) + 2 * sizeof(int) - size_t pos = ((*curSize - 1 ) / size + 1) * size; - - if (pos + size > *totSize) { - while (pos + size > *totSize) { - *totSize += ((*totSize + 7) / 8) * 4; - } - param = (char *)realloc(param, *totSize); - } - *curSize = pos + size; - return pos; -} - -size_t AudioPolicyService::readParamValue(cnode *node, - char *param, - size_t *curSize, - size_t *totSize) -{ - if (strncmp(node->name, SHORT_TAG, sizeof(SHORT_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(short), curSize, totSize); - *(short *)((char 
*)param + pos) = (short)atoi(node->value); - ALOGV("readParamValue() reading short %d", *(short *)((char *)param + pos)); - return sizeof(short); - } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(int), curSize, totSize); - *(int *)((char *)param + pos) = atoi(node->value); - ALOGV("readParamValue() reading int %d", *(int *)((char *)param + pos)); - return sizeof(int); - } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(float), curSize, totSize); - *(float *)((char *)param + pos) = (float)atof(node->value); - ALOGV("readParamValue() reading float %f",*(float *)((char *)param + pos)); - return sizeof(float); - } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(bool), curSize, totSize); - if (strncmp(node->value, "false", strlen("false") + 1) == 0) { - *(bool *)((char *)param + pos) = false; - } else { - *(bool *)((char *)param + pos) = true; - } - ALOGV("readParamValue() reading bool %s",*(bool *)((char *)param + pos) ? 
"true" : "false"); - return sizeof(bool); - } else if (strncmp(node->name, STRING_TAG, sizeof(STRING_TAG) + 1) == 0) { - size_t len = strnlen(node->value, EFFECT_STRING_LEN_MAX); - if (*curSize + len + 1 > *totSize) { - *totSize = *curSize + len + 1; - param = (char *)realloc(param, *totSize); - } - strncpy(param + *curSize, node->value, len); - *curSize += len; - param[*curSize] = '\0'; - ALOGV("readParamValue() reading string %s", param + *curSize - len); - return len; - } - ALOGW("readParamValue() unknown param type %s", node->name); - return 0; -} - -effect_param_t *AudioPolicyService::loadEffectParameter(cnode *root) -{ - cnode *param; - cnode *value; - size_t curSize = sizeof(effect_param_t); - size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int); - effect_param_t *fx_param = (effect_param_t *)malloc(totSize); - - param = config_find(root, PARAM_TAG); - value = config_find(root, VALUE_TAG); - if (param == NULL && value == NULL) { - // try to parse simple parameter form {int int} - param = root->first_child; - if (param != NULL) { - // Note: that a pair of random strings is read as 0 0 - int *ptr = (int *)fx_param->data; - int *ptr2 = (int *)((char *)param + sizeof(effect_param_t)); - ALOGW("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2); - *ptr++ = atoi(param->name); - *ptr = atoi(param->value); - fx_param->psize = sizeof(int); - fx_param->vsize = sizeof(int); - return fx_param; - } - } - if (param == NULL || value == NULL) { - ALOGW("loadEffectParameter() invalid parameter description %s", root->name); - goto error; - } - - fx_param->psize = 0; - param = param->first_child; - while (param) { - ALOGV("loadEffectParameter() reading param of type %s", param->name); - size_t size = readParamValue(param, (char *)fx_param, &curSize, &totSize); - if (size == 0) { - goto error; - } - fx_param->psize += size; - param = param->next; - } - - // align start of value field on 32 bit boundary - curSize = ((curSize - 1 ) / sizeof(int) + 1) * sizeof(int); - - 
fx_param->vsize = 0; - value = value->first_child; - while (value) { - ALOGV("loadEffectParameter() reading value of type %s", value->name); - size_t size = readParamValue(value, (char *)fx_param, &curSize, &totSize); - if (size == 0) { - goto error; - } - fx_param->vsize += size; - value = value->next; - } - - return fx_param; - -error: - free(fx_param); - return NULL; -} - -void AudioPolicyService::loadEffectParameters(cnode *root, Vector <effect_param_t *>& params) -{ - cnode *node = root->first_child; - while (node) { - ALOGV("loadEffectParameters() loading param %s", node->name); - effect_param_t *param = loadEffectParameter(node); - if (param == NULL) { - node = node->next; - continue; - } - params.add(param); - node = node->next; - } -} - -AudioPolicyService::InputSourceDesc *AudioPolicyService::loadInputSource( - cnode *root, - const Vector <EffectDesc *>& effects) -{ - cnode *node = root->first_child; - if (node == NULL) { - ALOGW("loadInputSource() empty element %s", root->name); - return NULL; - } - InputSourceDesc *source = new InputSourceDesc(); - while (node) { - size_t i; - for (i = 0; i < effects.size(); i++) { - if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) { - ALOGV("loadInputSource() found effect %s in list", node->name); - break; - } - } - if (i == effects.size()) { - ALOGV("loadInputSource() effect %s not in list", node->name); - node = node->next; - continue; - } - EffectDesc *effect = new EffectDesc(*effects[i]); // deep copy - loadEffectParameters(node, effect->mParams); - ALOGV("loadInputSource() adding effect %s uuid %08x", effect->mName, effect->mUuid.timeLow); - source->mEffects.add(effect); - node = node->next; - } - if (source->mEffects.size() == 0) { - ALOGW("loadInputSource() no valid effects found in source %s", root->name); - delete source; - return NULL; - } - return source; -} - -status_t AudioPolicyService::loadInputSources(cnode *root, const Vector <EffectDesc *>& effects) -{ - cnode *node = 
config_find(root, PREPROCESSING_TAG); - if (node == NULL) { - return -ENOENT; - } - node = node->first_child; - while (node) { - audio_source_t source = inputSourceNameToEnum(node->name); - if (source == AUDIO_SOURCE_CNT) { - ALOGW("loadInputSources() invalid input source %s", node->name); - node = node->next; - continue; - } - ALOGV("loadInputSources() loading input source %s", node->name); - InputSourceDesc *desc = loadInputSource(node, effects); - if (desc == NULL) { - node = node->next; - continue; - } - mInputSources.add(source, desc); - node = node->next; - } - return NO_ERROR; -} - -AudioPolicyService::EffectDesc *AudioPolicyService::loadEffect(cnode *root) -{ - cnode *node = config_find(root, UUID_TAG); - if (node == NULL) { - return NULL; - } - effect_uuid_t uuid; - if (AudioEffect::stringToGuid(node->value, &uuid) != NO_ERROR) { - ALOGW("loadEffect() invalid uuid %s", node->value); - return NULL; - } - return new EffectDesc(root->name, uuid); -} - -status_t AudioPolicyService::loadEffects(cnode *root, Vector <EffectDesc *>& effects) -{ - cnode *node = config_find(root, EFFECTS_TAG); - if (node == NULL) { - return -ENOENT; - } - node = node->first_child; - while (node) { - ALOGV("loadEffects() loading effect %s", node->name); - EffectDesc *effect = loadEffect(node); - if (effect == NULL) { - node = node->next; - continue; - } - effects.add(effect); - node = node->next; - } - return NO_ERROR; -} - -status_t AudioPolicyService::loadPreProcessorConfig(const char *path) -{ - cnode *root; - char *data; - - data = (char *)load_file(path, NULL); - if (data == NULL) { - return -ENODEV; - } - root = config_node("", ""); - config_load(root, data); - - Vector <EffectDesc *> effects; - loadEffects(root, effects); - loadInputSources(root, effects); - - // delete effects to fix memory leak. 
- // as effects is local var and valgrind would treat this as memory leak - // and although it only did in mediaserver init, but free it in case mediaserver reboot - size_t i; - for (i = 0; i < effects.size(); i++) { - delete effects[i]; - } - - config_free(root); - free(root); - free(data); - - return NO_ERROR; -} - extern "C" { audio_module_handle_t aps_load_hw_module(void *service __unused, const char *name); diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h index 40f589b..380fd5e 100644..100755 --- a/services/audiopolicy/AudioPolicyService.h +++ b/services/audiopolicy/AudioPolicyService.h @@ -31,8 +31,10 @@ #include <media/ToneGenerator.h> #include <media/AudioEffect.h> #include <hardware_legacy/AudioPolicyInterface.h> +#include "AudioPolicyEffects.h" #include "AudioPolicyManager.h" + namespace android { // ---------------------------------------------------------------------------- @@ -70,6 +72,12 @@ public: audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, const audio_offload_info_t *offloadInfo = NULL); + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate = 0, + audio_format_t format = AUDIO_FORMAT_DEFAULT, + audio_channel_mask_t channelMask = 0, + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL); virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0); @@ -331,60 +339,6 @@ private: wp<AudioPolicyService> mService; }; - class EffectDesc { - public: - EffectDesc(const char *name, const effect_uuid_t& uuid) : - mName(strdup(name)), - mUuid(uuid) { } - EffectDesc(const EffectDesc& orig) : - mName(strdup(orig.mName)), - mUuid(orig.mUuid) { - // deep copy mParams - for (size_t k = 0; k < orig.mParams.size(); k++) { - effect_param_t *origParam = orig.mParams[k]; - // psize and vsize are rounded up to an int boundary for allocation - size_t origSize = 
sizeof(effect_param_t) + - ((origParam->psize + 3) & ~3) + - ((origParam->vsize + 3) & ~3); - effect_param_t *dupParam = (effect_param_t *) malloc(origSize); - memcpy(dupParam, origParam, origSize); - // This works because the param buffer allocation is also done by - // multiples of 4 bytes originally. In theory we should memcpy only - // the actual param size, that is without rounding vsize. - mParams.add(dupParam); - } - } - /*virtual*/ ~EffectDesc() { - free(mName); - for (size_t k = 0; k < mParams.size(); k++) { - free(mParams[k]); - } - } - char *mName; - effect_uuid_t mUuid; - Vector <effect_param_t *> mParams; - }; - - class InputSourceDesc { - public: - InputSourceDesc() {} - /*virtual*/ ~InputSourceDesc() { - for (size_t j = 0; j < mEffects.size(); j++) { - delete mEffects[j]; - } - } - Vector <EffectDesc *> mEffects; - }; - - - class InputDesc { - public: - InputDesc(int session) : mSessionId(session) {} - /*virtual*/ ~InputDesc() {} - const int mSessionId; - Vector< sp<AudioEffect> >mEffects; - }; - class AudioPolicyClient : public AudioPolicyClientInterface { public: @@ -508,26 +462,6 @@ private: const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient; }; - static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1]; - - void setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled); - status_t loadPreProcessorConfig(const char *path); - status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects); - EffectDesc *loadEffect(cnode *root); - status_t loadInputSources(cnode *root, const Vector <EffectDesc *>& effects); - audio_source_t inputSourceNameToEnum(const char *name); - InputSourceDesc *loadInputSource(cnode *root, const Vector <EffectDesc *>& effects); - void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params); - effect_param_t *loadEffectParameter(cnode *root); - size_t readParamValue(cnode *node, - char *param, - size_t *curSize, - size_t *totSize); - size_t growParamSize(char *param, - size_t size, - 
size_t *curSize, - size_t *totSize); - // Internal dump utilities. status_t dumpPermissionDenial(int fd); @@ -539,13 +473,13 @@ private: sp<AudioCommandThread> mOutputCommandThread; // process stop and release output struct audio_policy_device *mpAudioPolicyDev; struct audio_policy *mpAudioPolicy; - AudioPolicyManager *mAudioPolicyManager; + AudioPolicyInterface *mAudioPolicyManager; AudioPolicyClient *mAudioPolicyClient; - KeyedVector< audio_source_t, InputSourceDesc* > mInputSources; - KeyedVector< audio_io_handle_t, InputDesc* > mInputs; - DefaultKeyedVector< uid_t, sp<NotificationClient> > mNotificationClients; + + // Manage all effects configured in audio_effects.conf + sp<AudioPolicyEffects> mAudioPolicyEffects; }; }; // namespace android diff --git a/services/audiopolicy/audio_policy.conf b/services/audiopolicy/audio_policy.conf new file mode 100644 index 0000000..9b83fef --- /dev/null +++ b/services/audiopolicy/audio_policy.conf @@ -0,0 +1,145 @@ +# +# Template audio policy configuration file +# + +# Global configuration section: +# - before audio HAL version 3.0: +# lists input and output devices always present on the device +# as well as the output device selected by default. +# Devices are designated by a string that corresponds to the enum in audio.h +# +# global_configuration { +# attached_output_devices AUDIO_DEVICE_OUT_SPEAKER +# default_output_device AUDIO_DEVICE_OUT_SPEAKER +# attached_input_devices AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_REMOTE_SUBMIX +# } +# +# - after and including audio HAL 3.0 the global_configuration section is included in each +# hardware module section. +# it also includes the audio HAL version of this hw module: +# global_configuration { +# ... +# audio_hal_version <major.minor> # audio HAL version in e.g. 
3.0 +# } +# other attributes (attached devices, default device) have to be included in the +# global_configuration section of each hardware module + + +# audio hardware module section: contains descriptors for all audio hw modules present on the +# device. Each hw module node is named after the corresponding hw module library base name. +# For instance, "primary" corresponds to audio.primary.<device>.so. +# The "primary" module is mandatory and must include at least one output with +# AUDIO_OUTPUT_FLAG_PRIMARY flag. +# Each module descriptor contains one or more output profile descriptors and zero or more +# input profile descriptors. Each profile lists all the parameters supported by a given output +# or input stream category. +# The "channel_masks", "formats", "devices" and "flags" are specified using strings corresponding +# to enums in audio.h and audio_policy.h. They are concatenated by use of "|" without space or "\n". +# +# For audio HAL version posterior to 3.0 the following sections or sub sections can be present in +# a hw module section: +# - A "global_configuration" section: see above +# - Optionally a "devices" section: +# This section contains descriptors for audio devices with attributes like an address or a +# gain controller. The syntax for the devices section and device descriptor is as follows: +# devices { +# <device name> { # <device name>: any string without space +# type <device type> # <device type> e.g. AUDIO_DEVICE_OUT_SPEAKER +# address <address> # optional: device address, char string less than 64 in length +# } +# } +# - one or more "gains" sections can be present in a device descriptor section. +# If present, they describe the capabilities of gain controllers attached to this input or +# output device. e.g. : +# <device name> { # <device name>: any string without space +# type <device type> # <device type> e.g. 
AUDIO_DEVICE_OUT_SPEAKER +# address <address> # optional: device address, char string less than 64 in length +# gains { +# <gain name> { +# mode <gain modes supported> # e.g. AUDIO_GAIN_MODE_CHANNELS +# channel_mask <controlled channels> # needed if mode AUDIO_GAIN_MODE_CHANNELS +# min_value_mB <min value in millibel> +# max_value_mB <max value in millibel> +# default_value_mB <default value in millibel> +# step_value_mB <step value in millibel> +# min_ramp_ms <min duration in ms> # needed if mode AUDIO_GAIN_MODE_RAMP +# max_ramp_ms <max duration ms> # needed if mode AUDIO_GAIN_MODE_RAMP +# } +# } +# } +# - when a device descriptor is present, output and input profiles can refer to this device by +# its name in their "devices" section instead of specifying a device type. e.g. : +# outputs { +# primary { +# sampling_rates 44100 +# channel_masks AUDIO_CHANNEL_OUT_STEREO +# formats AUDIO_FORMAT_PCM_16_BIT +# devices <device name> +# flags AUDIO_OUTPUT_FLAG_PRIMARY +# } +# } +# sample audio_policy.conf file below + +audio_hw_modules { + primary { + global_configuration { + attached_output_devices AUDIO_DEVICE_OUT_SPEAKER + default_output_device AUDIO_DEVICE_OUT_SPEAKER + attached_input_devices AUDIO_DEVICE_IN_BUILTIN_MIC + audio_hal_version 3.0 + } + devices { + speaker { + type AUDIO_DEVICE_OUT_SPEAKER + gains { + gain_1 { + mode AUDIO_GAIN_MODE_JOINT + min_value_mB -8400 + max_value_mB 4000 + default_value_mB 0 + step_value_mB 100 + } + } + } + } + outputs { + primary { + sampling_rates 48000 + channel_masks AUDIO_CHANNEL_OUT_STEREO + formats AUDIO_FORMAT_PCM_16_BIT + devices speaker + flags AUDIO_OUTPUT_FLAG_PRIMARY + } + } + inputs { + primary { + sampling_rates 8000|16000 + channel_masks AUDIO_CHANNEL_IN_MONO + formats AUDIO_FORMAT_PCM_16_BIT + devices AUDIO_DEVICE_IN_BUILTIN_MIC + } + } + } + r_submix { + global_configuration { + attached_input_devices AUDIO_DEVICE_IN_REMOTE_SUBMIX + audio_hal_version 2.0 + } + outputs { + submix { + sampling_rates 48000 + 
channel_masks AUDIO_CHANNEL_OUT_STEREO + formats AUDIO_FORMAT_PCM_16_BIT + devices AUDIO_DEVICE_OUT_REMOTE_SUBMIX + } + } + inputs { + submix { + sampling_rates 48000 + channel_masks AUDIO_CHANNEL_IN_STEREO + formats AUDIO_FORMAT_PCM_16_BIT + devices AUDIO_DEVICE_IN_REMOTE_SUBMIX + } + } + } +} diff --git a/services/audiopolicy/audio_policy_conf.h b/services/audiopolicy/audio_policy_conf.h index 79f20f1..2535a67 100644 --- a/services/audiopolicy/audio_policy_conf.h +++ b/services/audiopolicy/audio_policy_conf.h @@ -35,6 +35,7 @@ #define DEFAULT_OUTPUT_DEVICE_TAG "default_output_device" #define ATTACHED_INPUT_DEVICES_TAG "attached_input_devices" #define SPEAKER_DRC_ENABLED_TAG "speaker_drc_enabled" +#define AUDIO_HAL_VERSION_TAG "audio_hal_version" // hw modules descriptions #define AUDIO_HW_MODULE_TAG "audio_hw_modules" diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp index 9fd35e1..648e82c 100644 --- a/services/camera/libcameraservice/CameraService.cpp +++ b/services/camera/libcameraservice/CameraService.cpp @@ -261,32 +261,20 @@ status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata return ret; } - ssize_t index = -1; - { // Scope for service lock - Mutex::Autolock lock(mServiceLock); - index = mShimParams.indexOfKey(cameraId); - // Release service lock so initializeShimMetadata can be called correctly. 
- } - - if (index < 0) { - int64_t token = IPCThreadState::self()->clearCallingIdentity(); - ret = initializeShimMetadata(cameraId); - IPCThreadState::self()->restoreCallingIdentity(token); - if (ret != OK) { - return ret; - } + CameraParameters shimParams; + if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) { + // Error logged by callee + return ret; } Vector<Size> sizes; + Vector<Size> jpegSizes; Vector<int32_t> formats; const char* supportedPreviewFormats; - { // Scope for service lock - Mutex::Autolock lock(mServiceLock); - index = mShimParams.indexOfKey(cameraId); - - mShimParams[index].getSupportedPreviewSizes(/*out*/sizes); - - mShimParams[index].getSupportedPreviewFormats(/*out*/formats); + { + shimParams.getSupportedPreviewSizes(/*out*/sizes); + shimParams.getSupportedPreviewFormats(/*out*/formats); + shimParams.getSupportedPictureSizes(/*out*/jpegSizes); } // Always include IMPLEMENTATION_DEFINED @@ -295,21 +283,29 @@ status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata const size_t INTS_PER_CONFIG = 4; // Build available stream configurations metadata - size_t streamConfigSize = sizes.size() * formats.size() * INTS_PER_CONFIG; - int32_t streamConfigs[streamConfigSize]; - size_t configIndex = 0; + size_t streamConfigSize = (sizes.size() * formats.size() + jpegSizes.size()) * INTS_PER_CONFIG; + + Vector<int32_t> streamConfigs; + streamConfigs.setCapacity(streamConfigSize); + for (size_t i = 0; i < formats.size(); ++i) { for (size_t j = 0; j < sizes.size(); ++j) { - streamConfigs[configIndex++] = formats[i]; - streamConfigs[configIndex++] = sizes[j].width; - streamConfigs[configIndex++] = sizes[j].height; - streamConfigs[configIndex++] = - ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT; + streamConfigs.add(formats[i]); + streamConfigs.add(sizes[j].width); + streamConfigs.add(sizes[j].height); + streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT); } } + for (size_t i = 0; i < 
jpegSizes.size(); ++i) { + streamConfigs.add(HAL_PIXEL_FORMAT_BLOB); + streamConfigs.add(jpegSizes[i].width); + streamConfigs.add(jpegSizes[i].height); + streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT); + } + if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, - streamConfigs, streamConfigSize)) != OK) { + streamConfigs.array(), streamConfigSize)) != OK) { return ret; } @@ -470,6 +466,7 @@ status_t CameraService::initializeShimMetadata(int cameraId) { int uid = getCallingUid(); status_t ret = validateConnect(cameraId, uid); if (ret != OK) { + // Error already logged by callee return ret; } @@ -492,6 +489,7 @@ status_t CameraService::initializeShimMetadata(int cameraId) { client); if (ret != OK) { + // Error already logged by callee return ret; } } @@ -513,6 +511,52 @@ status_t CameraService::initializeShimMetadata(int cameraId) { return OK; } +status_t CameraService::getLegacyParametersLazy(int cameraId, + /*out*/ + CameraParameters* parameters) { + + ALOGV("%s: for cameraId: %d", __FUNCTION__, cameraId); + + status_t ret = 0; + + if (parameters == NULL) { + ALOGE("%s: parameters must not be null", __FUNCTION__); + return BAD_VALUE; + } + + ssize_t index = -1; + { // Scope for service lock + Mutex::Autolock lock(mServiceLock); + index = mShimParams.indexOfKey(cameraId); + // Release service lock so initializeShimMetadata can be called correctly. 
+ + if (index >= 0) { + *parameters = mShimParams[index]; + } + } + + if (index < 0) { + int64_t token = IPCThreadState::self()->clearCallingIdentity(); + ret = initializeShimMetadata(cameraId); + IPCThreadState::self()->restoreCallingIdentity(token); + if (ret != OK) { + // Error already logged by callee + return ret; + } + + { // Scope for service lock + Mutex::Autolock lock(mServiceLock); + index = mShimParams.indexOfKey(cameraId); + + LOG_ALWAYS_FATAL_IF(index < 0, "index should have been initialized"); + + *parameters = mShimParams[index]; + } + } + + return OK; +} + status_t CameraService::validateConnect(int cameraId, /*inout*/ int& clientUid) const { @@ -615,7 +659,8 @@ status_t CameraService::connectHelperLocked(const sp<ICameraClient>& cameraClien int clientUid, int callingPid, /*out*/ - sp<Client>& client) { + sp<Client>& client, + int halVersion) { int facing = -1; int deviceVersion = getDeviceVersion(cameraId, &facing); @@ -628,28 +673,47 @@ status_t CameraService::connectHelperLocked(const sp<ICameraClient>& cameraClien cameraId); } - switch(deviceVersion) { - case CAMERA_DEVICE_API_VERSION_1_0: - client = new CameraClient(this, cameraClient, - clientPackageName, cameraId, - facing, callingPid, clientUid, getpid()); - break; - case CAMERA_DEVICE_API_VERSION_2_0: - case CAMERA_DEVICE_API_VERSION_2_1: - case CAMERA_DEVICE_API_VERSION_3_0: - case CAMERA_DEVICE_API_VERSION_3_1: - case CAMERA_DEVICE_API_VERSION_3_2: - client = new Camera2Client(this, cameraClient, - clientPackageName, cameraId, - facing, callingPid, clientUid, getpid(), - deviceVersion); - break; - case -1: - ALOGE("Invalid camera id %d", cameraId); - return BAD_VALUE; - default: - ALOGE("Unknown camera device HAL version: %d", deviceVersion); - return INVALID_OPERATION; + if (halVersion < 0 || halVersion == deviceVersion) { + // Default path: HAL version is unspecified by caller, create CameraClient + // based on device version reported by the HAL. 
+ switch(deviceVersion) { + case CAMERA_DEVICE_API_VERSION_1_0: + client = new CameraClient(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + break; + case CAMERA_DEVICE_API_VERSION_2_0: + case CAMERA_DEVICE_API_VERSION_2_1: + case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: + client = new Camera2Client(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + break; + case -1: + ALOGE("Invalid camera id %d", cameraId); + return BAD_VALUE; + default: + ALOGE("Unknown camera device HAL version: %d", deviceVersion); + return INVALID_OPERATION; + } + } else { + // A particular HAL version is requested by caller. Create CameraClient + // based on the requested HAL version. + if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 && + halVersion == CAMERA_DEVICE_API_VERSION_1_0) { + // Only support higher HAL version device opened as HAL1.0 device. + client = new CameraClient(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + } else { + // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet. 
+ ALOGE("Invalid camera HAL version %x: HAL %x device can only be" + " opened as HAL %x device", halVersion, deviceVersion, + CAMERA_DEVICE_API_VERSION_1_0); + return INVALID_OPERATION; + } } status_t status = connectFinishUnsafe(client, client->getRemote()); @@ -718,6 +782,70 @@ status_t CameraService::connect( return OK; } +status_t CameraService::connectLegacy( + const sp<ICameraClient>& cameraClient, + int cameraId, int halVersion, + const String16& clientPackageName, + int clientUid, + /*out*/ + sp<ICamera>& device) { + + if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED && + mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_3) { + /* + * Either the HAL version is unspecified in which case this just creates + * a camera client selected by the latest device version, or + * it's a particular version in which case the HAL must supported + * the open_legacy call + */ + ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!", + __FUNCTION__, mModule->common.module_api_version); + return INVALID_OPERATION; + } + + String8 clientName8(clientPackageName); + int callingPid = getCallingPid(); + + LOG1("CameraService::connect legacy E (pid %d \"%s\", id %d)", callingPid, + clientName8.string(), cameraId); + + status_t status = validateConnect(cameraId, /*inout*/clientUid); + if (status != OK) { + return status; + } + + sp<Client> client; + { + Mutex::Autolock lock(mServiceLock); + sp<BasicClient> clientTmp; + if (!canConnectUnsafe(cameraId, clientPackageName, + cameraClient->asBinder(), + /*out*/clientTmp)) { + return -EBUSY; + } else if (client.get() != NULL) { + device = static_cast<Client*>(clientTmp.get()); + return OK; + } + + status = connectHelperLocked(cameraClient, + cameraId, + clientPackageName, + clientUid, + callingPid, + client, + halVersion); + if (status != OK) { + return status; + } + + } + // important: release the mutex here so the client can call back + // into the service from its destructor (can 
be at the end of the call) + + device = client; + return OK; +} + status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client, const sp<IBinder>& remoteCallback) { status_t status = client->initialize(mModule); @@ -780,8 +908,8 @@ status_t CameraService::connectPro( case CAMERA_DEVICE_API_VERSION_3_0: case CAMERA_DEVICE_API_VERSION_3_1: case CAMERA_DEVICE_API_VERSION_3_2: - client = new ProCamera2Client(this, cameraCb, String16(), - cameraId, facing, callingPid, USE_CALLING_UID, getpid()); + client = new ProCamera2Client(this, cameraCb, clientPackageName, + cameraId, facing, callingPid, clientUid, getpid()); break; case -1: ALOGE("Invalid camera id %d", cameraId); @@ -860,8 +988,8 @@ status_t CameraService::connectDevice( case CAMERA_DEVICE_API_VERSION_3_0: case CAMERA_DEVICE_API_VERSION_3_1: case CAMERA_DEVICE_API_VERSION_3_2: - client = new CameraDeviceClient(this, cameraCb, String16(), - cameraId, facing, callingPid, USE_CALLING_UID, getpid()); + client = new CameraDeviceClient(this, cameraCb, clientPackageName, + cameraId, facing, callingPid, clientUid, getpid()); break; case -1: ALOGE("Invalid camera id %d", cameraId); @@ -950,6 +1078,78 @@ status_t CameraService::removeListener( return BAD_VALUE; } +status_t CameraService::getLegacyParameters( + int cameraId, + /*out*/ + String16* parameters) { + ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId); + + if (parameters == NULL) { + ALOGE("%s: parameters must not be null", __FUNCTION__); + return BAD_VALUE; + } + + status_t ret = 0; + + CameraParameters shimParams; + if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) { + // Error logged by caller + return ret; + } + + String8 shimParamsString8 = shimParams.flatten(); + String16 shimParamsString16 = String16(shimParamsString8); + + *parameters = shimParamsString16; + + return OK; +} + +status_t CameraService::supportsCameraApi(int cameraId, int apiVersion) { + ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId); + + 
switch (apiVersion) { + case API_VERSION_1: + case API_VERSION_2: + break; + default: + ALOGE("%s: Bad API version %d", __FUNCTION__, apiVersion); + return BAD_VALUE; + } + + int facing = -1; + int deviceVersion = getDeviceVersion(cameraId, &facing); + + switch(deviceVersion) { + case CAMERA_DEVICE_API_VERSION_1_0: + case CAMERA_DEVICE_API_VERSION_2_0: + case CAMERA_DEVICE_API_VERSION_2_1: + case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + if (apiVersion == API_VERSION_2) { + ALOGV("%s: Camera id %d uses HAL prior to HAL3.2, doesn't support api2 without shim", + __FUNCTION__, cameraId); + return -EOPNOTSUPP; + } else { // if (apiVersion == API_VERSION_1) { + ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported", + __FUNCTION__, cameraId); + return OK; + } + case CAMERA_DEVICE_API_VERSION_3_2: + ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly", + __FUNCTION__, cameraId); + return OK; + case -1: + ALOGE("%s: Invalid camera id %d", __FUNCTION__, cameraId); + return BAD_VALUE; + default: + ALOGE("%s: Unknown camera device HAL version: %d", __FUNCTION__, deviceVersion); + return INVALID_OPERATION; + } + + return OK; +} + void CameraService::removeClientByRemote(const wp<IBinder>& remoteBinder) { int callingPid = getCallingPid(); LOG1("CameraService::removeClientByRemote E (pid %d)", callingPid); @@ -1079,6 +1279,8 @@ status_t CameraService::onTransact( switch (code) { case BnCameraService::CONNECT: case BnCameraService::CONNECT_PRO: + case BnCameraService::CONNECT_DEVICE: + case BnCameraService::CONNECT_LEGACY: const int pid = getCallingPid(); const int self_pid = getpid(); if (pid != self_pid) { diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h index ee39d52..28590eb 100644 --- a/services/camera/libcameraservice/CameraService.h +++ b/services/camera/libcameraservice/CameraService.h @@ -83,6 +83,11 @@ public: /*out*/ sp<ICamera>& 
device); + virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId, + int halVersion, const String16& clientPackageName, int clientUid, + /*out*/ + sp<ICamera>& device); + virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId, const String16& clientPackageName, int clientUid, /*out*/ @@ -100,6 +105,15 @@ public: virtual status_t removeListener( const sp<ICameraServiceListener>& listener); + virtual status_t getLegacyParameters( + int cameraId, + /*out*/ + String16* parameters); + + // OK = supports api of that version, -EOPNOTSUPP = does not support + virtual status_t supportsCameraApi( + int cameraId, int apiVersion); + // Extra permissions checks virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags); @@ -414,6 +428,14 @@ private: status_t initializeShimMetadata(int cameraId); /** + * Get the cached CameraParameters for the camera. If they haven't been + * cached yet, then initialize them for the first time. + * + * Returns OK on success, or a negative error code. + */ + status_t getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters); + + /** * Generate the CameraCharacteristics metadata required by the Camera2 API * from the available HAL1 CameraParameters and CameraInfo. 
* @@ -433,7 +455,8 @@ private: int clientUid, int callingPid, /*out*/ - sp<Client>& client); + sp<Client>& client, + int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED); }; } // namespace android diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index 0447979..1642896 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -53,12 +53,10 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService, int cameraFacing, int clientPid, uid_t clientUid, - int servicePid, - int deviceVersion): + int servicePid): Camera2ClientBase(cameraService, cameraClient, clientPackageName, cameraId, cameraFacing, clientPid, clientUid, servicePid), - mParameters(cameraId, cameraFacing), - mDeviceVersion(deviceVersion) + mParameters(cameraId, cameraFacing) { ATRACE_CALL(); @@ -80,7 +78,7 @@ status_t Camera2Client::initialize(camera_module_t *module) { SharedParameters::Lock l(mParameters); - res = l.mParameters.initialize(&(mDevice->info())); + res = l.mParameters.initialize(&(mDevice->info()), mDeviceVersion); if (res != OK) { ALOGE("%s: Camera %d: unable to build defaults: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); @@ -755,6 +753,7 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { // ever take a picture. // TODO: Find a better compromise, though this likely would involve HAL // changes. 
+ int lastJpegStreamId = mJpegProcessor->getStreamId(); res = updateProcessorStream(mJpegProcessor, params); if (res != OK) { ALOGE("%s: Camera %d: Can't pre-configure still image " @@ -762,6 +761,7 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { __FUNCTION__, mCameraId, strerror(-res), res); return res; } + bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId; Vector<int32_t> outputStreams; bool callbacksEnabled = (params.previewCallbackFlags & @@ -817,6 +817,12 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { __FUNCTION__, mCameraId, strerror(-res), res); return res; } + + if (jpegStreamChanged) { + ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed", + __FUNCTION__, mCameraId); + mZslProcessor->clearZslQueue(); + } outputStreams.push(getZslStreamId()); } else { mZslProcessor->deleteStream(); @@ -1270,6 +1276,7 @@ status_t Camera2Client::takePicture(int msgType) { ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId); + int lastJpegStreamId = mJpegProcessor->getStreamId(); res = updateProcessorStream(mJpegProcessor, l.mParameters); if (res != OK) { ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)", @@ -1277,6 +1284,14 @@ status_t Camera2Client::takePicture(int msgType) { return res; } takePictureCounter = ++l.mParameters.takePictureCounter; + + // Clear ZSL buffer queue when Jpeg size is changed. 
+ bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId; + if (l.mParameters.zslMode && jpegStreamChanged) { + ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed", + __FUNCTION__, mCameraId); + mZslProcessor->clearZslQueue(); + } } ATRACE_ASYNC_BEGIN(kTakepictureLabel, takePictureCounter); @@ -1659,8 +1674,8 @@ int Camera2Client::getZslStreamId() const { } status_t Camera2Client::registerFrameListener(int32_t minId, int32_t maxId, - wp<camera2::FrameProcessor::FilteredListener> listener) { - return mFrameProcessor->registerListener(minId, maxId, listener); + wp<camera2::FrameProcessor::FilteredListener> listener, bool sendPartials) { + return mFrameProcessor->registerListener(minId, maxId, listener, sendPartials); } status_t Camera2Client::removeFrameListener(int32_t minId, int32_t maxId, diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h index fe0bf74..5ce757a 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.h +++ b/services/camera/libcameraservice/api1/Camera2Client.h @@ -89,8 +89,7 @@ public: int cameraFacing, int clientPid, uid_t clientUid, - int servicePid, - int deviceVersion); + int servicePid); virtual ~Camera2Client(); @@ -118,7 +117,8 @@ public: int getZslStreamId() const; status_t registerFrameListener(int32_t minId, int32_t maxId, - wp<camera2::FrameProcessor::FilteredListener> listener); + wp<camera2::FrameProcessor::FilteredListener> listener, + bool sendPartials = true); status_t removeFrameListener(int32_t minId, int32_t maxId, wp<camera2::FrameProcessor::FilteredListener> listener); @@ -170,7 +170,6 @@ private: void setPreviewCallbackFlagL(Parameters ¶ms, int flag); status_t updateRequests(Parameters ¶ms); - int mDeviceVersion; // Used with stream IDs static const int NO_STREAM = -1; diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp index 
30b7bb8..517226d 100644 --- a/services/camera/libcameraservice/api1/CameraClient.cpp +++ b/services/camera/libcameraservice/api1/CameraClient.cpp @@ -79,7 +79,7 @@ status_t CameraClient::initialize(camera_module_t *module) { ALOGE("%s: Camera %d: unable to initialize device: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); mHardware.clear(); - return NO_INIT; + return res; } mHardware->setCallbacks(notifyCallback, diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp index 8268f65..cb9aca6 100644 --- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp +++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp @@ -350,8 +350,10 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart( return DONE; } + // We don't want to get partial results for ZSL capture. client->registerFrameListener(mCaptureId, mCaptureId + 1, - this); + this, + /*sendPartials*/false); // TODO: Actually select the right thing here. res = processor->pushToReprocess(mCaptureId); @@ -393,8 +395,14 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart( bool isAeConverged = false; // Get the onFrameAvailable callback when the requestID == mCaptureId + // We don't want to get partial results for normal capture, as we need + // Get ANDROID_SENSOR_TIMESTAMP from the capture result, but partial + // result doesn't have to have this metadata available. + // TODO: Update to use the HALv3 shutter notification for remove the + // need for this listener and make it faster. see bug 12530628. 
client->registerFrameListener(mCaptureId, mCaptureId + 1, - this); + this, + /*sendPartials*/false); { Mutex::Autolock l(mInputMutex); diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp index 69bea24..3de5d90 100644 --- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp @@ -78,7 +78,7 @@ bool FrameProcessor::processSingleFrame(CaptureResult &frame, } if (mSynthesize3ANotify) { - process3aState(frame.mMetadata, client); + process3aState(frame, client); } return FrameProcessorBase::processSingleFrame(frame, device); @@ -212,14 +212,15 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, return OK; } -status_t FrameProcessor::process3aState(const CameraMetadata &frame, +status_t FrameProcessor::process3aState(const CaptureResult &frame, const sp<Camera2Client> &client) { ATRACE_CALL(); + const CameraMetadata &metadata = frame.mMetadata; camera_metadata_ro_entry_t entry; int cameraId = client->getCameraId(); - entry = frame.find(ANDROID_REQUEST_FRAME_COUNT); + entry = metadata.find(ANDROID_REQUEST_FRAME_COUNT); int32_t frameNumber = entry.data.i32[0]; // Don't send 3A notifications for the same frame number twice @@ -238,26 +239,31 @@ status_t FrameProcessor::process3aState(const CameraMetadata &frame, // TODO: Also use AE mode, AE trigger ID - gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_MODE, + gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE, &new3aState.afMode, frameNumber, cameraId); - gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_MODE, + gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE, &new3aState.awbMode, frameNumber, cameraId); - gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE, + gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE, 
&new3aState.aeState, frameNumber, cameraId); - gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_STATE, + gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE, &new3aState.afState, frameNumber, cameraId); - gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_STATE, + gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE, &new3aState.awbState, frameNumber, cameraId); - gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID, - &new3aState.afTriggerId, frameNumber, cameraId); + if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) { + new3aState.afTriggerId = frame.mResultExtras.afTriggerId; + new3aState.aeTriggerId = frame.mResultExtras.precaptureTriggerId; + } else { + gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AF_TRIGGER_ID, + &new3aState.afTriggerId, frameNumber, cameraId); - gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AE_PRECAPTURE_ID, - &new3aState.aeTriggerId, frameNumber, cameraId); + gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AE_PRECAPTURE_ID, + &new3aState.aeTriggerId, frameNumber, cameraId); + } if (!gotAllStates) return BAD_VALUE; diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h index 514bd1a..4afca50 100644 --- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h +++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h @@ -58,7 +58,7 @@ class FrameProcessor : public FrameProcessorBase { const sp<Camera2Client> &client); // Send 3A state change notifications to client based on frame metadata - status_t process3aState(const CameraMetadata &frame, + status_t process3aState(const CaptureResult &frame, const sp<Camera2Client> &client); // Helper for process3aState diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp 
b/services/camera/libcameraservice/api1/client2/Parameters.cpp index dece764..6459300 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.cpp +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -29,6 +29,9 @@ #include "Parameters.h" #include "system/camera.h" +#include "hardware/camera_common.h" +#include <media/MediaProfiles.h> +#include <media/mediarecorder.h> namespace android { namespace camera2 { @@ -43,7 +46,7 @@ Parameters::Parameters(int cameraId, Parameters::~Parameters() { } -status_t Parameters::initialize(const CameraMetadata *info) { +status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) { status_t res; if (info->entryCount() == 0) { @@ -51,6 +54,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { return BAD_VALUE; } Parameters::info = info; + mDeviceVersion = deviceVersion; res = buildFastInfo(); if (res != OK) return res; @@ -59,7 +63,17 @@ status_t Parameters::initialize(const CameraMetadata *info) { if (res != OK) return res; const Size MAX_PREVIEW_SIZE = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT }; - res = getFilteredPreviewSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes); + // Treat the H.264 max size as the max supported video size. 
+ MediaProfiles *videoEncoderProfiles = MediaProfiles::getInstance(); + int32_t maxVideoWidth = videoEncoderProfiles->getVideoEncoderParamByName( + "enc.vid.width.max", VIDEO_ENCODER_H264); + int32_t maxVideoHeight = videoEncoderProfiles->getVideoEncoderParamByName( + "enc.vid.height.max", VIDEO_ENCODER_H264); + const Size MAX_VIDEO_SIZE = {maxVideoWidth, maxVideoHeight}; + + res = getFilteredSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes); + if (res != OK) return res; + res = getFilteredSizes(MAX_VIDEO_SIZE, &availableVideoSizes); if (res != OK) return res; // TODO: Pick more intelligently @@ -84,8 +98,17 @@ status_t Parameters::initialize(const CameraMetadata *info) { ALOGV("Supported preview sizes are: %s", supportedPreviewSizes.string()); params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES, supportedPreviewSizes); + + String8 supportedVideoSizes; + for (size_t i = 0; i < availableVideoSizes.size(); i++) { + if (i != 0) supportedVideoSizes += ","; + supportedVideoSizes += String8::format("%dx%d", + availableVideoSizes[i].width, + availableVideoSizes[i].height); + } + ALOGV("Supported video sizes are: %s", supportedVideoSizes.string()); params.set(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES, - supportedPreviewSizes); + supportedVideoSizes); } camera_metadata_ro_entry_t availableFpsRanges = @@ -119,16 +142,14 @@ status_t Parameters::initialize(const CameraMetadata *info) { previewTransform = degToTransform(0, cameraFacing == CAMERA_FACING_FRONT); - camera_metadata_ro_entry_t availableFormats = - staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); - { String8 supportedPreviewFormats; + SortedVector<int32_t> outputFormats = getAvailableOutputFormats(); bool addComma = false; - for (size_t i=0; i < availableFormats.count; i++) { + for (size_t i=0; i < outputFormats.size(); i++) { if (addComma) supportedPreviewFormats += ","; addComma = true; - switch (availableFormats.data.i32[i]) { + switch (outputFormats[i]) { case HAL_PIXEL_FORMAT_YCbCr_422_SP: 
supportedPreviewFormats += CameraParameters::PIXEL_FORMAT_YUV422SP; @@ -170,7 +191,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { default: ALOGW("%s: Camera %d: Unknown preview format: %x", - __FUNCTION__, cameraId, availableFormats.data.i32[i]); + __FUNCTION__, cameraId, outputFormats[i]); addComma = false; break; } @@ -218,24 +239,23 @@ status_t Parameters::initialize(const CameraMetadata *info) { supportedPreviewFrameRates); } - camera_metadata_ro_entry_t availableJpegSizes = - staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, 2); - if (!availableJpegSizes.count) return NO_INIT; + Vector<Size> availableJpegSizes = getAvailableJpegSizes(); + if (!availableJpegSizes.size()) return NO_INIT; // TODO: Pick maximum - pictureWidth = availableJpegSizes.data.i32[0]; - pictureHeight = availableJpegSizes.data.i32[1]; + pictureWidth = availableJpegSizes[0].width; + pictureHeight = availableJpegSizes[0].height; params.setPictureSize(pictureWidth, pictureHeight); { String8 supportedPictureSizes; - for (size_t i=0; i < availableJpegSizes.count; i += 2) { + for (size_t i=0; i < availableJpegSizes.size(); i++) { if (i != 0) supportedPictureSizes += ","; supportedPictureSizes += String8::format("%dx%d", - availableJpegSizes.data.i32[i], - availableJpegSizes.data.i32[i+1]); + availableJpegSizes[i].width, + availableJpegSizes[i].height); } params.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, supportedPictureSizes); @@ -931,9 +951,8 @@ status_t Parameters::buildFastInfo() { staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS); if (!availableFocalLengths.count) return NO_INIT; - camera_metadata_ro_entry_t availableFormats = - staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); - if (!availableFormats.count) return NO_INIT; + SortedVector<int32_t> availableFormats = getAvailableOutputFormats(); + if (!availableFormats.size()) return NO_INIT; if (sceneModeOverrides.count > 0) { @@ -1017,8 +1036,8 @@ status_t Parameters::buildFastInfo() { // Check if the HAL supports 
HAL_PIXEL_FORMAT_YCbCr_420_888 fastInfo.useFlexibleYuv = false; - for (size_t i = 0; i < availableFormats.count; i++) { - if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) { + for (size_t i = 0; i < availableFormats.size(); i++) { + if (availableFormats[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) { fastInfo.useFlexibleYuv = true; break; } @@ -1177,8 +1196,7 @@ status_t Parameters::set(const String8& paramString) { "is active!", __FUNCTION__); return BAD_VALUE; } - camera_metadata_ro_entry_t availableFormats = - staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + SortedVector<int32_t> availableFormats = getAvailableOutputFormats(); // If using flexible YUV, always support NV21/YV12. Otherwise, check // HAL's list. if (! (fastInfo.useFlexibleYuv && @@ -1187,11 +1205,10 @@ status_t Parameters::set(const String8& paramString) { validatedParams.previewFormat == HAL_PIXEL_FORMAT_YV12) ) ) { // Not using flexible YUV format, so check explicitly - for (i = 0; i < availableFormats.count; i++) { - if (availableFormats.data.i32[i] == - validatedParams.previewFormat) break; + for (i = 0; i < availableFormats.size(); i++) { + if (availableFormats[i] == validatedParams.previewFormat) break; } - if (i == availableFormats.count) { + if (i == availableFormats.size()) { ALOGE("%s: Requested preview format %s (0x%x) is not supported", __FUNCTION__, newParams.getPreviewFormat(), validatedParams.previewFormat); @@ -1281,15 +1298,14 @@ status_t Parameters::set(const String8& paramString) { &validatedParams.pictureHeight); if (validatedParams.pictureWidth == pictureWidth || validatedParams.pictureHeight == pictureHeight) { - camera_metadata_ro_entry_t availablePictureSizes = - staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); - for (i = 0; i < availablePictureSizes.count; i+=2) { - if ((availablePictureSizes.data.i32[i] == + Vector<Size> availablePictureSizes = getAvailableJpegSizes(); + for (i = 0; i < availablePictureSizes.size(); i++) { + if ((availablePictureSizes[i].width == 
validatedParams.pictureWidth) && - (availablePictureSizes.data.i32[i+1] == + (availablePictureSizes[i].height == validatedParams.pictureHeight)) break; } - if (i == availablePictureSizes.count) { + if (i == availablePictureSizes.size()) { ALOGE("%s: Requested picture size %d x %d is not supported", __FUNCTION__, validatedParams.pictureWidth, validatedParams.pictureHeight); @@ -1660,13 +1676,13 @@ status_t Parameters::set(const String8& paramString) { __FUNCTION__); return BAD_VALUE; } - for (i = 0; i < availablePreviewSizes.size(); i++) { - if ((availablePreviewSizes[i].width == + for (i = 0; i < availableVideoSizes.size(); i++) { + if ((availableVideoSizes[i].width == validatedParams.videoWidth) && - (availablePreviewSizes[i].height == + (availableVideoSizes[i].height == validatedParams.videoHeight)) break; } - if (i == availablePreviewSizes.size()) { + if (i == availableVideoSizes.size()) { ALOGE("%s: Requested video size %d x %d is not supported", __FUNCTION__, validatedParams.videoWidth, validatedParams.videoHeight); @@ -2497,7 +2513,7 @@ int Parameters::normalizedYToArray(int y) const { return cropYToArray(normalizedYToCrop(y)); } -status_t Parameters::getFilteredPreviewSizes(Size limit, Vector<Size> *sizes) { +status_t Parameters::getFilteredSizes(Size limit, Vector<Size> *sizes) { if (info == NULL) { ALOGE("%s: Static metadata is not initialized", __FUNCTION__); return NO_INIT; @@ -2506,22 +2522,37 @@ status_t Parameters::getFilteredPreviewSizes(Size limit, Vector<Size> *sizes) { ALOGE("%s: Input size is null", __FUNCTION__); return BAD_VALUE; } - - const size_t SIZE_COUNT = sizeof(Size) / sizeof(int); - camera_metadata_ro_entry_t availableProcessedSizes = - staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT); - if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE; - - Size previewSize; - for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) { - previewSize.width = availableProcessedSizes.data.i32[i]; - 
previewSize.height = availableProcessedSizes.data.i32[i+1]; - // Need skip the preview sizes that are too large. - if (previewSize.width <= limit.width && - previewSize.height <= limit.height) { - sizes->push(previewSize); + sizes->clear(); + + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + Vector<StreamConfiguration> scs = getStreamConfigurations(); + for (size_t i=0; i < scs.size(); i++) { + const StreamConfiguration &sc = scs[i]; + if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT && + sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED && + sc.width <= limit.width && sc.height <= limit.height) { + Size sz = {sc.width, sc.height}; + sizes->push(sz); } + } + } else { + const size_t SIZE_COUNT = sizeof(Size) / sizeof(int); + camera_metadata_ro_entry_t availableProcessedSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT); + if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE; + + Size filteredSize; + for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) { + filteredSize.width = availableProcessedSizes.data.i32[i]; + filteredSize.height = availableProcessedSizes.data.i32[i+1]; + // Need skip the preview sizes that are too large. 
+ if (filteredSize.width <= limit.width && + filteredSize.height <= limit.height) { + sizes->push(filteredSize); + } + } } + if (sizes->isEmpty()) { ALOGE("generated preview size list is empty!!"); return BAD_VALUE; @@ -2555,6 +2586,78 @@ Parameters::Size Parameters::getMaxSizeForRatio( return maxSize; } +Vector<Parameters::StreamConfiguration> Parameters::getStreamConfigurations() { + const int STREAM_CONFIGURATION_SIZE = 4; + const int STREAM_FORMAT_OFFSET = 0; + const int STREAM_WIDTH_OFFSET = 1; + const int STREAM_HEIGHT_OFFSET = 2; + const int STREAM_IS_INPUT_OFFSET = 3; + Vector<StreamConfiguration> scs; + if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) { + ALOGE("StreamConfiguration is only valid after device HAL 3.2!"); + return scs; + } + + camera_metadata_ro_entry_t availableStreamConfigs = + staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS); + for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) { + int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET]; + int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET]; + int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET]; + int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET]; + StreamConfiguration sc = {format, width, height, isInput}; + scs.add(sc); + } + return scs; +} + +SortedVector<int32_t> Parameters::getAvailableOutputFormats() { + SortedVector<int32_t> outputFormats; // Non-duplicated output formats + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + Vector<StreamConfiguration> scs = getStreamConfigurations(); + for (size_t i=0; i < scs.size(); i++) { + const StreamConfiguration &sc = scs[i]; + if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) { + outputFormats.add(sc.format); + } + } + } else { + camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + for (size_t i=0; i < availableFormats.count; i++) { + 
outputFormats.add(availableFormats.data.i32[i]); + } + } + return outputFormats; +} + +Vector<Parameters::Size> Parameters::getAvailableJpegSizes() { + Vector<Parameters::Size> jpegSizes; + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + Vector<StreamConfiguration> scs = getStreamConfigurations(); + for (size_t i=0; i < scs.size(); i++) { + const StreamConfiguration &sc = scs[i]; + if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT && + sc.format == HAL_PIXEL_FORMAT_BLOB) { + Size sz = {sc.width, sc.height}; + jpegSizes.add(sz); + } + } + } else { + const int JPEG_SIZE_ENTRY_COUNT = 2; + const int WIDTH_OFFSET = 0; + const int HEIGHT_OFFSET = 1; + camera_metadata_ro_entry_t availableJpegSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); + for (size_t i=0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) { + int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET]; + int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET]; + Size sz = {width, height}; + jpegSizes.add(sz); + } + } + return jpegSizes; +} + Parameters::CropRegion Parameters::calculateCropRegion( Parameters::CropRegion::Outputs outputs) const { diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h index 60c4687..f95c69a 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.h +++ b/services/camera/libcameraservice/api1/client2/Parameters.h @@ -226,7 +226,7 @@ struct Parameters { ~Parameters(); // Sets up default parameters - status_t initialize(const CameraMetadata *info); + status_t initialize(const CameraMetadata *info, int deviceVersion); // Build fast-access device static info from static info status_t buildFastInfo(); @@ -341,10 +341,29 @@ private: int normalizedYToCrop(int y) const; Vector<Size> availablePreviewSizes; + Vector<Size> availableVideoSizes; // Get size list (that are no larger than limit) from static metadata. 
- status_t getFilteredPreviewSizes(Size limit, Vector<Size> *sizes);
+ status_t getFilteredSizes(Size limit, Vector<Size> *sizes);
 // Get max size (from the size array) that matches the given aspect ratio.
 Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
+
+ struct StreamConfiguration {
+ int32_t format;
+ int32_t width;
+ int32_t height;
+ int32_t isInput;
+ };
+ // Helper function to extract available stream configuration
+ // Only valid since device HAL version 3.2
+ // returns an empty Vector if device HAL version does not support it
+ Vector<StreamConfiguration> getStreamConfigurations();
+
+ // Helper function to get non-duplicated available output formats
+ SortedVector<int32_t> getAvailableOutputFormats();
+ // Helper function to get available output jpeg sizes
+ Vector<Size> getAvailableJpegSizes();
+
+ int mDeviceVersion;
 };
 // This class encapsulates the Parameters class so that it can only be accessed
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 2064e2c..99abced 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -430,10 +430,13 @@ status_t StreamingProcessor::startStream(StreamType type,
 Mutex::Autolock m(mMutex);
- // If a recording stream is being started up, free up any
- // outstanding buffers left from the previous recording session.
- // There should never be any, so if there are, warn about it.
- if (isStreamActive(outputStreams, mRecordingStreamId)) {
+ // If a recording stream is being started up and no recording
+ // stream is active yet, free up any outstanding buffers left
+ // from the previous recording session. There should never be
+ // any, so if there are, warn about it.
+ bool isRecordingStreamIdle = !isStreamActive(mActiveStreamIds, mRecordingStreamId); + bool startRecordingStream = isStreamActive(outputStreams, mRecordingStreamId); + if (startRecordingStream && isRecordingStreamIdle) { releaseAllRecordingFramesLocked(); } diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp index 2a2a5af..10463c1 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp @@ -202,7 +202,8 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { } client->registerFrameListener(Camera2Client::kPreviewRequestIdStart, Camera2Client::kPreviewRequestIdEnd, - this); + this, + /*sendPartials*/false); return OK; } diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp index 1dcb718..ae537e2 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp @@ -52,8 +52,31 @@ ZslProcessor3::ZslProcessor3( mFrameListHead(0), mZslQueueHead(0), mZslQueueTail(0) { - mZslQueue.insertAt(0, kZslBufferDepth); - mFrameList.insertAt(0, kFrameListDepth); + // Initialize buffer queue and frame list based on pipeline max depth. 
+ size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
+ if (client != 0) {
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device != 0) {
+ camera_metadata_ro_entry_t entry =
+ device->info().find(ANDROID_REQUEST_PIPELINE_MAX_DEPTH);
+ if (entry.count == 1) {
+ pipelineMaxDepth = entry.data.u8[0];
+ } else {
+ ALOGW("%s: Unable to find the android.request.pipelineMaxDepth,"
+ " use default pipeline max depth %zu", __FUNCTION__,
+ kDefaultMaxPipelineDepth);
+ }
+ }
+ }
+
+ ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%zu)",
+ __FUNCTION__, pipelineMaxDepth);
+ mBufferQueueDepth = pipelineMaxDepth + 1;
+ mFrameListDepth = pipelineMaxDepth + 1;
+
+ mZslQueue.insertAt(0, mBufferQueueDepth);
+ mFrameList.insertAt(0, mFrameListDepth);
 sp<CaptureSequencer> captureSequencer = mSequencer.promote();
 if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
 }
@@ -70,13 +93,25 @@ void ZslProcessor3::onResultAvailable(const CaptureResult &result) {
 camera_metadata_ro_entry_t entry;
 entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
 nsecs_t timestamp = entry.data.i64[0];
+ if (entry.count == 0) {
+ ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
+ return;
+ }
 (void)timestamp;
- ALOGVV("Got preview metadata for timestamp %" PRId64, timestamp);
+
+ entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
+ if (entry.count == 0) {
+ ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
+ return;
+ }
+ int32_t frameNumber = entry.data.i32[0];
+
+ ALOGVV("Got preview metadata for frame %d with timestamp %" PRId64, frameNumber, timestamp);
 if (mState != RUNNING) return;
 mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
- mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+ mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
 }
 status_t ZslProcessor3::updateStream(const Parameters &params) {
@@ -136,7 +171,7 @@ status_t
ZslProcessor3::updateStream(const Parameters &params) {
 // Note that format specified internally in Camera3ZslStream
 res = device->createZslStream(
 params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
- kZslBufferDepth,
+ mBufferQueueDepth,
 &mZslStreamId,
 &mZslStream);
 if (res != OK) {
@@ -145,10 +180,15 @@ status_t ZslProcessor3::updateStream(const Parameters &params) {
 strerror(-res), res);
 return res;
 }
+
+ // Only add the camera3 buffer listener when the stream is created.
+ mZslStream->addBufferListener(this);
 }
+
 client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
 Camera2Client::kPreviewRequestIdEnd,
- this);
+ this,
+ /*sendPartials*/false);
 return OK;
 }
@@ -276,15 +316,6 @@ status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
 return INVALID_OPERATION;
 }
- // Flush device to clear out all in-flight requests pending in HAL.
- res = client->getCameraDevice()->flush();
- if (res != OK) {
- ALOGE("%s: Camera %d: Failed to flush device: "
- "%s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
- return res;
- }
- // Update JPEG settings
 {
 SharedParameters::Lock l(client->getParameters());
@@ -322,11 +353,19 @@ status_t ZslProcessor3::clearZslQueue() {
 status_t ZslProcessor3::clearZslQueueLocked() {
 if (mZslStream != 0) {
+ // clear result metadata list first.
+ clearZslResultQueueLocked(); return mZslStream->clearInputRingBuffer(); } return OK; } +void ZslProcessor3::clearZslResultQueueLocked() { + mFrameList.clear(); + mFrameListHead = 0; + mFrameList.insertAt(0, mFrameListDepth); +} + void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const { Mutex::Autolock l(mInputMutex); if (!mLatestCapturedRequest.isEmpty()) { @@ -480,11 +519,17 @@ void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) { // We need to guarantee that if we do two back-to-back captures, // the second won't use a buffer that's older/the same as the first, which // is theoretically possible if we don't clear out the queue and the - // selection criteria is something like 'newest'. Clearing out the queue - // on a completed capture ensures we'll only use new data. + // selection criteria is something like 'newest'. Clearing out the result + // metadata queue on a completed capture ensures we'll only use new timestamp. + // Calling clearZslQueueLocked is a guaranteed deadlock because this callback + // holds the Camera3Stream internal lock (mLock), and clearZslQueueLocked requires + // to hold the same lock. + // TODO: need figure out a way to clear the Zsl buffer queue properly. Right now + // it is safe not to do so, as back to back ZSL capture requires stop and start + // preview, which will flush ZSL queue automatically. 
ALOGV("%s: Memory optimization, clearing ZSL queue", __FUNCTION__); - clearZslQueueLocked(); + clearZslResultQueueLocked(); // Required so we accept more ZSL requests mState = RUNNING; diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h index 4c52a64..dfb1457 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h @@ -107,8 +107,9 @@ class ZslProcessor3 : CameraMetadata frame; }; - static const size_t kZslBufferDepth = 4; - static const size_t kFrameListDepth = kZslBufferDepth * 2; + static const int32_t kDefaultMaxPipelineDepth = 4; + size_t mBufferQueueDepth; + size_t mFrameListDepth; Vector<CameraMetadata> mFrameList; size_t mFrameListHead; @@ -124,6 +125,8 @@ class ZslProcessor3 : status_t clearZslQueueLocked(); + void clearZslResultQueueLocked(); + void dumpZslQueue(int id) const; nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const; diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp index 4fce1b3..de42cee 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp @@ -82,7 +82,7 @@ status_t CameraDeviceClient::initialize(camera_module_t *module) mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID, FRAME_PROCESSOR_LISTENER_MAX_ID, /*listener*/this, - /*quirkSendPartials*/true); + /*sendPartials*/true); return OK; } @@ -102,7 +102,7 @@ status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request, status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests, bool streaming, int64_t* lastFrameNumber) { ATRACE_CALL(); - ALOGV("%s-start of function. Request list size %d", __FUNCTION__, requests.size()); + ALOGV("%s-start of function. 
Request list size %zu", __FUNCTION__, requests.size()); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -177,7 +177,7 @@ status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > request metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1); loopCounter++; // loopCounter starts from 1 - ALOGV("%s: Camera %d: Creating request with ID %d (%d of %d)", + ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)", __FUNCTION__, mCameraId, requestId, loopCounter, requests.size()); metadataRequestList.push_back(metadata); @@ -310,6 +310,10 @@ status_t CameraDeviceClient::createStream(int width, int height, int format, Mutex::Autolock icl(mBinderSerializationLock); + if (bufferProducer == NULL) { + ALOGE("%s: bufferProducer must not be null", __FUNCTION__); + return BAD_VALUE; + } if (!mDevice.get()) return DEAD_OBJECT; // Don't create multiple streams for the same target surface diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp index 19efd30..13c9f48 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp +++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp @@ -54,7 +54,8 @@ Camera2ClientBase<TClientBase>::Camera2ClientBase( int servicePid): TClientBase(cameraService, remoteCallback, clientPackageName, cameraId, cameraFacing, clientPid, clientUid, servicePid), - mSharedCameraCallbacks(remoteCallback) + mSharedCameraCallbacks(remoteCallback), + mDeviceVersion(cameraService->getDeviceVersion(cameraId)) { ALOGI("Camera %d: Opened", cameraId); @@ -280,6 +281,11 @@ int Camera2ClientBase<TClientBase>::getCameraId() const { } template <typename TClientBase> +int Camera2ClientBase<TClientBase>::getCameraDeviceVersion() const { + return mDeviceVersion; +} + +template <typename TClientBase> const sp<CameraDeviceBase>& Camera2ClientBase<TClientBase>::getCameraDevice() { return mDevice; } diff --git 
a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h index 9feca93..f57d204 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.h +++ b/services/camera/libcameraservice/common/Camera2ClientBase.h @@ -76,6 +76,7 @@ public: int getCameraId() const; const sp<CameraDeviceBase>& getCameraDevice(); + int getCameraDeviceVersion() const; const sp<CameraService>& getCameraService(); @@ -122,6 +123,7 @@ protected: /** CameraDeviceBase instance wrapping HAL2+ entry */ + const int mDeviceVersion; sp<CameraDeviceBase> mDevice; /** Utility members */ diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp index f6a971a..482f687 100644 --- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp +++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp @@ -37,11 +37,23 @@ FrameProcessorBase::~FrameProcessorBase() { } status_t FrameProcessorBase::registerListener(int32_t minId, - int32_t maxId, wp<FilteredListener> listener, bool quirkSendPartials) { + int32_t maxId, wp<FilteredListener> listener, bool sendPartials) { Mutex::Autolock l(mInputMutex); + List<RangeListener>::iterator item = mRangeListeners.begin(); + while (item != mRangeListeners.end()) { + if (item->minId == minId && + item->maxId == maxId && + item->listener == listener) { + // already registered, just return + ALOGV("%s: Attempt to register the same client twice, ignoring", + __FUNCTION__); + return OK; + } + item++; + } ALOGV("%s: Registering listener for frame id range %d - %d", __FUNCTION__, minId, maxId); - RangeListener rListener = { minId, maxId, listener, quirkSendPartials }; + RangeListener rListener = { minId, maxId, listener, sendPartials }; mRangeListeners.push_back(rListener); return OK; } @@ -176,7 +188,7 @@ status_t FrameProcessorBase::processListeners(const CaptureResult &result, 
List<RangeListener>::iterator item = mRangeListeners.begin(); while (item != mRangeListeners.end()) { if (requestId >= item->minId && requestId < item->maxId && - (!quirkIsPartial || item->quirkSendPartials)) { + (!quirkIsPartial || item->sendPartials)) { sp<FilteredListener> listener = item->listener.promote(); if (listener == 0) { item = mRangeListeners.erase(item); diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h index 15a014e..3649c45 100644 --- a/services/camera/libcameraservice/common/FrameProcessorBase.h +++ b/services/camera/libcameraservice/common/FrameProcessorBase.h @@ -44,11 +44,12 @@ class FrameProcessorBase: public Thread { }; // Register a listener for a range of IDs [minId, maxId). Multiple listeners - // can be listening to the same range. - // QUIRK: sendPartials controls whether partial results will be sent. + // can be listening to the same range. Registering the same listener with + // the same range of IDs has no effect. + // sendPartials controls whether partial results will be sent. 
status_t registerListener(int32_t minId, int32_t maxId, wp<FilteredListener> listener, - bool quirkSendPartials = true); + bool sendPartials = true); status_t removeListener(int32_t minId, int32_t maxId, wp<FilteredListener> listener); @@ -66,7 +67,7 @@ class FrameProcessorBase: public Thread { int32_t minId; int32_t maxId; wp<FilteredListener> listener; - bool quirkSendPartials; + bool sendPartials; }; List<RangeListener> mRangeListeners; diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h index 87b2807..925b645 100644 --- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h +++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h @@ -92,8 +92,22 @@ public: status_t initialize(hw_module_t *module) { ALOGI("Opening camera %s", mName.string()); - int rc = module->methods->open(module, mName.string(), - (hw_device_t **)&mDevice); + camera_module_t *cameraModule = reinterpret_cast<camera_module_t *>(module); + camera_info info; + status_t res = cameraModule->get_camera_info(atoi(mName.string()), &info); + if (res != OK) return res; + + int rc = OK; + if (module->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 && + info.device_version > CAMERA_DEVICE_API_VERSION_1_0) { + // Open higher version camera device as HAL1.0 device. 
+ rc = cameraModule->open_legacy(module, mName.string(), + CAMERA_DEVICE_API_VERSION_1_0, + (hw_device_t **)&mDevice); + } else { + rc = module->methods->open(module, mName.string(), + (hw_device_t **)&mDevice); + } if (rc != OK) { ALOGE("Could not open camera %s: %d", mName.string(), rc); return rc; diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp index 16d6f42..8fce191 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.cpp +++ b/services/camera/libcameraservice/device3/Camera3Device.cpp @@ -113,7 +113,6 @@ status_t Camera3Device::initialize(camera_module_t *module) } /** Cross-check device version */ - if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) { SET_ERR_L("Could not open camera: " "Camera device should be at least %x, reports %x instead", @@ -173,6 +172,7 @@ status_t Camera3Device::initialize(camera_module_t *module) /** Everything is good to go */ + mDeviceVersion = device->common.version; mDeviceInfo = info.static_camera_characteristics; mHal3Device = device; mStatus = STATUS_UNCONFIGURED; @@ -284,42 +284,74 @@ bool Camera3Device::tryLockSpinRightRound(Mutex& lock) { return gotLock; } -ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const { - // TODO: replace below with availableStreamConfiguration for HAL3.2+. - camera_metadata_ro_entry availableJpegSizes = - mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); - if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) { - ALOGE("%s: Camera %d: Can't find find valid available jpeg sizes in static metadata!", - __FUNCTION__, mId); - return BAD_VALUE; - } - - // Get max jpeg size (area-wise). 
+Camera3Device::Size Camera3Device::getMaxJpegResolution() const { int32_t maxJpegWidth = 0, maxJpegHeight = 0; - bool foundMax = false; - for (size_t i = 0; i < availableJpegSizes.count; i += 2) { - if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1]) - > (maxJpegWidth * maxJpegHeight)) { - maxJpegWidth = availableJpegSizes.data.i32[i]; - maxJpegHeight = availableJpegSizes.data.i32[i + 1]; - foundMax = true; + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + const int STREAM_CONFIGURATION_SIZE = 4; + const int STREAM_FORMAT_OFFSET = 0; + const int STREAM_WIDTH_OFFSET = 1; + const int STREAM_HEIGHT_OFFSET = 2; + const int STREAM_IS_INPUT_OFFSET = 3; + camera_metadata_ro_entry_t availableStreamConfigs = + mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS); + if (availableStreamConfigs.count == 0 || + availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) { + return Size(0, 0); + } + + // Get max jpeg size (area-wise). + for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) { + int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET]; + int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET]; + int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET]; + int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET]; + if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT + && format == HAL_PIXEL_FORMAT_BLOB && + (width * height > maxJpegWidth * maxJpegHeight)) { + maxJpegWidth = width; + maxJpegHeight = height; + } + } + } else { + camera_metadata_ro_entry availableJpegSizes = + mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); + if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) { + return Size(0, 0); + } + + // Get max jpeg size (area-wise). 
+ for (size_t i = 0; i < availableJpegSizes.count; i += 2) { + if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1]) + > (maxJpegWidth * maxJpegHeight)) { + maxJpegWidth = availableJpegSizes.data.i32[i]; + maxJpegHeight = availableJpegSizes.data.i32[i + 1]; + } } } - if (!foundMax) { + return Size(maxJpegWidth, maxJpegHeight); +} + +ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const { + // Get max jpeg size (area-wise). + Size maxJpegResolution = getMaxJpegResolution(); + if (maxJpegResolution.width == 0) { + ALOGE("%s: Camera %d: Can't find find valid available jpeg sizes in static metadata!", + __FUNCTION__, mId); return BAD_VALUE; } // Get max jpeg buffer size ssize_t maxJpegBufferSize = 0; - camera_metadata_ro_entry jpegMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE); - if (jpegMaxSize.count == 0) { + camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE); + if (jpegBufMaxSize.count == 0) { ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId); return BAD_VALUE; } - maxJpegBufferSize = jpegMaxSize.data.i32[0]; + maxJpegBufferSize = jpegBufMaxSize.data.i32[0]; // Calculate final jpeg buffer size for the given resolution. - float scaleFactor = ((float) (width * height)) / (maxJpegWidth * maxJpegHeight); + float scaleFactor = ((float) (width * height)) / + (maxJpegResolution.width * maxJpegResolution.height); ssize_t jpegBufferSize = scaleFactor * maxJpegBufferSize; // Bound the buffer size to [MIN_JPEG_BUFFER_SIZE, maxJpegBufferSize]. 
if (jpegBufferSize > maxJpegBufferSize) { @@ -1156,7 +1188,7 @@ status_t Camera3Device::triggerAutofocus(uint32_t id) { { ANDROID_CONTROL_AF_TRIGGER_ID, static_cast<int32_t>(id) - }, + } }; return mRequestThread->queueTrigger(trigger, @@ -1177,7 +1209,7 @@ status_t Camera3Device::triggerCancelAutofocus(uint32_t id) { { ANDROID_CONTROL_AF_TRIGGER_ID, static_cast<int32_t>(id) - }, + } }; return mRequestThread->queueTrigger(trigger, @@ -1198,7 +1230,7 @@ status_t Camera3Device::triggerPrecaptureMetering(uint32_t id) { { ANDROID_CONTROL_AE_PRECAPTURE_ID, static_cast<int32_t>(id) - }, + } }; return mRequestThread->queueTrigger(trigger, @@ -1539,8 +1571,6 @@ bool Camera3Device::processPartial3AQuirk( uint8_t aeState; uint8_t afState; uint8_t awbState; - int32_t afTriggerId; - int32_t aeTriggerId; gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE, &afMode, frameNumber); @@ -1557,12 +1587,6 @@ bool Camera3Device::processPartial3AQuirk( gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE, &awbState, frameNumber); - gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_TRIGGER_ID, - &afTriggerId, frameNumber); - - gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_PRECAPTURE_ID, - &aeTriggerId, frameNumber); - if (!gotAllStates) return false; ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, " @@ -1571,7 +1595,7 @@ bool Camera3Device::processPartial3AQuirk( __FUNCTION__, mId, frameNumber, resultExtras.requestId, afMode, awbMode, afState, aeState, awbState, - afTriggerId, aeTriggerId); + resultExtras.afTriggerId, resultExtras.precaptureTriggerId); // Got all states, so construct a minimal result to send // In addition to the above fields, this means adding in @@ -1635,12 +1659,12 @@ bool Camera3Device::processPartial3AQuirk( } if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID, - &afTriggerId, frameNumber)) { + &resultExtras.afTriggerId, frameNumber)) { return false; } if 
(!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID, - &aeTriggerId, frameNumber)) { + &resultExtras.precaptureTriggerId, frameNumber)) { return false; } @@ -1696,7 +1720,8 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { status_t res; uint32_t frameNumber = result->frame_number; - if (result->result == NULL && result->num_output_buffers == 0) { + if (result->result == NULL && result->num_output_buffers == 0 && + result->input_buffer == NULL) { SET_ERR("No result data provided by HAL for frame %d", frameNumber); return; @@ -1781,7 +1806,7 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { } request.numBuffersLeft -= result->num_output_buffers; - + request.numBuffersLeft -= (result->input_buffer != NULL) ? 1 : 0; if (request.numBuffersLeft < 0) { SET_ERR("Too many buffers returned for frame %d", frameNumber); @@ -1882,6 +1907,19 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { } } + if (result->input_buffer != NULL) { + Camera3Stream *stream = + Camera3Stream::cast(result->input_buffer->stream); + res = stream->returnInputBuffer(*(result->input_buffer)); + // Note: stream may be deallocated at this point, if this buffer was the + // last reference to it. 
+ if (res != OK) { + ALOGE("%s: RequestThread: Can't return input buffer for frame %d to" + " its stream:%s (%d)", __FUNCTION__, + frameNumber, strerror(-res), res); + } + } + // Finally, signal any waiters for new frames if (gotResult) { @@ -2126,6 +2164,17 @@ status_t Camera3Device::RequestThread::setRepeatingRequests( return OK; } +bool Camera3Device::RequestThread::isRepeatingRequestLocked(const sp<CaptureRequest> requestIn) { + if (mRepeatingRequests.empty()) { + return false; + } + int32_t requestId = requestIn->mResultExtras.requestId; + const RequestList &repeatRequests = mRepeatingRequests; + // All repeating requests are guaranteed to have same id so only check first quest + const sp<CaptureRequest> firstRequest = *repeatRequests.begin(); + return (firstRequest->mResultExtras.requestId == requestId); +} + status_t Camera3Device::RequestThread::clearRepeatingRequests(/*out*/int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); mRepeatingRequests.clear(); @@ -2140,6 +2189,18 @@ status_t Camera3Device::RequestThread::clear(/*out*/int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); ALOGV("RequestThread::%s:", __FUNCTION__); mRepeatingRequests.clear(); + + // Decrement repeating frame count for those requests never sent to device + // TODO: Remove this after we have proper error handling so these requests + // will generate an error callback. This might be the only place calling + // isRepeatingRequestLocked. If so, isRepeatingRequestLocked should also be removed. 
+ const RequestList &requests = mRequestQueue; + for (RequestList::const_iterator it = requests.begin(); + it != requests.end(); ++it) { + if (isRepeatingRequestLocked(*it)) { + mRepeatingLastFrameNumber--; + } + } mRequestQueue.clear(); mTriggerMap.clear(); if (lastFrameNumber != NULL) { @@ -2271,6 +2332,7 @@ bool Camera3Device::RequestThread::threadLoop() { } camera3_stream_buffer_t inputBuffer; + uint32_t totalNumBuffers = 0; // Fill in buffers @@ -2283,6 +2345,7 @@ bool Camera3Device::RequestThread::threadLoop() { cleanUpFailedRequest(request, nextRequest, outputBuffers); return true; } + totalNumBuffers += 1; } else { request.input_buffer = NULL; } @@ -2301,6 +2364,7 @@ bool Camera3Device::RequestThread::threadLoop() { } request.num_output_buffers++; } + totalNumBuffers += request.num_output_buffers; // Log request in the in-flight queue sp<Camera3Device> parent = mParent.promote(); @@ -2311,7 +2375,7 @@ bool Camera3Device::RequestThread::threadLoop() { } res = parent->registerInFlight(request.frame_number, - request.num_output_buffers, nextRequest->mResultExtras); + totalNumBuffers, nextRequest->mResultExtras); ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64 ", burstId = %" PRId32 ".", __FUNCTION__, @@ -2367,21 +2431,6 @@ bool Camera3Device::RequestThread::threadLoop() { } mPrevTriggers = triggerCount; - // Return input buffer back to framework - if (request.input_buffer != NULL) { - Camera3Stream *stream = - Camera3Stream::cast(request.input_buffer->stream); - res = stream->returnInputBuffer(*(request.input_buffer)); - // Note: stream may be deallocated at this point, if this buffer was the - // last reference to it. 
- if (res != OK) { - ALOGE("%s: RequestThread: Can't return input buffer for frame %d to" - " its stream:%s (%d)", __FUNCTION__, - request.frame_number, strerror(-res), res); - // TODO: Report error upstream - } - } - return true; } @@ -2554,13 +2603,29 @@ status_t Camera3Device::RequestThread::insertTriggers( Mutex::Autolock al(mTriggerMutex); + sp<Camera3Device> parent = mParent.promote(); + if (parent == NULL) { + CLOGE("RequestThread: Parent is gone"); + return DEAD_OBJECT; + } + CameraMetadata &metadata = request->mSettings; size_t count = mTriggerMap.size(); for (size_t i = 0; i < count; ++i) { RequestTrigger trigger = mTriggerMap.valueAt(i); - uint32_t tag = trigger.metadataTag; + + if (tag == ANDROID_CONTROL_AF_TRIGGER_ID || tag == ANDROID_CONTROL_AE_PRECAPTURE_ID) { + bool isAeTrigger = (trigger.metadataTag == ANDROID_CONTROL_AE_PRECAPTURE_ID); + uint32_t triggerId = static_cast<uint32_t>(trigger.entryValue); + isAeTrigger ? request->mResultExtras.precaptureTriggerId = triggerId : + request->mResultExtras.afTriggerId = triggerId; + if (parent->mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + continue; // Trigger ID tag is deprecated since device HAL 3.2 + } + } + camera_metadata_entry entry = metadata.find(tag); if (entry.count > 0) { diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h index 00ae771..d7545d0 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.h +++ b/services/camera/libcameraservice/device3/Camera3Device.h @@ -168,6 +168,8 @@ class Camera3Device : CameraMetadata mDeviceInfo; + int mDeviceVersion; + enum Status { STATUS_ERROR, STATUS_UNINITIALIZED, @@ -297,6 +299,18 @@ class Camera3Device : */ bool tryLockSpinRightRound(Mutex& lock); + struct Size { + int width; + int height; + Size(int w, int h) : width(w), height(h){} + }; + + /** + * Helper function to get the largest Jpeg resolution (in area) + * Return Size(0, 0) if static metatdata is 
invalid + */ + Size getMaxJpegResolution() const; + /** * Get Jpeg buffer size for a given jpeg resolution. * Negative values are error codes. @@ -430,6 +444,9 @@ class Camera3Device : // Relay error to parent device object setErrorState void setErrorState(const char *fmt, ...); + // If the input request is in mRepeatingRequests. Must be called with mRequestLock hold + bool isRepeatingRequestLocked(const sp<CaptureRequest>); + wp<Camera3Device> mParent; wp<camera3::StatusTracker> mStatusTracker; camera3_device_t *mHal3Device; @@ -484,7 +501,7 @@ class Camera3Device : // Set by process_capture_result call with valid metadata bool haveResultMetadata; // Decremented by calls to process_capture_result with valid output - // buffers + // and input buffers int numBuffersLeft; CaptureResultExtras resultExtras; diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp index 7645a2a..d7b1871 100644 --- a/services/camera/libcameraservice/device3/Camera3Stream.cpp +++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp @@ -485,6 +485,18 @@ status_t Camera3Stream::returnInputBufferLocked( void Camera3Stream::addBufferListener( wp<Camera3StreamBufferListener> listener) { Mutex::Autolock l(mLock); + + List<wp<Camera3StreamBufferListener> >::iterator it, end; + for (it = mBufferListenerList.begin(), end = mBufferListenerList.end(); + it != end; + ) { + if (*it == listener) { + ALOGE("%s: Try to add the same listener twice, ignoring...", __FUNCTION__); + return; + } + it++; + } + mBufferListenerList.push_back(listener); } diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h index 14f5387..a77f27c 100644 --- a/services/camera/libcameraservice/device3/Camera3Stream.h +++ b/services/camera/libcameraservice/device3/Camera3Stream.h @@ -226,8 +226,17 @@ class Camera3Stream : */ virtual void dump(int fd, const Vector<String16> 
&args) const = 0; + /** + * Add a camera3 buffer listener. Adding the same listener twice has + * no effect. + */ void addBufferListener( wp<Camera3StreamBufferListener> listener); + + /** + * Remove a camera3 buffer listener. Removing the same listener twice + * or the listener that was never added has no effect. + */ void removeBufferListener( const sp<Camera3StreamBufferListener>& listener); diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp index 05b3d1f..6c298f9 100644 --- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp @@ -300,6 +300,7 @@ status_t Camera3ZslStream::enqueueInputBufferByTimestamp( nsecs_t actual = pinnedBuffer->getBufferItem().mTimestamp; if (actual != timestamp) { + // TODO: this is problematic, we'll end up with using wrong result for this pinned buffer. ALOGW("%s: ZSL buffer candidate search didn't find an exact match --" " requested timestamp = %" PRId64 ", actual timestamp = %" PRId64, __FUNCTION__, timestamp, actual); diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk new file mode 100644 index 0000000..b7ccaab --- /dev/null +++ b/services/soundtrigger/Android.mk @@ -0,0 +1,41 @@ +# Copyright 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +LOCAL_PATH:= $(call my-dir) + +include $(CLEAR_VARS) + + +ifeq ($(SOUND_TRIGGER_USE_STUB_MODULE), 1) + LOCAL_CFLAGS += -DSOUND_TRIGGER_USE_STUB_MODULE +endif + +LOCAL_SRC_FILES:= \ + SoundTriggerHwService.cpp + +LOCAL_SHARED_LIBRARIES:= \ + libui \ + liblog \ + libutils \ + libbinder \ + libcutils \ + libhardware \ + libsoundtrigger + +#LOCAL_C_INCLUDES += \ + + +LOCAL_MODULE:= libsoundtriggerservice + +include $(BUILD_SHARED_LIBRARY) diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp new file mode 100644 index 0000000..fa59388 --- /dev/null +++ b/services/soundtrigger/SoundTriggerHwService.cpp @@ -0,0 +1,570 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "SoundTriggerHwService" +//#define LOG_NDEBUG 0 + +#include <stdio.h> +#include <string.h> +#include <sys/types.h> +#include <pthread.h> + +#include <binder/IServiceManager.h> +#include <binder/MemoryBase.h> +#include <binder/MemoryHeapBase.h> +#include <cutils/atomic.h> +#include <cutils/properties.h> +#include <hardware/hardware.h> +#include <utils/Errors.h> +#include <utils/Log.h> + +#include "SoundTriggerHwService.h" +#include <system/sound_trigger.h> +#include <hardware/sound_trigger.h> + +namespace android { + +#ifdef SOUND_TRIGGER_USE_STUB_MODULE +#define HW_MODULE_PREFIX "stub" +#else +#define HW_MODULE_PREFIX "primary" +#endif + +SoundTriggerHwService::SoundTriggerHwService() + : BnSoundTriggerHwService(), + mNextUniqueId(1) +{ +} + +void SoundTriggerHwService::onFirstRef() +{ + const hw_module_t *mod; + int rc; + sound_trigger_hw_device *dev; + + rc = hw_get_module_by_class(SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, &mod); + if (rc != 0) { + ALOGE("couldn't load sound trigger module %s.%s (%s)", + SOUND_TRIGGER_HARDWARE_MODULE_ID, "primary", strerror(-rc)); + return; + } + rc = sound_trigger_hw_device_open(mod, &dev); + if (rc != 0) { + ALOGE("couldn't open sound trigger hw device in %s.%s (%s)", + SOUND_TRIGGER_HARDWARE_MODULE_ID, "primary", strerror(-rc)); + return; + } + if (dev->common.version != SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) { + ALOGE("wrong sound trigger hw device version %04x", dev->common.version); + return; + } + + sound_trigger_module_descriptor descriptor; + rc = dev->get_properties(dev, &descriptor.properties); + if (rc != 0) { + ALOGE("could not read implementation properties"); + return; + } + descriptor.handle = + (sound_trigger_module_handle_t)android_atomic_inc(&mNextUniqueId); + ALOGI("loaded default module %s, handle %d", descriptor.properties.description, + descriptor.handle); + + sp<ISoundTriggerClient> client; + sp<Module> module = new Module(this, dev, descriptor, client); + 
mModules.add(descriptor.handle, module); + mCallbackThread = new CallbackThread(this); +} + +SoundTriggerHwService::~SoundTriggerHwService() +{ + if (mCallbackThread != 0) { + mCallbackThread->exit(); + } + for (size_t i = 0; i < mModules.size(); i++) { + sound_trigger_hw_device_close(mModules.valueAt(i)->hwDevice()); + } +} + +status_t SoundTriggerHwService::listModules(struct sound_trigger_module_descriptor *modules, + uint32_t *numModules) +{ + ALOGV("listModules"); + AutoMutex lock(mServiceLock); + if (numModules == NULL || (*numModules != 0 && modules == NULL)) { + return BAD_VALUE; + } + size_t maxModules = *numModules; + *numModules = mModules.size(); + for (size_t i = 0; i < mModules.size() && i < maxModules; i++) { + modules[i] = mModules.valueAt(i)->descriptor(); + } + return NO_ERROR; +} + +status_t SoundTriggerHwService::attach(const sound_trigger_module_handle_t handle, + const sp<ISoundTriggerClient>& client, + sp<ISoundTrigger>& moduleInterface) +{ + ALOGV("attach module %d", handle); + AutoMutex lock(mServiceLock); + moduleInterface.clear(); + if (client == 0) { + return BAD_VALUE; + } + ssize_t index = mModules.indexOfKey(handle); + if (index < 0) { + return BAD_VALUE; + } + sp<Module> module = mModules.valueAt(index); + + module->setClient(client); + client->asBinder()->linkToDeath(module); + moduleInterface = module; + + return NO_ERROR; +} + +void SoundTriggerHwService::detachModule(sp<Module> module) { + AutoMutex lock(mServiceLock); + ALOGV("detachModule"); + module->clearClient(); +} + +static const int kDumpLockRetries = 50; +static const int kDumpLockSleep = 60000; + +static bool tryLock(Mutex& mutex) +{ + bool locked = false; + for (int i = 0; i < kDumpLockRetries; ++i) { + if (mutex.tryLock() == NO_ERROR) { + locked = true; + break; + } + usleep(kDumpLockSleep); + } + return locked; +} + +status_t SoundTriggerHwService::dump(int fd, const Vector<String16>& args __unused) { + String8 result; + if 
(checkCallingPermission(String16("android.permission.DUMP")) == false) { + result.appendFormat("Permission Denial: can't dump SoundTriggerHwService"); + write(fd, result.string(), result.size()); + } else { + bool locked = tryLock(mServiceLock); + // failed to lock - SoundTriggerHwService is probably deadlocked + if (!locked) { + result.append("SoundTriggerHwService may be deadlocked\n"); + write(fd, result.string(), result.size()); + } + + if (locked) mServiceLock.unlock(); + } + return NO_ERROR; +} + +status_t SoundTriggerHwService::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) { + return BnSoundTriggerHwService::onTransact(code, data, reply, flags); +} + + +// static +void SoundTriggerHwService::recognitionCallback(struct sound_trigger_recognition_event *event, + void *cookie) +{ + Module *module = (Module *)cookie; + if (module == NULL) { + return; + } + module->sendRecognitionEvent(event); +} + + +void SoundTriggerHwService::sendRecognitionEvent(const sp<RecognitionEvent>& event) +{ + mCallbackThread->sendRecognitionEvent(event); +} + +void SoundTriggerHwService::onRecognitionEvent(const sp<RecognitionEvent>& event) +{ + ALOGV("onRecognitionEvent"); + sp<Module> module; + { + AutoMutex lock(mServiceLock); + module = event->mModule.promote(); + if (module == 0) { + return; + } + } + module->onRecognitionEvent(event->mEventMemory); +} + +// static +void SoundTriggerHwService::soundModelCallback(struct sound_trigger_model_event *event __unused, + void *cookie) +{ + Module *module = (Module *)cookie; + +} + +#undef LOG_TAG +#define LOG_TAG "SoundTriggerHwService::CallbackThread" + +SoundTriggerHwService::CallbackThread::CallbackThread(const wp<SoundTriggerHwService>& service) + : mService(service) +{ +} + +SoundTriggerHwService::CallbackThread::~CallbackThread() +{ + mEventQueue.clear(); +} + +void SoundTriggerHwService::CallbackThread::onFirstRef() +{ + run("soundTrigger cbk", ANDROID_PRIORITY_URGENT_AUDIO); +} + +bool 
SoundTriggerHwService::CallbackThread::threadLoop() +{ + while (!exitPending()) { + sp<RecognitionEvent> event; + sp<SoundTriggerHwService> service; + { + Mutex::Autolock _l(mCallbackLock); + while (mEventQueue.isEmpty() && !exitPending()) { + ALOGV("CallbackThread::threadLoop() sleep"); + mCallbackCond.wait(mCallbackLock); + ALOGV("CallbackThread::threadLoop() wake up"); + } + if (exitPending()) { + break; + } + event = mEventQueue[0]; + mEventQueue.removeAt(0); + service = mService.promote(); + } + if (service != 0) { + service->onRecognitionEvent(event); + } + } + return false; +} + +void SoundTriggerHwService::CallbackThread::exit() +{ + Mutex::Autolock _l(mCallbackLock); + requestExit(); + mCallbackCond.broadcast(); +} + +void SoundTriggerHwService::CallbackThread::sendRecognitionEvent( + const sp<SoundTriggerHwService::RecognitionEvent>& event) +{ + AutoMutex lock(mCallbackLock); + mEventQueue.add(event); + mCallbackCond.signal(); +} + +SoundTriggerHwService::RecognitionEvent::RecognitionEvent( + sp<IMemory> eventMemory, + wp<Module> module) + : mEventMemory(eventMemory), mModule(module) +{ +} + +SoundTriggerHwService::RecognitionEvent::~RecognitionEvent() +{ +} + +#undef LOG_TAG +#define LOG_TAG "SoundTriggerHwService::Module" + +SoundTriggerHwService::Module::Module(const sp<SoundTriggerHwService>& service, + sound_trigger_hw_device* hwDevice, + sound_trigger_module_descriptor descriptor, + const sp<ISoundTriggerClient>& client) + : mService(service), mHwDevice(hwDevice), mDescriptor(descriptor), + mClient(client) +{ +} + +SoundTriggerHwService::Module::~Module() { +} + +void SoundTriggerHwService::Module::detach() { + ALOGV("detach()"); + { + AutoMutex lock(mLock); + for (size_t i = 0; i < mModels.size(); i++) { + sp<Model> model = mModels.valueAt(i); + ALOGV("detach() unloading model %d", model->mHandle); + if (model->mState == Model::STATE_ACTIVE) { + mHwDevice->stop_recognition(mHwDevice, model->mHandle); + model->deallocateMemory(); + } + 
mHwDevice->unload_sound_model(mHwDevice, model->mHandle); + } + mModels.clear(); + } + if (mClient != 0) { + mClient->asBinder()->unlinkToDeath(this); + } + sp<SoundTriggerHwService> service = mService.promote(); + if (service == 0) { + return; + } + service->detachModule(this); +} + +status_t SoundTriggerHwService::Module::loadSoundModel(const sp<IMemory>& modelMemory, + sound_model_handle_t *handle) +{ + ALOGV("loadSoundModel() handle"); + + if (modelMemory == 0 || modelMemory->pointer() == NULL) { + ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()"); + return BAD_VALUE; + } + struct sound_trigger_sound_model *sound_model = + (struct sound_trigger_sound_model *)modelMemory->pointer(); + + AutoMutex lock(mLock); + status_t status = mHwDevice->load_sound_model(mHwDevice, + sound_model, + SoundTriggerHwService::soundModelCallback, + this, + handle); + if (status == NO_ERROR) { + mModels.replaceValueFor(*handle, new Model(*handle)); + } + + return status; +} + +status_t SoundTriggerHwService::Module::unloadSoundModel(sound_model_handle_t handle) +{ + ALOGV("unloadSoundModel() model handle %d", handle); + + AutoMutex lock(mLock); + ssize_t index = mModels.indexOfKey(handle); + if (index < 0) { + return BAD_VALUE; + } + sp<Model> model = mModels.valueAt(index); + mModels.removeItem(handle); + if (model->mState == Model::STATE_ACTIVE) { + mHwDevice->stop_recognition(mHwDevice, model->mHandle); + model->deallocateMemory(); + } + return mHwDevice->unload_sound_model(mHwDevice, handle); +} + +status_t SoundTriggerHwService::Module::startRecognition(sound_model_handle_t handle, + const sp<IMemory>& dataMemory) +{ + ALOGV("startRecognition() model handle %d", handle); + + if (dataMemory != 0 && dataMemory->pointer() == NULL) { + ALOGE("startRecognition() dataMemory is non-0 but has NULL pointer()"); + return BAD_VALUE; + + } + AutoMutex lock(mLock); + sp<Model> model = getModel(handle); + if (model == 0) { + return BAD_VALUE; + } + + if (model->mState == 
Model::STATE_ACTIVE) { + return INVALID_OPERATION; + } + model->mState = Model::STATE_ACTIVE; + + char *data = NULL; + unsigned int data_size = 0; + if (dataMemory != 0 && dataMemory->size() != 0) { + data_size = (unsigned int)dataMemory->size(); + data = (char *)dataMemory->pointer(); + ALOGV("startRecognition() data size %d data %d - %d", + data_size, data[0], data[data_size - 1]); + } + + //TODO: get capture handle and device from audio policy service + audio_io_handle_t capture_handle = 0; + return mHwDevice->start_recognition(mHwDevice, handle, capture_handle, AUDIO_DEVICE_NONE, + SoundTriggerHwService::recognitionCallback, + this, + data_size, + data); +} + +status_t SoundTriggerHwService::Module::stopRecognition(sound_model_handle_t handle) +{ + ALOGV("stopRecognition() model handle %d", handle); + + AutoMutex lock(mLock); + sp<Model> model = getModel(handle); + if (model == 0) { + return BAD_VALUE; + } + + if (model->mState != Model::STATE_ACTIVE) { + return INVALID_OPERATION; + } + mHwDevice->stop_recognition(mHwDevice, handle); + model->deallocateMemory(); + model->mState = Model::STATE_IDLE; + return NO_ERROR; +} + +void SoundTriggerHwService::Module::sendRecognitionEvent( + struct sound_trigger_recognition_event *event) +{ + sp<SoundTriggerHwService> service; + sp<IMemory> eventMemory; + ALOGV("sendRecognitionEvent for model %d", event->model); + { + AutoMutex lock(mLock); + sp<Model> model = getModel(event->model); + if (model == 0) { + return; + } + if (model->mState != Model::STATE_ACTIVE) { + ALOGV("sendRecognitionEvent model->mState %d != Model::STATE_ACTIVE", model->mState); + return; + } + if (mClient == 0) { + return; + } + service = mService.promote(); + if (service == 0) { + return; + } + + //sanitize event + switch (event->type) { + case SOUND_MODEL_TYPE_KEYPHRASE: + ALOGW_IF(event->data_offset != + sizeof(struct sound_trigger_phrase_recognition_event), + "sendRecognitionEvent(): invalid data offset %u for keyphrase event type", + 
event->data_offset); + event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event); + break; + case SOUND_MODEL_TYPE_UNKNOWN: + ALOGW_IF(event->data_offset != + sizeof(struct sound_trigger_recognition_event), + "sendRecognitionEvent(): invalid data offset %u for unknown event type", + event->data_offset); + event->data_offset = sizeof(struct sound_trigger_recognition_event); + break; + default: + return; + } + + size_t size = event->data_offset + event->data_size; + eventMemory = model->allocateMemory(size); + if (eventMemory == 0 || eventMemory->pointer() == NULL) { + return; + } + memcpy(eventMemory->pointer(), event, size); + } + service->sendRecognitionEvent(new RecognitionEvent(eventMemory, this)); +} + +void SoundTriggerHwService::Module::onRecognitionEvent(sp<IMemory> eventMemory) +{ + ALOGV("Module::onRecognitionEvent"); + + AutoMutex lock(mLock); + + if (eventMemory == 0 || eventMemory->pointer() == NULL) { + return; + } + struct sound_trigger_recognition_event *event = + (struct sound_trigger_recognition_event *)eventMemory->pointer(); + + sp<Model> model = getModel(event->model); + if (model == 0) { + ALOGI("%s model == 0", __func__); + return; + } + if (model->mState != Model::STATE_ACTIVE) { + ALOGV("onRecognitionEvent model->mState %d != Model::STATE_ACTIVE", model->mState); + return; + } + if (mClient == 0) { + ALOGI("%s mClient == 0", __func__); + return; + } + mClient->onRecognitionEvent(eventMemory); + model->mState = Model::STATE_IDLE; + model->deallocateMemory(); +} + +sp<SoundTriggerHwService::Model> SoundTriggerHwService::Module::getModel( + sound_model_handle_t handle) +{ + sp<Model> model; + ssize_t index = mModels.indexOfKey(handle); + if (index >= 0) { + model = mModels.valueAt(index); + } + return model; +} + +void SoundTriggerHwService::Module::binderDied( + const wp<IBinder> &who __unused) { + ALOGW("client binder died for module %d", mDescriptor.handle); + detach(); +} + + 
+SoundTriggerHwService::Model::Model(sound_model_handle_t handle) : + mHandle(handle), mState(STATE_IDLE), mInputHandle(AUDIO_IO_HANDLE_NONE), + mCaptureSession(AUDIO_SESSION_ALLOCATE), + mMemoryDealer(new MemoryDealer(sizeof(struct sound_trigger_recognition_event), + "SoundTriggerHwService::Event")) +{ + +} + + +sp<IMemory> SoundTriggerHwService::Model::allocateMemory(size_t size) +{ + sp<IMemory> memory; + if (mMemoryDealer->getMemoryHeap()->getSize() < size) { + mMemoryDealer = new MemoryDealer(size, "SoundTriggerHwService::Event"); + } + memory = mMemoryDealer->allocate(size); + return memory; +} + +void SoundTriggerHwService::Model::deallocateMemory() +{ + mMemoryDealer->deallocate(0); +} + +status_t SoundTriggerHwService::Module::dump(int fd __unused, + const Vector<String16>& args __unused) { + String8 result; + return NO_ERROR; +} + +}; // namespace android diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h new file mode 100644 index 0000000..377f2a1 --- /dev/null +++ b/services/soundtrigger/SoundTriggerHwService.h @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_SERVICE_H +#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_SERVICE_H + +#include <utils/Vector.h> +//#include <binder/AppOpsManager.h> +#include <binder/MemoryDealer.h> +#include <binder/BinderService.h> +#include <binder/IAppOpsCallback.h> +#include <soundtrigger/ISoundTriggerHwService.h> +#include <soundtrigger/ISoundTrigger.h> +#include <soundtrigger/ISoundTriggerClient.h> +#include <system/sound_trigger.h> +#include <hardware/sound_trigger.h> + +namespace android { + +class MemoryHeapBase; + +class SoundTriggerHwService : + public BinderService<SoundTriggerHwService>, + public BnSoundTriggerHwService +{ + friend class BinderService<SoundTriggerHwService>; +public: + class Module; + + static char const* getServiceName() { return "media.sound_trigger_hw"; } + + SoundTriggerHwService(); + virtual ~SoundTriggerHwService(); + + // ISoundTriggerHwService + virtual status_t listModules(struct sound_trigger_module_descriptor *modules, + uint32_t *numModules); + + virtual status_t attach(const sound_trigger_module_handle_t handle, + const sp<ISoundTriggerClient>& client, + sp<ISoundTrigger>& module); + + virtual status_t onTransact(uint32_t code, const Parcel& data, + Parcel* reply, uint32_t flags); + + virtual status_t dump(int fd, const Vector<String16>& args); + + class Model : public RefBase { + public: + + enum { + STATE_IDLE, + STATE_ACTIVE + }; + + Model(sound_model_handle_t handle); + ~Model() {} + + sp<IMemory> allocateMemory(size_t size); + void deallocateMemory(); + + sound_model_handle_t mHandle; + int mState; + audio_io_handle_t mInputHandle; + audio_session_t mCaptureSession; + sp<MemoryDealer> mMemoryDealer; + }; + + class Module : public virtual RefBase, + public BnSoundTrigger, + public IBinder::DeathRecipient { + public: + + Module(const sp<SoundTriggerHwService>& service, + sound_trigger_hw_device* hwDevice, + sound_trigger_module_descriptor descriptor, + const sp<ISoundTriggerClient>& client); + + virtual 
~Module(); + + virtual void detach(); + + virtual status_t loadSoundModel(const sp<IMemory>& modelMemory, + sound_model_handle_t *handle); + + virtual status_t unloadSoundModel(sound_model_handle_t handle); + + virtual status_t startRecognition(sound_model_handle_t handle, + const sp<IMemory>& dataMemory); + virtual status_t stopRecognition(sound_model_handle_t handle); + + virtual status_t dump(int fd, const Vector<String16>& args); + + + sound_trigger_hw_device *hwDevice() const { return mHwDevice; } + struct sound_trigger_module_descriptor descriptor() { return mDescriptor; } + void setClient(sp<ISoundTriggerClient> client) { mClient = client; } + void clearClient() { mClient.clear(); } + sp<ISoundTriggerClient> client() { return mClient; } + + void sendRecognitionEvent(struct sound_trigger_recognition_event *event); + void onRecognitionEvent(sp<IMemory> eventMemory); + + sp<Model> getModel(sound_model_handle_t handle); + + // IBinder::DeathRecipient implementation + virtual void binderDied(const wp<IBinder> &who); + + private: + Mutex mLock; + wp<SoundTriggerHwService> mService; + struct sound_trigger_hw_device* mHwDevice; + struct sound_trigger_module_descriptor mDescriptor; + sp<ISoundTriggerClient> mClient; + DefaultKeyedVector< sound_model_handle_t, sp<Model> > mModels; + }; // class Module + + class RecognitionEvent : public RefBase { + public: + + RecognitionEvent(sp<IMemory> eventMemory, wp<Module> module); + + virtual ~RecognitionEvent(); + + sp<IMemory> mEventMemory; + wp<Module> mModule; + }; + + class CallbackThread : public Thread { + public: + + CallbackThread(const wp<SoundTriggerHwService>& service); + + virtual ~CallbackThread(); + + // Thread virtuals + virtual bool threadLoop(); + + // RefBase + virtual void onFirstRef(); + + void exit(); + void sendRecognitionEvent(const sp<RecognitionEvent>& event); + + private: + wp<SoundTriggerHwService> mService; + Condition mCallbackCond; + Mutex mCallbackLock; + Vector< sp<RecognitionEvent> > 
mEventQueue; + }; + + void detachModule(sp<Module> module); + + static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie); + void sendRecognitionEvent(const sp<RecognitionEvent>& event); + void onRecognitionEvent(const sp<RecognitionEvent>& event); + + static void soundModelCallback(struct sound_trigger_model_event *event, void *cookie); + +private: + + virtual void onFirstRef(); + + Mutex mServiceLock; + volatile int32_t mNextUniqueId; + DefaultKeyedVector< sound_trigger_module_handle_t, sp<Module> > mModules; + sp<CallbackThread> mCallbackThread; +}; + +} // namespace android + +#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_SERVICE_H |