Diffstat (limited to 'services/audioflinger/Threads.cpp')
-rw-r--r-- | services/audioflinger/Threads.cpp | 405
1 file changed, 311 insertions(+), 94 deletions(-)
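The devicesToString()/inputFlagsToString()/outputFlagsToString() helpers added in the patch below all follow one pattern: OR together every named bit, append the name of each bit that is set, print any remaining unnamed bits in hex, and fall back to the terminating "NONE" entry when nothing matched. The standalone sketch that follows shows only that pattern; the enum values, the flagsToString name, and the use of std::string are illustrative stand-ins rather than the String8/audio_*_t types used in the patch.

#include <cstdint>
#include <cstdio>
#include <string>

enum : uint32_t {
    FLAG_NONE    = 0x0,
    FLAG_FAST    = 0x1,
    FLAG_HOTWORD = 0x2,
};

static std::string flagsToString(uint32_t flags)
{
    static const struct { uint32_t flag; const char *name; } mappings[] = {
        { FLAG_FAST,    "FAST" },
        { FLAG_HOTWORD, "HOTWORD" },
        { FLAG_NONE,    "NONE" },       // must be last: also the fallback name
    };
    std::string result;
    uint32_t known = 0;                 // union of all named bits
    const auto *entry = mappings;
    for (; entry->flag != FLAG_NONE; entry++) {
        known |= entry->flag;
        if (flags & entry->flag) {
            if (!result.empty()) result += "|";
            result += entry->name;
        }
    }
    if (flags & ~known) {               // leftover bits with no name: print in hex
        char buf[16];
        snprintf(buf, sizeof(buf), "0x%X", flags & ~known);
        if (!result.empty()) result += "|";
        result += buf;
    }
    if (result.empty()) result = entry->name;   // loop left entry on the "NONE" row
    return result;
}

int main()
{
    printf("%s\n", flagsToString(FLAG_FAST | 0x8).c_str());    // prints FAST|0x8
    return 0;
}

Keeping the sentinel row last is what lets the same entry pointer double as the fallback name once the loop ends.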
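Several hunks below replace hand-written rate arithmetic with sourceFramesNeeded() from <media/AudioResamplerPublic.h>: the minimum frame count in createTrack_l(), desiredFrames in MixerThread::prepareTracks_l(), and the OutputTrack sizing in DuplicatingThread::addOutputTrack(). The example below works through two of those call sites with made-up numbers; sourceFramesNeededApprox() is only a local approximation of the real helper (scale the destination frame count by the rate ratio, then add a couple of frames of headroom for rounding and interpolation), so exact values may differ slightly from what the platform computes.

#include <cstdint>
#include <cstdio>

// Approximation of sourceFramesNeeded(srcRate, dstFramesRequired, dstRate).
static size_t sourceFramesNeededApprox(uint32_t srcRate, size_t dstFrames, uint32_t dstRate)
{
    return srcRate == dstRate ? dstFrames
                              : (size_t)((uint64_t)dstFrames * srcRate / dstRate + 1 + 1);
}

int main()
{
    // createTrack_l(): minimum frame count for a normal streaming PCM track.
    const uint32_t trackRate = 44100;       // client sample rate
    const uint32_t mixerRate = 48000;       // mSampleRate
    const size_t   normalFrameCount = 960;  // mNormalFrameCount (20 ms at 48 kHz)
    const uint32_t latencyMs = 40;          // HAL-reported latency (illustrative)

    uint32_t minBufCount = latencyMs / ((1000 * normalFrameCount) / mixerRate);
    if (minBufCount < 2) minBufCount = 2;

    size_t oldMin = normalFrameCount * minBufCount;     // old formula, mixer-rate frames
    size_t newMin = minBufCount *
            sourceFramesNeededApprox(trackRate, normalFrameCount, mixerRate);
    printf("createTrack_l: minBufCount=%u old=%zu new=%zu\n", minBufCount, oldMin, newMin);
    // -> minBufCount=2 old=1920 new=1768, i.e. the buffer is now sized in track-rate frames

    // addOutputTrack(): OutputTrack buffer feeding a downstream MixerThread.
    const uint32_t dupRate = 48000;         // DuplicatingThread mSampleRate
    const uint32_t downstreamRate = 44100;  // thread->sampleRate()
    const size_t   downstreamFrames = 896;  // thread->frameCount() per mix pass

    size_t outFrameCount =
            3 * sourceFramesNeededApprox(dupRate, downstreamFrames, downstreamRate);
    printf("addOutputTrack: frameCount=%zu\n", outFrameCount);
    // -> 3 * (896 * 48000 / 44100 + 2) = 3 * 977 = 2931 frames, triple buffered because
    //    the duplicating thread and the downstream mixer are not clock locked
    return 0;
}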
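Many call sites below switch from reaching into mOutput->stream directly to calling methods such as mOutput->write(), mOutput->standby(), mOutput->flush(), mOutput->getFrameSize(), mOutput->getRenderPosition() and mOutput->getPresentationPosition(), and getOutput()/clearOutput() now return AudioStreamOut* without the AudioFlinger:: prefix. The wrapper class itself is defined elsewhere in this patch series, so the sketch below is only a guess at the kind of thin forwarding layer those call sites imply; the AudioStreamOutSketch name, the int return types, and the exact signatures are assumptions, and building it needs the legacy audio HAL headers from an AOSP tree.

#include <stddef.h>
#include <hardware/audio.h>     // audio_stream_out_t, audio_stream_out_frame_size()

class AudioStreamOutSketch {
public:
    audio_stream_out_t * const stream;  // still public: call sites keep using ->stream too
    const audio_output_flags_t flags;   // dumped by PlaybackThread::dumpInternals()

    AudioStreamOutSketch(audio_stream_out_t *s, audio_output_flags_t f)
        : stream(s), flags(f) { }

    size_t getFrameSize() const { return audio_stream_out_frame_size(stream); }

    ssize_t write(const void *buffer, size_t bytes) {
        return stream->write(stream, buffer, bytes);
    }

    int standby() { return stream->common.standby(&stream->common); }

    // flush is an optional HAL entry point, so the NULL check moves in here
    int flush() { return stream->flush != NULL ? stream->flush(stream) : 0; }

    int getRenderPosition(uint32_t *frames) {
        return stream->get_render_position(stream, frames);
    }

    int getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
        return stream->get_presentation_position(stream, frames, timestamp);
    }
};

Centralizing these calls keeps NULL checks for optional HAL entry points in one place and lets the thread code read the same regardless of what sits behind the output stream.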
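The new ATRACE block in MixerThread::prepareTracks_l() below builds per-track counter names by hand because, as its comment notes, there is no formatted-trace-name helper; the result is nRdy00, nRdy01, ... for in-range AudioMixer track names and nRdy?? otherwise. A hypothetical snprintf-based equivalent, with the TRACK0/MAX_NUM_TRACKS constants passed in rather than taken from AudioMixer:

#include <cstdio>

// Produces the same names as the hand-rolled digit math: "nRdy%02d" or "nRdy??".
static void makeTraceName(char traceName[16], int name, int track0, int maxNumTracks)
{
    if (track0 <= name && name < track0 + maxNumTracks) {
        snprintf(traceName, 16, "nRdy%02d", name - track0);
    } else {
        snprintf(traceName, 16, "nRdy??");
    }
}

int main()
{
    char buf[16];
    makeTraceName(buf, 4103, 4096, 32);     // assuming a TRACK0 value of 4096
    printf("%s\n", buf);                    // prints nRdy07
    return 0;
}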
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 51025fe..5988d2c 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -23,7 +23,9 @@
 #include "Configuration.h"
 #include <math.h>
 #include <fcntl.h>
+#include <linux/futex.h>
 #include <sys/stat.h>
+#include <sys/syscall.h>
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
 #include <media/AudioResamplerPublic.h>
@@ -314,6 +316,165 @@ void CpuStats::sample(const String8 &title
 // ThreadBase
 // ----------------------------------------------------------------------------
 
+// static
+const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBase::type_t type)
+{
+    switch (type) {
+    case MIXER:
+        return "MIXER";
+    case DIRECT:
+        return "DIRECT";
+    case DUPLICATING:
+        return "DUPLICATING";
+    case RECORD:
+        return "RECORD";
+    case OFFLOAD:
+        return "OFFLOAD";
+    default:
+        return "unknown";
+    }
+}
+
+String8 devicesToString(audio_devices_t devices)
+{
+    static const struct mapping {
+        audio_devices_t mDevices;
+        const char *    mString;
+    } mappingsOut[] = {
+        AUDIO_DEVICE_OUT_EARPIECE,          "EARPIECE",
+        AUDIO_DEVICE_OUT_SPEAKER,           "SPEAKER",
+        AUDIO_DEVICE_OUT_WIRED_HEADSET,     "WIRED_HEADSET",
+        AUDIO_DEVICE_OUT_WIRED_HEADPHONE,   "WIRED_HEADPHONE",
+        AUDIO_DEVICE_OUT_TELEPHONY_TX,      "TELEPHONY_TX",
+        AUDIO_DEVICE_NONE,                  "NONE",     // must be last
+    }, mappingsIn[] = {
+        AUDIO_DEVICE_IN_BUILTIN_MIC,        "BUILTIN_MIC",
+        AUDIO_DEVICE_IN_WIRED_HEADSET,      "WIRED_HEADSET",
+        AUDIO_DEVICE_IN_VOICE_CALL,         "VOICE_CALL",
+        AUDIO_DEVICE_IN_REMOTE_SUBMIX,      "REMOTE_SUBMIX",
+        AUDIO_DEVICE_NONE,                  "NONE",     // must be last
+    };
+    String8 result;
+    audio_devices_t allDevices = AUDIO_DEVICE_NONE;
+    const mapping *entry;
+    if (devices & AUDIO_DEVICE_BIT_IN) {
+        devices &= ~AUDIO_DEVICE_BIT_IN;
+        entry = mappingsIn;
+    } else {
+        entry = mappingsOut;
+    }
+    for ( ; entry->mDevices != AUDIO_DEVICE_NONE; entry++) {
+        allDevices = (audio_devices_t) (allDevices | entry->mDevices);
+        if (devices & entry->mDevices) {
+            if (!result.isEmpty()) {
+                result.append("|");
+            }
+            result.append(entry->mString);
+        }
+    }
+    if (devices & ~allDevices) {
+        if (!result.isEmpty()) {
+            result.append("|");
+        }
+        result.appendFormat("0x%X", devices & ~allDevices);
+    }
+    if (result.isEmpty()) {
+        result.append(entry->mString);
+    }
+    return result;
+}
+
+String8 inputFlagsToString(audio_input_flags_t flags)
+{
+    static const struct mapping {
+        audio_input_flags_t mFlag;
+        const char *        mString;
+    } mappings[] = {
+        AUDIO_INPUT_FLAG_FAST,          "FAST",
+        AUDIO_INPUT_FLAG_HW_HOTWORD,    "HW_HOTWORD",
+        AUDIO_INPUT_FLAG_NONE,          "NONE",     // must be last
+    };
+    String8 result;
+    audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
+    const mapping *entry;
+    for (entry = mappings; entry->mFlag != AUDIO_INPUT_FLAG_NONE; entry++) {
+        allFlags = (audio_input_flags_t) (allFlags | entry->mFlag);
+        if (flags & entry->mFlag) {
+            if (!result.isEmpty()) {
+                result.append("|");
+            }
+            result.append(entry->mString);
+        }
+    }
+    if (flags & ~allFlags) {
+        if (!result.isEmpty()) {
+            result.append("|");
+        }
+        result.appendFormat("0x%X", flags & ~allFlags);
+    }
+    if (result.isEmpty()) {
+        result.append(entry->mString);
+    }
+    return result;
+}
+
+String8 outputFlagsToString(audio_output_flags_t flags)
+{
+    static const struct mapping {
+        audio_output_flags_t    mFlag;
+        const char *            mString;
+    } mappings[] = {
+        AUDIO_OUTPUT_FLAG_DIRECT,           "DIRECT",
+        AUDIO_OUTPUT_FLAG_PRIMARY,          "PRIMARY",
+        AUDIO_OUTPUT_FLAG_FAST,             "FAST",
+        AUDIO_OUTPUT_FLAG_DEEP_BUFFER,      "DEEP_BUFFER",
+        AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD, "COMPRESS_OFFLOAD",
+        AUDIO_OUTPUT_FLAG_NON_BLOCKING,     "NON_BLOCKING",
+        AUDIO_OUTPUT_FLAG_HW_AV_SYNC,       "HW_AV_SYNC",
+        AUDIO_OUTPUT_FLAG_NONE,             "NONE",     // must be last
+    };
+    String8 result;
+    audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
+    const mapping *entry;
+    for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
+        allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
+        if (flags & entry->mFlag) {
+            if (!result.isEmpty()) {
+                result.append("|");
+            }
+            result.append(entry->mString);
+        }
+    }
+    if (flags & ~allFlags) {
+        if (!result.isEmpty()) {
+            result.append("|");
+        }
+        result.appendFormat("0x%X", flags & ~allFlags);
+    }
+    if (result.isEmpty()) {
+        result.append(entry->mString);
+    }
+    return result;
+}
+
+const char *sourceToString(audio_source_t source)
+{
+    switch (source) {
+    case AUDIO_SOURCE_DEFAULT:              return "default";
+    case AUDIO_SOURCE_MIC:                  return "mic";
+    case AUDIO_SOURCE_VOICE_UPLINK:         return "voice uplink";
+    case AUDIO_SOURCE_VOICE_DOWNLINK:       return "voice downlink";
+    case AUDIO_SOURCE_VOICE_CALL:           return "voice call";
+    case AUDIO_SOURCE_CAMCORDER:            return "camcorder";
+    case AUDIO_SOURCE_VOICE_RECOGNITION:    return "voice recognition";
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:  return "voice communication";
+    case AUDIO_SOURCE_REMOTE_SUBMIX:        return "remote submix";
+    case AUDIO_SOURCE_FM_TUNER:             return "FM tuner";
+    case AUDIO_SOURCE_HOTWORD:              return "hotword";
+    default:                                return "unknown";
+    }
+}
+
 AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
         audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
     :   Thread(false /*canCallJava*/),
@@ -577,20 +738,22 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __u
 
     bool locked = AudioFlinger::dumpTryLock(mLock);
     if (!locked) {
-        dprintf(fd, "thread %p maybe dead locked\n", this);
+        dprintf(fd, "thread %p may be deadlocked\n", this);
     }
 
+    dprintf(fd, " Thread name: %s\n", mThreadName);
     dprintf(fd, " I/O handle: %d\n", mId);
     dprintf(fd, " TID: %d\n", getTid());
     dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
-    dprintf(fd, " Sample rate: %u\n", mSampleRate);
+    dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
     dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
+    dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
     dprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize);
-    dprintf(fd, " Channel Count: %u\n", mChannelCount);
-    dprintf(fd, " Channel Mask: 0x%08x (%s)\n", mChannelMask,
+    dprintf(fd, " Channel count: %u\n", mChannelCount);
+    dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
             channelMaskToString(mChannelMask, mType != RECORD).string());
-    dprintf(fd, " Format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
-    dprintf(fd, " Frame size: %zu\n", mFrameSize);
+    dprintf(fd, " Format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+    dprintf(fd, " Frame size: %zu bytes\n", mFrameSize);
     dprintf(fd, " Pending config events:");
     size_t numConfig = mConfigEvents.size();
     if (numConfig) {
@@ -602,6 +765,9 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __u
     } else {
         dprintf(fd, " none\n");
     }
+    dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).string());
+    dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).string());
+    dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
 
     if (locked) {
         mLock.unlock();
@@ -635,19 +801,19 @@ void AudioFlinger::ThreadBase::acquireWakeLock(int uid)
 String16 AudioFlinger::ThreadBase::getWakeLockTag()
 {
     switch (mType) {
-        case MIXER:
-            return String16("AudioMix");
-        case DIRECT:
-            return String16("AudioDirectOut");
-        case DUPLICATING:
-            return String16("AudioDup");
-        case RECORD:
-            return String16("AudioIn");
-        case OFFLOAD:
-            return String16("AudioOffload");
-        default:
-            ALOG_ASSERT(false);
-            return String16("AudioUnknown");
+    case MIXER:
+        return String16("AudioMix");
+    case DIRECT:
+        return String16("AudioDirectOut");
+    case DUPLICATING:
+        return String16("AudioDup");
+    case RECORD:
+        return String16("AudioIn");
+    case OFFLOAD:
+        return String16("AudioOffload");
+    default:
+        ALOG_ASSERT(false);
+        return String16("AudioUnknown");
     }
 }
 
@@ -674,7 +840,7 @@ void AudioFlinger::ThreadBase::acquireWakeLock_l(int uid)
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
         }
-        ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
     }
 }
 
@@ -687,7 +853,7 @@ void AudioFlinger::ThreadBase::releaseWakeLock()
 void AudioFlinger::ThreadBase::releaseWakeLock_l()
 {
     if (mWakeLockToken != 0) {
-        ALOGV("releaseWakeLock_l() %s", mName);
+        ALOGV("releaseWakeLock_l() %s", mThreadName);
         if (mPowerManager != 0) {
             mPowerManager->releaseWakeLock(mWakeLockToken, 0,
                     true /* FIXME force oneway contrary to .aidl */);
@@ -708,7 +874,7 @@ void AudioFlinger::ThreadBase::getPowerManager_l() {
         sp<IBinder> binder =
            defaultServiceManager()->checkService(String16("power"));
         if (binder == 0) {
-            ALOGW("Thread %s cannot connect to the power manager service", mName);
+            ALOGW("Thread %s cannot connect to the power manager service", mThreadName);
         } else {
             mPowerManager = interface_cast<IPowerManager>(binder);
             binder->linkToDeath(mDeathRecipient);
@@ -728,7 +894,7 @@ void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uid
             status_t status;
             status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
                     true /* FIXME force oneway contrary to .aidl */);
-            ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+            ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
         }
     }
 
@@ -912,7 +1078,7 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
         // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
         if (mType == DIRECT) {
             ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s",
-                    desc->name, mName);
+                    desc->name, mThreadName);
             lStatus = BAD_VALUE;
             goto Exit;
         }
@@ -936,7 +1102,8 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
         case DUPLICATING:
         case RECORD:
         default:
-            ALOGW("createEffect_l() Cannot add global effect %s on thread %s", desc->name, mName);
+            ALOGW("createEffect_l() Cannot add global effect %s on thread %s",
+                    desc->name, mThreadName);
             lStatus = BAD_VALUE;
             goto Exit;
         }
@@ -1201,8 +1368,8 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge
     // mLatchD, mLatchQ,
     mLatchDValid(false), mLatchQValid(false)
 {
-    snprintf(mName, kNameLength, "AudioOut_%X", id);
-    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
+    snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
+    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
 
     // Assumes constructor is called by AudioFlinger with it's mLock held, but
     // it would be safer to explicitly pass initial masterVolume/masterMute as
@@ -1315,7 +1482,10 @@ void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& ar
 
 void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    dprintf(fd, "\nOutput thread %p:\n", this);
+    dprintf(fd, "\nOutput thread %p type %d (%s):\n", this, type(), threadTypeToString(type()));
+
+    dumpBase(fd, args);
+
     dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
     dprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
     dprintf(fd, " Total writes: %d\n", mNumWrites);
@@ -1326,15 +1496,17 @@ void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>&
     dprintf(fd, " Mixer buffer: %p\n", mMixerBuffer);
     dprintf(fd, " Effect buffer: %p\n", mEffectBuffer);
     dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
-
-    dumpBase(fd, args);
+    AudioStreamOut *output = mOutput;
+    audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
+    String8 flagsAsString = outputFlagsToString(flags);
+    dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
 }
 
 // Thread virtuals
 
 void AudioFlinger::PlaybackThread::onFirstRef()
 {
-    run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
+    run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
 }
 
 // ThreadBase virtuals
@@ -1378,9 +1550,10 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
             (
               (sharedBuffer != 0)
             ) ||
-            // use case 2: callback handler and frame count is default or at least as large as HAL
+            // use case 2: frame count is default or at least as large as HAL
             (
-              (tid != -1) &&
+              // we formerly checked for a callback handler (non-0 tid),
+              // but that is no longer required for TRANSFER_OBTAIN mode
              ((frameCount == 0) ||
              (frameCount >= mFrameCount))
            )
@@ -1420,20 +1593,25 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
                 audio_is_linear_pcm(format),
                 channelMask, sampleRate, mSampleRate, hasFastMixer(), tid,
                 mFastTrackAvailMask);
         *flags &= ~IAudioFlinger::TRACK_FAST;
-        // For compatibility with AudioTrack calculation, buffer depth is forced
-        // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
-        // This is probably too conservative, but legacy application code may depend on it.
-        // If you change this calculation, also review the start threshold which is related.
+      }
+    }
+    // For normal PCM streaming tracks, update minimum frame count.
+    // For compatibility with AudioTrack calculation, buffer depth is forced
+    // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+    // This is probably too conservative, but legacy application code may depend on it.
+    // If you change this calculation, also review the start threshold which is related.
+    if (!(*flags & IAudioFlinger::TRACK_FAST)
+            && audio_is_linear_pcm(format) && sharedBuffer == 0) {
         uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
         uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
         if (minBufCount < 2) {
            minBufCount = 2;
        }
-        size_t minFrameCount = mNormalFrameCount * minBufCount;
-        if (frameCount < minFrameCount) {
+        size_t minFrameCount =
+                minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate);
+        if (frameCount < minFrameCount) { // including frameCount == 0
            frameCount = minFrameCount;
        }
-    }
    }
 
    *pFrameCount = frameCount;
@@ -1831,7 +2009,7 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l()
         LOG_FATAL("HAL format %#x not supported for mixed output", mFormat);
     }
-    mFrameSize = audio_stream_out_frame_size(mOutput->stream);
+    mFrameSize = mOutput->getFrameSize();
     mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
@@ -1861,6 +2039,22 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l()
         }
     }
 
+    if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
+        // For best precision, we use float instead of the associated output
+        // device format (typically PCM 16 bit).
+
+        mFormat = AUDIO_FORMAT_PCM_FLOAT;
+        mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
+        mBufferSize = mFrameSize * mFrameCount;
+
+        // TODO: We currently use the associated output device channel mask and sample rate.
+        // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
+        // (if a valid mask) to avoid premature downmix.
+        // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
+        // instead of the output device sample rate to avoid loss of high frequency information.
+        // This may need to be updated as MixerThread/OutputTracks are added and not here.
+    }
+
     // Calculate size of normal sink buffer relative to the HAL output buffer size
     double multiplier = 1.0;
     if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
@@ -1966,7 +2160,7 @@ status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, ui
     } else {
         status_t status;
         uint32_t frames;
-        status = mOutput->stream->get_render_position(mOutput->stream, &frames);
+        status = mOutput->getRenderPosition(&frames);
         *dspFrames = (size_t)frames;
         return status;
     }
@@ -2008,13 +2202,13 @@ uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
 }
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
+AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
 {
     Mutex::Autolock _l(mLock);
     return mOutput;
 }
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
+AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
 {
     Mutex::Autolock _l(mLock);
     AudioStreamOut *output = mOutput;
@@ -2137,6 +2331,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
         } else {
             bytesWritten = framesWritten;
         }
+        mLatchDValid = false;
         status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
         if (status == NO_ERROR) {
             size_t totalFramesWritten = mNormalSink->framesWritten();
@@ -2159,8 +2354,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
         }
         // FIXME We should have an implementation of timestamps for direct output threads.
         // They are used e.g for multichannel PCM playback over HDMI.
-        bytesWritten = mOutput->stream->write(mOutput->stream,
-                (char *)mSinkBuffer + offset, mBytesRemaining);
+        bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
         if (mUseAsyncWrite &&
                 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
             // do not wait for async callback in case of error of full write
@@ -2640,7 +2834,9 @@ bool AudioFlinger::PlaybackThread::threadLoop()
                 }
             } else {
+                ATRACE_BEGIN("sleep");
                 usleep(sleepTime);
+                ATRACE_END();
             }
         }
 
@@ -2711,8 +2907,7 @@ status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
     if ((mType == OFFLOAD || mType == DIRECT)
            && mOutput != NULL && mOutput->stream->get_presentation_position) {
         uint64_t position64;
-        int ret = mOutput->stream->get_presentation_position(
-                mOutput->stream, &position64, &timestamp.mTime);
+        int ret = mOutput->getPresentationPosition(&position64, &timestamp.mTime);
         if (ret == 0) {
             timestamp.mPosition = (uint32_t)position64;
             return NO_ERROR;
@@ -2800,6 +2995,12 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
             mNormalFrameCount);
     mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
 
+    if (type == DUPLICATING) {
+        // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
+        // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
+        // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
+        return;
+    }
     // create an NBAIO sink for the HAL output stream, and negotiate
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
@@ -2841,6 +3042,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
         NBAIO_Format format = mOutputSink->format();
         NBAIO_Format origformat = format;
         // adjust format to match that of the Fast Mixer
+        ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
         format.mFormat = fastMixerFormat;
         format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
 
@@ -3020,8 +3222,10 @@ ssize_t AudioFlinger::MixerThread::threadLoop_write()
 #endif
         }
         state->mCommand = FastMixerState::MIX_WRITE;
+#ifdef FAST_THREAD_STATISTICS
         mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
-                FastMixerDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
+                FastThreadDumpState::kSamplingNforLowRamDevice : FastThreadDumpState::kSamplingN);
+#endif
         sq->end();
         sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
         if (kUseFastMixer == FastMixer_Dynamic) {
@@ -3083,7 +3287,7 @@ bool AudioFlinger::PlaybackThread::waitingAsyncCallback()
 void AudioFlinger::PlaybackThread::threadLoop_standby()
 {
     ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
-    mOutput->stream->common.standby(&mOutput->stream->common);
+    mOutput->standby();
     if (mUseAsyncWrite != 0) {
         // discard any pending drain or write ack by incrementing sequence
         mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
@@ -3386,8 +3590,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
             if (sr == mSampleRate) {
                 desiredFrames = mNormalFrameCount;
             } else {
-                // +1 for rounding and +1 for additional sample needed for interpolation
-                desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
+                desiredFrames = sourceFramesNeeded(sr, mNormalFrameCount, mSampleRate);
                 // add frames already consumed but not yet released by the resampler
                 // because mAudioTrackServerProxy->framesReady() will include these frames
                 desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
@@ -3405,6 +3608,23 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
         }
 
         size_t framesReady = track->framesReady();
+        if (ATRACE_ENABLED()) {
+            // I wish we had formatted trace names
+            char traceName[16];
+            strcpy(traceName, "nRdy");
+            int name = track->name();
+            if (AudioMixer::TRACK0 <= name &&
+                    name < (int) (AudioMixer::TRACK0 + AudioMixer::MAX_NUM_TRACKS)) {
+                name -= AudioMixer::TRACK0;
+                traceName[4] = (name / 10) + '0';
+                traceName[5] = (name % 10) + '0';
+            } else {
+                traceName[4] = '?';
+                traceName[5] = '?';
+            }
+            traceName[6] = '\0';
+            ATRACE_INT(traceName, framesReady);
+        }
         if ((framesReady >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
@@ -3836,7 +4056,7 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa
         status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                 keyValuePair.string());
         if (!mStandby && status == INVALID_OPERATION) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
+            mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
@@ -4178,8 +4398,8 @@ void AudioFlinger::DirectOutputThread::threadLoop_mix()
     while (frameCount) {
         AudioBufferProvider::Buffer buffer;
         buffer.frameCount = frameCount;
-        mActiveTrack->getNextBuffer(&buffer);
-        if (buffer.raw == NULL) {
+        status_t status = mActiveTrack->getNextBuffer(&buffer);
+        if (status != NO_ERROR || buffer.raw == NULL) {
             memset(curBuf, 0, frameCount * mFrameSize);
             break;
         }
@@ -4291,7 +4511,7 @@ bool AudioFlinger::DirectOutputThread::checkForNewParameter_l(const String8& key
         status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                 keyValuePair.string());
         if (!mStandby && status == INVALID_OPERATION) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
+            mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
@@ -4354,9 +4574,7 @@ void AudioFlinger::DirectOutputThread::cacheParameters_l()
 
 void AudioFlinger::DirectOutputThread::flushHw_l()
 {
-    if (mOutput->stream->flush != NULL) {
-        mOutput->stream->flush(mOutput->stream);
-    }
+    mOutput->flush();
     mHwPaused = false;
 }
 
@@ -4646,7 +4864,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
                 size_t audioHALFrames =
                         (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
                 size_t framesWritten =
-                        mBytesWritten / audio_stream_out_frame_size(mOutput->stream);
+                        mBytesWritten / mOutput->getFrameSize();
                 track->presentationComplete(framesWritten, audioHALFrames);
                 track->reset();
                 tracksToRemove->add(track);
@@ -4797,16 +5015,8 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
 
 ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
-    // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT
-    // for delivery downstream as needed. This in-place conversion is safe as
-    // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format
-    // (AUDIO_FORMAT_PCM_8_BIT is not allowed here).
-    if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
-        memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT,
-                mSinkBuffer, mFormat, writeFrames * mChannelCount);
-    }
     for (size_t i = 0; i < outputTracks.size(); i++) {
-        outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames);
+        outputTracks[i]->write(mSinkBuffer, writeFrames);
     }
     mStandby = false;
     return (ssize_t)mSinkBufferSize;
@@ -4833,25 +5043,26 @@ void AudioFlinger::DuplicatingThread::clearOutputTracks()
 void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
 {
     Mutex::Autolock _l(mLock);
-    // FIXME explain this formula
-    size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
-    // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat
-    // due to current usage case and restrictions on the AudioBufferProvider.
-    // Actual buffer conversion is done in threadLoop_write().
-    //
-    // TODO: This may change in the future, depending on multichannel
-    // (and non int16_t*) support on AF::PlaybackThread::OutputTrack
-    OutputTrack *outputTrack = new OutputTrack(thread,
+    // The downstream MixerThread consumes thread->frameCount() amount of frames per mix pass.
+    // Adjust for thread->sampleRate() to determine minimum buffer frame count.
+    // Then triple buffer because Threads do not run synchronously and may not be clock locked.
+    const size_t frameCount =
+            3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
+    // TODO: Consider asynchronous sample rate conversion to handle clock disparity
+    // from different OutputTracks and their associated MixerThreads (e.g. one may
+    // nearly empty and the other may be dropping data).
+
+    sp<OutputTrack> outputTrack = new OutputTrack(thread,
                                             this,
                                             mSampleRate,
-                                            AUDIO_FORMAT_PCM_16_BIT,
+                                            mFormat,
                                             mChannelMask,
                                             frameCount,
                                             IPCThreadState::self()->getCallingUid());
     if (outputTrack->cblk() != NULL) {
         thread->setStreamVolume(AUDIO_STREAM_PATCH, 1.0f);
         mOutputTracks.add(outputTrack);
-        ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
+        ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
         updateWaitTime_l();
     }
 }
@@ -4952,8 +5163,8 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
     // mFastCaptureNBLogWriter
     , mFastTrackAvail(false)
 {
-    snprintf(mName, kNameLength, "AudioIn_%X", id);
-    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
+    snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
+    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
 
     readInputParameters_l();
 
@@ -4993,7 +5204,7 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
     }
 
     if (initFastCapture) {
-        // create a Pipe for FastMixer to write to, and for us and fast tracks to read from
+        // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
         NBAIO_Format format = mInputSource->format();
         size_t pipeFramesP2 = roundup(mSampleRate / 25);    // double-buffering of 20 ms each
         size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
@@ -5094,7 +5305,7 @@ AudioFlinger::RecordThread::~RecordThread()
 
 void AudioFlinger::RecordThread::onFirstRef()
 {
-    run(mName, PRIORITY_URGENT_AUDIO);
+    run(mThreadName, PRIORITY_URGENT_AUDIO);
 }
 
 bool AudioFlinger::RecordThread::threadLoop()
@@ -5135,7 +5346,9 @@ reacquire_wakelock:
 
         // sleep with mutex unlocked
         if (sleepUs > 0) {
+            ATRACE_BEGIN("sleep");
             usleep(sleepUs);
+            ATRACE_END();
             sleepUs = 0;
         }
 
@@ -5279,7 +5492,8 @@ reacquire_wakelock:
             state->mCommand = FastCaptureState::READ_WRITE;
 #if 0 // FIXME
             mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
-                    FastCaptureDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
+                    FastThreadDumpState::kSamplingNforLowRamDevice :
+                    FastThreadDumpState::kSamplingN);
 #endif
             didModify = true;
         }
@@ -5427,8 +5641,8 @@ reacquire_wakelock:
                             upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t *)src,
                                     part1);
                         } else {
-                            downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (const int16_t *)src,
-                                    part1);
+                            downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
+                                    (const int16_t *)src, part1);
                         }
                         dst += part1 * activeTrack->mFrameSize;
                         front += part1;
@@ -5649,8 +5863,9 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRe
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // use case: callback handler
-            (tid != -1) &&
+            // we formerly checked for a callback handler (non-0 tid),
+            // but that is no longer required for TRANSFER_OBTAIN mode
+            //
            // frame count is not specified, or is exactly the pipe depth
            ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
            // PCM data
@@ -5939,15 +6154,17 @@ void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& a
 {
     dprintf(fd, "\nInput thread %p:\n", this);
 
-    if (mActiveTracks.size() > 0) {
-        dprintf(fd, " Buffer size: %zu bytes\n", mBufferSize);
-    } else {
+    dumpBase(fd, args);
+
+    if (mActiveTracks.size() == 0) {
         dprintf(fd, " No active record clients\n");
     }
     dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
-    dumpBase(fd, args);
+    // Make a non-atomic copy of fast capture dump state so it won't change underneath us
+    const FastCaptureDumpState copy(mFastCaptureDumpState);
+    copy.dump(fd);
 }
 
 void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -6412,4 +6629,4 @@ void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *co
     config->ext.mix.usecase.source = mAudioSource;
 }
 
-}; // namespace android
+} // namespace android