Diffstat (limited to 'services')
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 6
-rw-r--r--  services/audioflinger/AudioMixer.cpp | 186
-rw-r--r--  services/audioflinger/AudioMixer.h | 48
-rw-r--r--  services/audioflinger/AudioResamplerFirGen.h | 3
-rw-r--r--  services/audioflinger/AudioResamplerFirProcess.h | 6
-rw-r--r--  services/audioflinger/FastMixer.cpp | 3
-rw-r--r--  services/audioflinger/PlaybackTracks.h | 4
-rw-r--r--  services/audioflinger/Threads.cpp | 177
-rw-r--r--  services/audioflinger/Threads.h | 6
-rw-r--r--  services/audioflinger/Tracks.cpp | 54
-rwxr-xr-x  services/audioflinger/tests/mixer_to_wav_tests.sh | 12
-rw-r--r--  services/audioflinger/tests/test-mixer.cpp | 92
-rw-r--r--  services/camera/libcameraservice/Android.mk | 1
-rw-r--r--  services/camera/libcameraservice/CameraService.cpp | 56
-rw-r--r--  services/camera/libcameraservice/CameraService.h | 6
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.cpp | 2
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.h | 2
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.cpp | 4
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.h | 2
-rw-r--r--  services/camera/libcameraservice/api2/CameraDeviceClient.cpp | 2
-rw-r--r--  services/camera/libcameraservice/api2/CameraDeviceClient.h | 2
-rw-r--r--  services/camera/libcameraservice/api_pro/ProCamera2Client.cpp | 2
-rw-r--r--  services/camera/libcameraservice/api_pro/ProCamera2Client.h | 2
-rw-r--r--  services/camera/libcameraservice/common/Camera2ClientBase.cpp | 2
-rw-r--r--  services/camera/libcameraservice/common/Camera2ClientBase.h | 3
-rw-r--r--  services/camera/libcameraservice/common/CameraDeviceBase.h | 3
-rw-r--r--  services/camera/libcameraservice/common/CameraModule.cpp | 130
-rw-r--r--  services/camera/libcameraservice/common/CameraModule.h | 63
-rw-r--r--  services/camera/libcameraservice/device1/CameraHardwareInterface.h | 17
-rw-r--r--  services/camera/libcameraservice/device2/Camera2Device.cpp | 8
-rw-r--r--  services/camera/libcameraservice/device2/Camera2Device.h | 2
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Device.cpp | 9
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Device.h | 2
-rw-r--r--  services/medialog/Android.mk | 2
34 files changed, 656 insertions(+), 263 deletions(-)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 993db73..9ad437a 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -185,7 +185,8 @@ AudioFlinger::AudioFlinger()
char value[PROPERTY_VALUE_MAX];
bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
if (doLog) {
- mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters", MemoryHeapBase::READ_ONLY);
+ mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
+ MemoryHeapBase::READ_ONLY);
}
#ifdef TEE_SINK
@@ -401,6 +402,9 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
String8 result(kClientLockedString);
write(fd, result.string(), result.size());
}
+
+ EffectDumpEffects(fd);
+
dumpClients(fd, args);
if (clientLocked) {
mClientLock.unlock();
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index fd28ea1..0d4b358 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -430,6 +430,10 @@ void AudioMixer::setLog(NBLog::Writer *log)
mState.mLog = log;
}
+static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
+ return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+}
+
int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
audio_format_t format, int sessionId)
{
@@ -492,24 +496,23 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
t->mInputBufferProvider = NULL;
t->mReformatBufferProvider = NULL;
t->downmixerBufferProvider = NULL;
+ t->mPostDownmixReformatBufferProvider = NULL;
t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
t->mFormat = format;
- t->mMixerInFormat = kUseFloat && kUseNewMixer
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+ t->mMixerInFormat = selectMixerInFormat(format);
+ t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
// Check the downmixing (or upmixing) requirements.
- status_t status = initTrackDownmix(t, n);
+ status_t status = t->prepareForDownmix();
if (status != OK) {
ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
return -1;
}
- // initTrackDownmix() may change the input format requirement.
- // If you desire floating point input to the mixer, it may change
- // to integer because the downmixer requires integer to process.
+ // prepareForDownmix() may change mDownmixRequiresFormat
ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
- prepareTrackForReformat(t, n);
+ t->prepareForReformat();
mTrackNames |= 1 << n;
return TRACK0 + n;
}
@@ -526,7 +529,7 @@ void AudioMixer::invalidateState(uint32_t mask)
}
// Called when channel masks have changed for a track name
-// TODO: Fix Downmixbufferprofider not to (possibly) change mixer input format,
+// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
// which will simplify this logic.
bool AudioMixer::setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
@@ -551,21 +554,18 @@ bool AudioMixer::setChannelMasks(int name,
// channel masks have changed, does this track need a downmixer?
// update to try using our desired format (if we aren't already using it)
- const audio_format_t prevMixerInFormat = track.mMixerInFormat;
- track.mMixerInFormat = kUseFloat && kUseNewMixer
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- const status_t status = initTrackDownmix(&mState.tracks[name], name);
+ const audio_format_t prevDownmixerFormat = track.mDownmixRequiresFormat;
+ const status_t status = mState.tracks[name].prepareForDownmix();
ALOGE_IF(status != OK,
- "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x",
+ "prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
status, track.channelMask, track.mMixerChannelMask);
- const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat;
- if (mixerInFormatChanged) {
- prepareTrackForReformat(&track, name); // because of downmixer, track format may change!
+ if (prevDownmixerFormat != track.mDownmixRequiresFormat) {
+ track.prepareForReformat(); // because of downmixer, track format may change!
}
- if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) {
- // resampler input format or channels may have changed.
+ if (track.resampler && mixerChannelCountChanged) {
+ // resampler channels may have changed.
const uint32_t resetToSampleRate = track.sampleRate;
delete track.resampler;
track.resampler = NULL;
@@ -576,99 +576,122 @@ bool AudioMixer::setChannelMasks(int name,
return true;
}
-status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName)
-{
- // Only remix (upmix or downmix) if the track and mixer/device channel masks
- // are not the same and not handled internally, as mono -> stereo currently is.
- if (pTrack->channelMask != pTrack->mMixerChannelMask
- && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO
- && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
- return prepareTrackForDownmix(pTrack, trackName);
- }
- // no remix necessary
- unprepareTrackForDownmix(pTrack, trackName);
- return NO_ERROR;
-}
-
-void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) {
- ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName);
+void AudioMixer::track_t::unprepareForDownmix() {
+ ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
- if (pTrack->downmixerBufferProvider != NULL) {
+ mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
+ if (downmixerBufferProvider != NULL) {
// this track had previously been configured with a downmixer, delete it
ALOGV(" deleting old downmixer");
- delete pTrack->downmixerBufferProvider;
- pTrack->downmixerBufferProvider = NULL;
- reconfigureBufferProviders(pTrack);
+ delete downmixerBufferProvider;
+ downmixerBufferProvider = NULL;
+ reconfigureBufferProviders();
} else {
ALOGV(" nothing to do, no downmixer to delete");
}
}
-status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName)
+status_t AudioMixer::track_t::prepareForDownmix()
{
- ALOGV("AudioMixer::prepareTrackForDownmix(%d) with mask 0x%x", trackName, pTrack->channelMask);
+ ALOGV("AudioMixer::prepareForDownmix(%p) with mask 0x%x",
+ this, channelMask);
// discard the previous downmixer if there was one
- unprepareTrackForDownmix(pTrack, trackName);
+ unprepareForDownmix();
+ // Only remix (upmix or downmix) if the track and mixer/device channel masks
+ // are not the same and not handled internally, as mono -> stereo currently is.
+ if (channelMask == mMixerChannelMask
+ || (channelMask == AUDIO_CHANNEL_OUT_MONO
+ && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
+ return NO_ERROR;
+ }
if (DownmixerBufferProvider::isMultichannelCapable()) {
- DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
- pTrack->mMixerChannelMask,
- AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */,
- pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
+ DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
+ mMixerChannelMask,
+ AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
+ sampleRate, sessionId, kCopyBufferFrameCount);
if (pDbp->isValid()) { // if constructor completed properly
- pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
- pTrack->downmixerBufferProvider = pDbp;
- reconfigureBufferProviders(pTrack);
+ mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+ downmixerBufferProvider = pDbp;
+ reconfigureBufferProviders();
return NO_ERROR;
}
delete pDbp;
}
// Effect downmixer does not accept the channel conversion. Let's use our remixer.
- RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask,
- pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount);
+ RemixBufferProvider* pRbp = new RemixBufferProvider(channelMask,
+ mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount);
// Remix always finds a conversion whereas Downmixer effect above may fail.
- pTrack->downmixerBufferProvider = pRbp;
- reconfigureBufferProviders(pTrack);
+ downmixerBufferProvider = pRbp;
+ reconfigureBufferProviders();
return NO_ERROR;
}
-void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) {
- ALOGV("AudioMixer::unprepareTrackForReformat(%d)", trackName);
- if (pTrack->mReformatBufferProvider != NULL) {
- delete pTrack->mReformatBufferProvider;
- pTrack->mReformatBufferProvider = NULL;
- reconfigureBufferProviders(pTrack);
+void AudioMixer::track_t::unprepareForReformat() {
+ ALOGV("AudioMixer::unprepareForReformat(%p)", this);
+ bool requiresReconfigure = false;
+ if (mReformatBufferProvider != NULL) {
+ delete mReformatBufferProvider;
+ mReformatBufferProvider = NULL;
+ requiresReconfigure = true;
+ }
+ if (mPostDownmixReformatBufferProvider != NULL) {
+ delete mPostDownmixReformatBufferProvider;
+ mPostDownmixReformatBufferProvider = NULL;
+ requiresReconfigure = true;
+ }
+ if (requiresReconfigure) {
+ reconfigureBufferProviders();
}
}
-status_t AudioMixer::prepareTrackForReformat(track_t* pTrack, int trackName)
+status_t AudioMixer::track_t::prepareForReformat()
{
- ALOGV("AudioMixer::prepareTrackForReformat(%d) with format %#x", trackName, pTrack->mFormat);
- // discard the previous reformatter if there was one
- unprepareTrackForReformat(pTrack, trackName);
- // only configure reformatter if needed
- if (pTrack->mFormat != pTrack->mMixerInFormat) {
- pTrack->mReformatBufferProvider = new ReformatBufferProvider(
- audio_channel_count_from_out_mask(pTrack->channelMask),
- pTrack->mFormat, pTrack->mMixerInFormat,
+ ALOGV("AudioMixer::prepareForReformat(%p) with format %#x", this, mFormat);
+ // discard previous reformatters
+ unprepareForReformat();
+ // only configure reformatters as needed
+ const audio_format_t targetFormat = mDownmixRequiresFormat != AUDIO_FORMAT_INVALID
+ ? mDownmixRequiresFormat : mMixerInFormat;
+ bool requiresReconfigure = false;
+ if (mFormat != targetFormat) {
+ mReformatBufferProvider = new ReformatBufferProvider(
+ audio_channel_count_from_out_mask(channelMask),
+ mFormat,
+ targetFormat,
kCopyBufferFrameCount);
- reconfigureBufferProviders(pTrack);
+ requiresReconfigure = true;
+ }
+ if (targetFormat != mMixerInFormat) {
+ mPostDownmixReformatBufferProvider = new ReformatBufferProvider(
+ audio_channel_count_from_out_mask(mMixerChannelMask),
+ targetFormat,
+ mMixerInFormat,
+ kCopyBufferFrameCount);
+ requiresReconfigure = true;
+ }
+ if (requiresReconfigure) {
+ reconfigureBufferProviders();
}
return NO_ERROR;
}
-void AudioMixer::reconfigureBufferProviders(track_t* pTrack)
+void AudioMixer::track_t::reconfigureBufferProviders()
{
- pTrack->bufferProvider = pTrack->mInputBufferProvider;
- if (pTrack->mReformatBufferProvider) {
- pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider);
- pTrack->bufferProvider = pTrack->mReformatBufferProvider;
+ bufferProvider = mInputBufferProvider;
+ if (mReformatBufferProvider) {
+ mReformatBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mReformatBufferProvider;
+ }
+ if (downmixerBufferProvider) {
+ downmixerBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = downmixerBufferProvider;
}
- if (pTrack->downmixerBufferProvider) {
- pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider);
- pTrack->bufferProvider = pTrack->downmixerBufferProvider;
+ if (mPostDownmixReformatBufferProvider) {
+ mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mPostDownmixReformatBufferProvider;
}
}
@@ -687,9 +710,9 @@ void AudioMixer::deleteTrackName(int name)
delete track.resampler;
track.resampler = NULL;
// delete the downmixer
- unprepareTrackForDownmix(&mState.tracks[name], name);
+ mState.tracks[name].unprepareForDownmix();
// delete the reformatter
- unprepareTrackForReformat(&mState.tracks[name], name);
+ mState.tracks[name].unprepareForReformat();
mTrackNames &= ~(1<<name);
}
@@ -828,7 +851,7 @@ void AudioMixer::setParameter(int name, int target, int param, void *value)
ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
track.mFormat = format;
ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
- prepareTrackForReformat(&track, name);
+ track.prepareForReformat();
invalidateState(1 << name);
}
} break;
@@ -1032,10 +1055,13 @@ void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider
if (mState.tracks[name].mReformatBufferProvider != NULL) {
mState.tracks[name].mReformatBufferProvider->reset();
} else if (mState.tracks[name].downmixerBufferProvider != NULL) {
+ mState.tracks[name].downmixerBufferProvider->reset();
+ } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
+ mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
}
mState.tracks[name].mInputBufferProvider = bufferProvider;
- reconfigureBufferProviders(&mState.tracks[name]);
+ mState.tracks[name].reconfigureBufferProviders();
}
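
To make the two-stage reformat logic in prepareForReformat() above concrete: the effect-based DownmixerBufferProvider only accepts PCM 16-bit, so a float track feeding a float mixer engine needs a reformat both before and after the downmix. A minimal standalone sketch of just that decision, using stand-in enum values instead of the real audio_format_t constants:

    #include <cstdio>

    enum Format { PCM_16_BIT, PCM_FLOAT, INVALID_FORMAT };  // stand-ins for audio_format_t values

    // Mirrors track_t::prepareForReformat(): pick the format the downmixer needs (if any),
    // then decide which of the two reformat stages must be created.
    static void planReformat(Format trackFormat, Format downmixRequiresFormat, Format mixerInFormat) {
        const Format target = (downmixRequiresFormat != INVALID_FORMAT) ? downmixRequiresFormat
                                                                        : mixerInFormat;
        const bool preDownmixReformat  = (trackFormat != target);    // track format -> target
        const bool postDownmixReformat = (target != mixerInFormat);  // target -> mixer engine format
        printf("pre-downmix reformat: %s, post-downmix reformat: %s\n",
               preDownmixReformat ? "yes" : "no", postDownmixReformat ? "yes" : "no");
    }

    int main() {
        planReformat(PCM_FLOAT, PCM_16_BIT, PCM_FLOAT);       // effect downmixer, float mixer: yes, yes
        planReformat(PCM_16_BIT, INVALID_FORMAT, PCM_FLOAT);  // no downmix, 16-bit track, float mixer: yes, no
        return 0;
    }
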
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index f4f142b..c5df08a 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -127,10 +127,16 @@ public:
size_t getUnreleasedFrames(int name) const;
static inline bool isValidPcmTrackFormat(audio_format_t format) {
- return format == AUDIO_FORMAT_PCM_16_BIT ||
- format == AUDIO_FORMAT_PCM_24_BIT_PACKED ||
- format == AUDIO_FORMAT_PCM_32_BIT ||
- format == AUDIO_FORMAT_PCM_FLOAT;
+ switch (format) {
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return true;
+ default:
+ return false;
+ }
}
private:
@@ -205,17 +211,34 @@ private:
int32_t* auxBuffer;
// 16-byte boundary
+
+ /* Buffer providers are constructed to translate the track input data as needed.
+ *
+ * 1) mInputBufferProvider: The AudioTrack buffer provider.
+ * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
+ * match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
+ * requires reformat. For example, it may convert floating point input to
+ * PCM_16_bit if that's required by the downmixer.
+ * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * the number of channels required by the mixer sink.
+ * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * the downmixer requirements to the mixer engine input requirements.
+ */
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
CopyBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting.
CopyBufferProvider* downmixerBufferProvider; // wrapper for channel conversion.
+ CopyBufferProvider* mPostDownmixReformatBufferProvider;
+ // 16-byte boundary
int32_t sessionId;
- // 16-byte boundary
audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
audio_format_t mFormat; // input track format
audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
// each track must be converted to this format.
+ audio_format_t mDownmixRequiresFormat; // required downmixer format
+ // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+ // AUDIO_FORMAT_INVALID if no required format
float mVolume[MAX_NUM_VOLUMES]; // floating point set volume
float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
@@ -225,7 +248,6 @@ private:
float mPrevAuxLevel; // floating point prev aux level
float mAuxInc; // floating point aux increment
- // 16-byte boundary
audio_channel_mask_t mMixerChannelMask;
uint32_t mMixerChannelCount;
@@ -236,6 +258,12 @@ private:
void adjustVolumeRamp(bool aux, bool useFloat = false);
size_t getUnreleasedFrames() const { return resampler != NULL ?
resampler->getUnreleasedFrames() : 0; };
+
+ status_t prepareForDownmix();
+ void unprepareForDownmix();
+ status_t prepareForReformat();
+ void unprepareForReformat();
+ void reconfigureBufferProviders();
};
typedef void (*process_hook_t)(state_t* state, int64_t pts);
@@ -382,14 +410,6 @@ private:
bool setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
- // TODO: remove unused trackName/trackNum from functions below.
- static status_t initTrackDownmix(track_t* pTrack, int trackName);
- static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
- static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
- static status_t prepareTrackForReformat(track_t* pTrack, int trackNum);
- static void unprepareTrackForReformat(track_t* pTrack, int trackName);
- static void reconfigureBufferProviders(track_t* pTrack);
-
static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
int32_t* aux);
static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
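
For reference, after this change the providers are chained by reconfigureBufferProviders() in the order input -> mReformatBufferProvider -> downmixerBufferProvider -> mPostDownmixReformatBufferProvider, with each optional stage reading from whatever is currently at the head of the chain. A minimal sketch of that wiring, using a hypothetical Stage type rather than the AudioFlinger provider classes:

    #include <initializer_list>

    // Hypothetical stand-in for the AudioBufferProvider / CopyBufferProvider classes.
    struct Stage {
        Stage* source = nullptr;                 // upstream provider
        void setBufferProvider(Stage* s) { source = s; }
    };

    // Mirrors track_t::reconfigureBufferProviders(): each optional stage, when present,
    // consumes from the current head of the chain; the mixer then reads from the head.
    Stage* chainProviders(Stage* input, Stage* preDownmixReformat,
                          Stage* downmixer, Stage* postDownmixReformat) {
        Stage* head = input;
        for (Stage* s : {preDownmixReformat, downmixer, postDownmixReformat}) {
            if (s != nullptr) {
                s->setBufferProvider(head);
                head = s;
            }
        }
        return head;
    }

    int main() {
        Stage input, reformat, downmix;
        // Float track through the effect downmixer: input -> reformat -> downmix -> (mixer).
        Stage* head = chainProviders(&input, &reformat, &downmix, nullptr);
        return head == &downmix ? 0 : 1;
    }
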
diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h
index f3718b6..a9c84de 100644
--- a/services/audioflinger/AudioResamplerFirGen.h
+++ b/services/audioflinger/AudioResamplerFirGen.h
@@ -204,7 +204,8 @@ struct I0ATerm {
template <>
struct I0ATerm<0> { // 1/sqrt(2*PI);
- static const CONSTEXPR double value = 0.398942280401432677939946059934381868475858631164934657665925;
+ static const CONSTEXPR double value =
+ 0.398942280401432677939946059934381868475858631164934657665925;
};
#if USE_HORNERS_METHOD
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
index efc8055..1118bf8 100644
--- a/services/audioflinger/AudioResamplerFirProcess.h
+++ b/services/audioflinger/AudioResamplerFirProcess.h
@@ -174,7 +174,8 @@ struct InterpNull {
* Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase.
*/
-template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO, typename TINTERP>
+template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO,
+ typename TINTERP>
static inline
void ProcessBase(TO* const out,
size_t count,
@@ -268,7 +269,8 @@ void Process(TO* const out,
TINTERP lerpP,
const TO* const volumeLR)
{
- ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR);
+ ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP,
+ volumeLR);
}
/*
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 2678cbf..141a79e 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -629,7 +629,8 @@ void FastMixerDumpState::dump(int fd) const
left.sample(tail[i]);
right.sample(tail[n - (i + 1)]);
}
- dprintf(fd, " Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n"
+ dprintf(fd, " Distribution of mix cycle times in ms for the tails "
+ "(> ~3 stddev outliers):\n"
" left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
" right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ee48276..902d5e4 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -255,7 +255,7 @@ public:
class Buffer : public AudioBufferProvider::Buffer {
public:
- int16_t *mBuffer;
+ void *mBuffer;
};
OutputTrack(PlaybackThread *thread,
@@ -271,7 +271,7 @@ public:
AudioSystem::SYNC_EVENT_NONE,
int triggerSession = 0);
virtual void stop();
- bool write(int16_t* data, uint32_t frames);
+ bool write(void* data, uint32_t frames);
bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 51025fe..15dd408 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -172,6 +172,18 @@ static int sFastTrackMultiplier = kFastTrackMultiplier;
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
static const size_t kRecordThreadReadOnlyHeapSize = 0x2000;
+// Returns the source frames needed to resample to destination frames. This is not a precise
+// value and depends on the resampler (and possibly how it handles rounding internally).
+// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
+// may not be true if the resampler is asynchronous.
+static inline size_t sourceFramesNeeded(
+ uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
+ // +1 for rounding - always do this even if matched ratio
+ // +1 for additional sample needed for interpolation
+ return srcSampleRate == dstSampleRate ? dstFramesRequired :
+ size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
+}
+
// ----------------------------------------------------------------------------
static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
@@ -314,6 +326,64 @@ void CpuStats::sample(const String8 &title
// ThreadBase
// ----------------------------------------------------------------------------
+// static
+const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBase::type_t type)
+{
+ switch (type) {
+ case MIXER:
+ return "MIXER";
+ case DIRECT:
+ return "DIRECT";
+ case DUPLICATING:
+ return "DUPLICATING";
+ case RECORD:
+ return "RECORD";
+ case OFFLOAD:
+ return "OFFLOAD";
+ default:
+ return "unknown";
+ }
+}
+
+static String8 outputFlagsToString(audio_output_flags_t flags)
+{
+ static const struct mapping {
+ audio_output_flags_t mFlag;
+ const char * mString;
+ } mappings[] = {
+ AUDIO_OUTPUT_FLAG_DIRECT, "DIRECT",
+ AUDIO_OUTPUT_FLAG_PRIMARY, "PRIMARY",
+ AUDIO_OUTPUT_FLAG_FAST, "FAST",
+ AUDIO_OUTPUT_FLAG_DEEP_BUFFER, "DEEP_BUFFER",
+ AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD, "COMPRESS_OFFLOAD",
+ AUDIO_OUTPUT_FLAG_NON_BLOCKING, "NON_BLOCKING",
+ AUDIO_OUTPUT_FLAG_HW_AV_SYNC, "HW_AV_SYNC",
+ AUDIO_OUTPUT_FLAG_NONE, "NONE", // must be last
+ };
+ String8 result;
+ audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
+ const mapping *entry;
+ for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
+ allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
+ if (flags & entry->mFlag) {
+ if (!result.isEmpty()) {
+ result.append("|");
+ }
+ result.append(entry->mString);
+ }
+ }
+ if (flags & ~allFlags) {
+ if (!result.isEmpty()) {
+ result.append("|");
+ }
+ result.appendFormat("0x%X", flags & ~allFlags);
+ }
+ if (result.isEmpty()) {
+ result.append(entry->mString);
+ }
+ return result;
+}
+
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
: Thread(false /*canCallJava*/),
@@ -577,20 +647,21 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __u
bool locked = AudioFlinger::dumpTryLock(mLock);
if (!locked) {
- dprintf(fd, "thread %p maybe dead locked\n", this);
+ dprintf(fd, "thread %p may be deadlocked\n", this);
}
dprintf(fd, " I/O handle: %d\n", mId);
dprintf(fd, " TID: %d\n", getTid());
dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
- dprintf(fd, " Sample rate: %u\n", mSampleRate);
+ dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
+ dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
dprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize);
- dprintf(fd, " Channel Count: %u\n", mChannelCount);
- dprintf(fd, " Channel Mask: 0x%08x (%s)\n", mChannelMask,
+ dprintf(fd, " Channel count: %u\n", mChannelCount);
+ dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
channelMaskToString(mChannelMask, mType != RECORD).string());
- dprintf(fd, " Format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
- dprintf(fd, " Frame size: %zu\n", mFrameSize);
+ dprintf(fd, " Format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+ dprintf(fd, " Frame size: %zu bytes\n", mFrameSize);
dprintf(fd, " Pending config events:");
size_t numConfig = mConfigEvents.size();
if (numConfig) {
@@ -1315,7 +1386,7 @@ void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& ar
void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
- dprintf(fd, "\nOutput thread %p:\n", this);
+ dprintf(fd, "\nOutput thread %p type %d (%s):\n", this, type(), threadTypeToString(type()));
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
dprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
@@ -1326,6 +1397,10 @@ void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>&
dprintf(fd, " Mixer buffer: %p\n", mMixerBuffer);
dprintf(fd, " Effect buffer: %p\n", mEffectBuffer);
dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
+ AudioStreamOut *output = mOutput;
+ audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
+ String8 flagsAsString = outputFlagsToString(flags);
+ dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
dumpBase(fd, args);
}
@@ -1861,6 +1936,22 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l()
}
}
+ if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
+ // For best precision, we use float instead of the associated output
+ // device format (typically PCM 16 bit).
+
+ mFormat = AUDIO_FORMAT_PCM_FLOAT;
+ mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
+ mBufferSize = mFrameSize * mFrameCount;
+
+ // TODO: We currently use the associated output device channel mask and sample rate.
+ // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
+ // (if a valid mask) to avoid premature downmix.
+ // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
+ // instead of the output device sample rate to avoid loss of high frequency information.
+ // This may need to be updated as MixerThreads/OutputTracks are added, rather than set only here.
+ }
+
// Calculate size of normal sink buffer relative to the HAL output buffer size
double multiplier = 1.0;
if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
@@ -2137,6 +2228,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
} else {
bytesWritten = framesWritten;
}
+ mLatchDValid = false;
status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
if (status == NO_ERROR) {
size_t totalFramesWritten = mNormalSink->framesWritten();
@@ -2640,7 +2732,9 @@ bool AudioFlinger::PlaybackThread::threadLoop()
}
} else {
+ ATRACE_BEGIN("sleep");
usleep(sleepTime);
+ ATRACE_END();
}
}
@@ -2800,6 +2894,12 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
mNormalFrameCount);
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
+ if (type == DUPLICATING) {
+ // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
+ // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
+ // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
+ return;
+ }
// create an NBAIO sink for the HAL output stream, and negotiate
mOutputSink = new AudioStreamOutSink(output->stream);
size_t numCounterOffers = 0;
@@ -2841,6 +2941,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
NBAIO_Format format = mOutputSink->format();
NBAIO_Format origformat = format;
// adjust format to match that of the Fast Mixer
+ ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
@@ -3386,8 +3487,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
if (sr == mSampleRate) {
desiredFrames = mNormalFrameCount;
} else {
- // +1 for rounding and +1 for additional sample needed for interpolation
- desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
+ desiredFrames = sourceFramesNeeded(sr, mNormalFrameCount, mSampleRate);
// add frames already consumed but not yet released by the resampler
// because mAudioTrackServerProxy->framesReady() will include these frames
desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
@@ -3405,6 +3505,23 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
}
size_t framesReady = track->framesReady();
+ if (ATRACE_ENABLED()) {
+ // I wish we had formatted trace names
+ char traceName[16];
+ strcpy(traceName, "nRdy");
+ int name = track->name();
+ if (AudioMixer::TRACK0 <= name &&
+ name < (int) (AudioMixer::TRACK0 + AudioMixer::MAX_NUM_TRACKS)) {
+ name -= AudioMixer::TRACK0;
+ traceName[4] = (name / 10) + '0';
+ traceName[5] = (name % 10) + '0';
+ } else {
+ traceName[4] = '?';
+ traceName[5] = '?';
+ }
+ traceName[6] = '\0';
+ ATRACE_INT(traceName, framesReady);
+ }
if ((framesReady >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
{
@@ -4797,16 +4914,8 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
{
- // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT
- // for delivery downstream as needed. This in-place conversion is safe as
- // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format
- // (AUDIO_FORMAT_PCM_8_BIT is not allowed here).
- if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
- memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT,
- mSinkBuffer, mFormat, writeFrames * mChannelCount);
- }
for (size_t i = 0; i < outputTracks.size(); i++) {
- outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames);
+ outputTracks[i]->write(mSinkBuffer, writeFrames);
}
mStandby = false;
return (ssize_t)mSinkBufferSize;
@@ -4833,25 +4942,26 @@ void AudioFlinger::DuplicatingThread::clearOutputTracks()
void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
{
Mutex::Autolock _l(mLock);
- // FIXME explain this formula
- size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
- // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat
- // due to current usage case and restrictions on the AudioBufferProvider.
- // Actual buffer conversion is done in threadLoop_write().
- //
- // TODO: This may change in the future, depending on multichannel
- // (and non int16_t*) support on AF::PlaybackThread::OutputTrack
- OutputTrack *outputTrack = new OutputTrack(thread,
+ // The downstream MixerThread consumes thread->frameCount() frames per mix pass.
+ // Adjust for thread->sampleRate() to determine minimum buffer frame count.
+ // Then triple buffer because Threads do not run synchronously and may not be clock locked.
+ const size_t frameCount =
+ 3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
+ // TODO: Consider asynchronous sample rate conversion to handle clock disparity
+ // from different OutputTracks and their associated MixerThreads (e.g. one may be
+ // nearly empty and the other may be dropping data).
+
+ sp<OutputTrack> outputTrack = new OutputTrack(thread,
this,
mSampleRate,
- AUDIO_FORMAT_PCM_16_BIT,
+ mFormat,
mChannelMask,
frameCount,
IPCThreadState::self()->getCallingUid());
if (outputTrack->cblk() != NULL) {
thread->setStreamVolume(AUDIO_STREAM_PATCH, 1.0f);
mOutputTracks.add(outputTrack);
- ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
+ ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
updateWaitTime_l();
}
}
@@ -5135,7 +5245,9 @@ reacquire_wakelock:
// sleep with mutex unlocked
if (sleepUs > 0) {
+ ATRACE_BEGIN("sleep");
usleep(sleepUs);
+ ATRACE_END();
sleepUs = 0;
}
@@ -5279,7 +5391,8 @@ reacquire_wakelock:
state->mCommand = FastCaptureState::READ_WRITE;
#if 0 // FIXME
mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
- FastCaptureDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
+ FastCaptureDumpState::kSamplingNforLowRamDevice :
+ FastMixerDumpState::kSamplingN);
#endif
didModify = true;
}
@@ -5427,8 +5540,8 @@ reacquire_wakelock:
upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t *)src,
part1);
} else {
- downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (const int16_t *)src,
- part1);
+ downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
+ (const int16_t *)src, part1);
}
dst += part1 * activeTrack->mFrameSize;
front += part1;
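
As a sanity check on the sourceFramesNeeded() helper introduced above, here is a standalone copy of it with a worked example (the sample rates and frame count are illustrative values, not taken from this change):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Copy of the helper added to Threads.cpp, so this check compiles on its own.
    static inline size_t sourceFramesNeeded(
            uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
        return srcSampleRate == dstSampleRate ? dstFramesRequired :
                size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
    }

    int main() {
        // 44.1 kHz track feeding a 48 kHz mixer that wants 1024 frames:
        // 1024 * 44100 / 48000 = 940 (integer division), +1 rounding +1 interpolation = 942.
        assert(sourceFramesNeeded(44100, 1024, 48000) == 942);
        // Matched rates: the destination frame count is returned unchanged.
        assert(sourceFramesNeeded(48000, 1024, 48000) == 1024);
        return 0;
    }
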
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1088843..a1ac42c 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,6 +32,8 @@ public:
OFFLOAD // Thread class is OffloadThread
};
+ static const char *threadTypeToString(type_t type);
+
ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
virtual ~ThreadBase();
@@ -406,6 +408,7 @@ protected:
audio_channel_mask_t mChannelMask;
uint32_t mChannelCount;
size_t mFrameSize;
+ // not the HAL frame size: this is the frame size of the output sink (the pipe to the FastMixer)
audio_format_t mFormat; // Source format for Recording and
// Sink format for Playback.
// Sink format may be different than
@@ -1167,7 +1170,8 @@ private:
const sp<MemoryDealer> mReadOnlyHeap;
// one-time initialization, no locks required
- sp<FastCapture> mFastCapture; // non-0 if there is also a fast capture
+ sp<FastCapture> mFastCapture; // non-0 if there is also
+ // a fast capture
// FIXME audio watchdog thread
// contents are not guaranteed to be consistent, no locks required
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e970036..7757ea2 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -859,6 +859,7 @@ void AudioFlinger::PlaybackThread::Track::reset()
if (mState == FLUSHED) {
mState = IDLE;
}
+ mPreviousValid = false;
}
}
@@ -1709,14 +1710,13 @@ void AudioFlinger::PlaybackThread::OutputTrack::stop()
mActive = false;
}
-bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
+bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
{
Buffer *pInBuffer;
Buffer inBuffer;
- uint32_t channelCount = mChannelCount;
bool outputBufferFull = false;
inBuffer.frameCount = frames;
- inBuffer.i16 = data;
+ inBuffer.raw = data;
uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
@@ -1726,13 +1726,17 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
if (thread != 0) {
MixerThread *mixerThread = (MixerThread *)thread.get();
if (mFrameCount > frames) {
+ // For the first write after being inactive, ensure that we have
+ // enough frames to fill mFrameCount (which should be a multiple of
+ // the minimum buffer requirement of the downstream MixerThread).
+ // This provides enough frames for the downstream mixer to begin
+ // (see AudioFlinger::PlaybackThread::Track::isReady()).
if (mBufferQueue.size() < kMaxOverFlowBuffers) {
uint32_t startFrames = (mFrameCount - frames);
pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
+ pInBuffer->mBuffer = calloc(1, startFrames * mFrameSize);
pInBuffer->frameCount = startFrames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
+ pInBuffer->raw = pInBuffer->mBuffer;
mBufferQueue.add(pInBuffer);
} else {
ALOGW("OutputTrack::write() %p no more buffers in queue", this);
@@ -1773,20 +1777,20 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
pInBuffer->frameCount;
- memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
+ memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
Proxy::Buffer buf;
buf.mFrameCount = outFrames;
buf.mRaw = NULL;
mClientProxy->releaseBuffer(&buf);
pInBuffer->frameCount -= outFrames;
- pInBuffer->i16 += outFrames * channelCount;
+ pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
mOutBuffer.frameCount -= outFrames;
- mOutBuffer.i16 += outFrames * channelCount;
+ mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
if (pInBuffer->frameCount == 0) {
if (mBufferQueue.size()) {
mBufferQueue.removeAt(0);
- delete [] pInBuffer->mBuffer;
+ free(pInBuffer->mBuffer);
delete pInBuffer;
ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
mThread.unsafe_get(), mBufferQueue.size());
@@ -1802,11 +1806,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
if (thread != 0 && !thread->standby()) {
if (mBufferQueue.size() < kMaxOverFlowBuffers) {
pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
+ pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
pInBuffer->frameCount = inBuffer.frameCount;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
- sizeof(int16_t));
+ pInBuffer->raw = pInBuffer->mBuffer;
+ memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
mBufferQueue.add(pInBuffer);
ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
mThread.unsafe_get(), mBufferQueue.size());
@@ -1817,23 +1820,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
}
}
- // Calling write() with a 0 length buffer, means that no more data will be written:
- // If no more buffers are pending, fill output track buffer to make sure it is started
- // by output mixer.
- if (frames == 0 && mBufferQueue.size() == 0) {
- // FIXME borken, replace by getting framesReady() from proxy
- size_t user = 0; // was mCblk->user
- if (user < mFrameCount) {
- frames = mFrameCount - user;
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[frames * channelCount];
- pInBuffer->frameCount = frames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
- } else if (mActive) {
- stop();
- }
+ // Calling write() with a 0 length buffer means that no more data will be written:
+ // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
+ if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
+ stop();
}
return outputBufferFull;
@@ -1859,7 +1849,7 @@ void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
for (size_t i = 0; i < size; i++) {
Buffer *pBuffer = mBufferQueue.itemAt(i);
- delete [] pBuffer->mBuffer;
+ free(pBuffer->mBuffer);
delete pBuffer;
}
mBufferQueue.clear();
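
With OutputTrack::write() now format-agnostic, buffer sizing above is done in bytes as frames * mFrameSize, where the frame size is channel count times bytes per sample (see readOutputParameters_l() in Threads.cpp). A tiny illustration of that arithmetic; bytesPerSample() is a stand-in for audio_bytes_per_sample():

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for audio_bytes_per_sample(), restricted to the two mixer formats used here.
    static size_t bytesPerSample(bool isFloat) { return isFloat ? sizeof(float) : sizeof(int16_t); }

    int main() {
        const size_t channelCount = 2;
        const size_t frames = 1024;
        // mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat)
        const size_t i16FrameSize   = channelCount * bytesPerSample(false);  // 4 bytes per frame
        const size_t floatFrameSize = channelCount * bytesPerSample(true);   // 8 bytes per frame
        printf("copy %zu bytes for 16-bit stereo vs %zu bytes for float stereo\n",
               frames * i16FrameSize, frames * floatFrameSize);              // 4096 vs 8192
        return 0;
    }
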
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
index 9b39e77..e60e6d5 100755
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ b/services/audioflinger/tests/mixer_to_wav_tests.sh
@@ -63,8 +63,18 @@ function createwav() {
# process__genericResampling
# track__Resample / track__genericResample
adb shell test-mixer $1 -s 48000 \
+ -o /sdcard/tm48000grif.wav \
+ sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+ sine:f,6,6000,19000 chirp:i,4,30000
+ adb pull /sdcard/tm48000grif.wav $2
+
+# Test:
+# process__genericResampling
+# track__Resample / track__genericResample
+ adb shell test-mixer $1 -s 48000 \
-o /sdcard/tm48000gr.wav \
- sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000
+ sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+ sine:6,6000,19000
adb pull /sdcard/tm48000gr.wav $2
# Test:
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 9a4fad6..8da6245 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -39,7 +39,7 @@ static void usage(const char* name) {
fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
" [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
" (<input-file> | <command>)+\n", name);
- fprintf(stderr, " -f enable floating point input track\n");
+ fprintf(stderr, " -f enable floating point input track by default\n");
fprintf(stderr, " -m enable floating point mixer output\n");
fprintf(stderr, " -c number of mixer output channels\n");
fprintf(stderr, " -s mixer sample-rate\n");
@@ -47,8 +47,8 @@ static void usage(const char* name) {
fprintf(stderr, " -a <aux-buffer-file>\n");
fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n");
fprintf(stderr, " <input-file> is a WAV file\n");
- fprintf(stderr, " <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n");
- fprintf(stderr, " 'chirp:<channels>,<samplerate>'\n");
+ fprintf(stderr, " <command> can be 'sine:[(i|f),]<channels>,<frequency>,<samplerate>'\n");
+ fprintf(stderr, " 'chirp:[(i|f),]<channels>,<samplerate>'\n");
}
static int writeFile(const char *filename, const void *buffer,
@@ -78,6 +78,18 @@ static int writeFile(const char *filename, const void *buffer,
return EXIT_SUCCESS;
}
+const char *parseFormat(const char *s, bool *useFloat) {
+ if (!strncmp(s, "f,", 2)) {
+ *useFloat = true;
+ return s + 2;
+ }
+ if (!strncmp(s, "i,", 2)) {
+ *useFloat = false;
+ return s + 2;
+ }
+ return s;
+}
+
int main(int argc, char* argv[]) {
const char* const progname = argv[0];
bool useInputFloat = false;
@@ -88,8 +100,9 @@ int main(int argc, char* argv[]) {
std::vector<int> Pvalues;
const char* outputFilename = NULL;
const char* auxFilename = NULL;
- std::vector<int32_t> Names;
- std::vector<SignalProvider> Providers;
+ std::vector<int32_t> names;
+ std::vector<SignalProvider> providers;
+ std::vector<audio_format_t> formats;
for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
switch (ch) {
@@ -138,54 +151,65 @@ int main(int argc, char* argv[]) {
size_t outputFrames = 0;
// create providers for each track
- Providers.resize(argc);
+ names.resize(argc);
+ providers.resize(argc);
+ formats.resize(argc);
for (int i = 0; i < argc; ++i) {
static const char chirp[] = "chirp:";
static const char sine[] = "sine:";
static const double kSeconds = 1;
+ bool useFloat = useInputFloat;
if (!strncmp(argv[i], chirp, strlen(chirp))) {
std::vector<int> v;
+ const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);
- parseCSV(argv[i] + strlen(chirp), v);
+ parseCSV(s, v);
if (v.size() == 2) {
printf("creating chirp(%d %d)\n", v[0], v[1]);
- if (useInputFloat) {
- Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+ if (useFloat) {
+ providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
} else {
- Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+ providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
}
- Providers[i].setIncr(Pvalues);
+ providers[i].setIncr(Pvalues);
} else {
fprintf(stderr, "malformed input '%s'\n", argv[i]);
}
} else if (!strncmp(argv[i], sine, strlen(sine))) {
std::vector<int> v;
+ const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);
- parseCSV(argv[i] + strlen(sine), v);
+ parseCSV(s, v);
if (v.size() == 3) {
printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
- if (useInputFloat) {
- Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+ if (useFloat) {
+ providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
} else {
- Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+ providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
}
- Providers[i].setIncr(Pvalues);
+ providers[i].setIncr(Pvalues);
} else {
fprintf(stderr, "malformed input '%s'\n", argv[i]);
}
} else {
printf("creating filename(%s)\n", argv[i]);
if (useInputFloat) {
- Providers[i].setFile<float>(argv[i]);
+ providers[i].setFile<float>(argv[i]);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
} else {
- Providers[i].setFile<short>(argv[i]);
+ providers[i].setFile<short>(argv[i]);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
}
- Providers[i].setIncr(Pvalues);
+ providers[i].setIncr(Pvalues);
}
// calculate the number of output frames
- size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate
- / Providers[i].getSampleRate();
+ size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
+ / providers[i].getSampleRate();
if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
outputFrames = nframes;
}
@@ -213,22 +237,20 @@ int main(int argc, char* argv[]) {
// create the mixer.
const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960
AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
- audio_format_t inputFormat = useInputFloat
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
audio_format_t mixerFormat = useMixerFloat
? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks
+ float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
static float f0; // zero
// set up the tracks.
- for (size_t i = 0; i < Providers.size(); ++i) {
- //printf("track %d out of %d\n", i, Providers.size());
- uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels());
+ for (size_t i = 0; i < providers.size(); ++i) {
+ //printf("track %d out of %d\n", i, providers.size());
+ uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
int32_t name = mixer->getTrackName(channelMask,
- inputFormat, AUDIO_SESSION_OUTPUT_MIX);
+ formats[i], AUDIO_SESSION_OUTPUT_MIX);
ALOG_ASSERT(name >= 0);
- Names.push_back(name);
- mixer->setBufferProvider(name, &Providers[i]);
+ names[i] = name;
+ mixer->setBufferProvider(name, &providers[i]);
mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
(void *)outputAddr);
mixer->setParameter(
@@ -240,7 +262,7 @@ int main(int argc, char* argv[]) {
name,
AudioMixer::TRACK,
AudioMixer::FORMAT,
- (void *)(uintptr_t)inputFormat);
+ (void *)(uintptr_t)formats[i]);
mixer->setParameter(
name,
AudioMixer::TRACK,
@@ -255,7 +277,7 @@ int main(int argc, char* argv[]) {
name,
AudioMixer::RESAMPLE,
AudioMixer::SAMPLE_RATE,
- (void *)(uintptr_t)Providers[i].getSampleRate());
+ (void *)(uintptr_t)providers[i].getSampleRate());
if (useRamp) {
mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
@@ -277,11 +299,11 @@ int main(int argc, char* argv[]) {
// pump the mixer to process data.
size_t i;
for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
- for (size_t j = 0; j < Names.size(); ++j) {
- mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+ for (size_t j = 0; j < names.size(); ++j) {
+ mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
(char *) outputAddr + i * outputFrameSize);
if (auxFilename) {
- mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+ mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
(char *) auxAddr + i * auxFrameSize);
}
}
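
A quick standalone check of the parseFormat() prefix handling added to test-mixer.cpp (the function body is copied from the change above so the example compiles on its own; the argument strings mirror the ones used in mixer_to_wav_tests.sh):

    #include <cassert>
    #include <cstring>

    // Copy of parseFormat() from test-mixer.cpp.
    const char *parseFormat(const char *s, bool *useFloat) {
        if (!strncmp(s, "f,", 2)) { *useFloat = true;  return s + 2; }
        if (!strncmp(s, "i,", 2)) { *useFloat = false; return s + 2; }
        return s;
    }

    int main() {
        bool useFloat = false;                       // default set by the -f option
        // "sine:f,6,6000,19000" -> float track, CSV part "6,6000,19000"
        assert(!strcmp(parseFormat("f,6,6000,19000", &useFloat), "6,6000,19000") && useFloat);
        // "chirp:i,4,30000" -> 16-bit track, CSV part "4,30000"
        assert(!strcmp(parseFormat("i,4,30000", &useFloat), "4,30000") && !useFloat);
        // No prefix: the track keeps the command-line default format.
        useFloat = true;
        assert(!strcmp(parseFormat("2,4000,7520", &useFloat), "2,4000,7520") && useFloat);
        return 0;
    }
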
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index e184d97..de9551d 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -25,6 +25,7 @@ LOCAL_SRC_FILES:= \
CameraDeviceFactory.cpp \
common/Camera2ClientBase.cpp \
common/CameraDeviceBase.cpp \
+ common/CameraModule.cpp \
common/FrameProcessorBase.cpp \
api1/CameraClient.cpp \
api1/Camera2Client.cpp \
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 1232c32..485b979 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -113,14 +113,17 @@ void CameraService::onFirstRef()
BnCameraService::onFirstRef();
+ camera_module_t *rawModule;
if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,
- (const hw_module_t **)&mModule) < 0) {
+ (const hw_module_t **)&rawModule) < 0) {
ALOGE("Could not load camera HAL module");
mNumberOfCameras = 0;
}
else {
- ALOGI("Loaded \"%s\" camera module", mModule->common.name);
- mNumberOfCameras = mModule->get_number_of_cameras();
+ mModule = new CameraModule(rawModule);
+ const hw_module_t *common = mModule->getRawModule();
+ ALOGI("Loaded \"%s\" camera module", common->name);
+ mNumberOfCameras = mModule->getNumberOfCameras();
if (mNumberOfCameras > MAX_CAMERAS) {
ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
mNumberOfCameras, MAX_CAMERAS);
@@ -130,14 +133,13 @@ void CameraService::onFirstRef()
setCameraFree(i);
}
- if (mModule->common.module_api_version >=
- CAMERA_MODULE_API_VERSION_2_1) {
- mModule->set_callbacks(this);
+ if (common->module_api_version >= CAMERA_MODULE_API_VERSION_2_1) {
+ mModule->setCallbacks(this);
}
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
- if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_2) {
+ if (common->module_api_version >= CAMERA_MODULE_API_VERSION_2_2) {
setUpVendorTags();
}
@@ -152,6 +154,9 @@ CameraService::~CameraService() {
}
}
+ if (mModule) {
+ delete mModule;
+ }
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
gCameraService = NULL;
}
@@ -236,7 +241,7 @@ status_t CameraService::getCameraInfo(int cameraId,
struct camera_info info;
status_t rc = filterGetInfoErrorCode(
- mModule->get_camera_info(cameraId, &info));
+ mModule->getCameraInfo(cameraId, &info));
cameraInfo->facing = info.facing;
cameraInfo->orientation = info.orientation;
return rc;
@@ -347,7 +352,7 @@ status_t CameraService::getCameraCharacteristics(int cameraId,
int facing;
status_t ret = OK;
- if (mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_0 ||
+ if (mModule->getRawModule()->module_api_version < CAMERA_MODULE_API_VERSION_2_0 ||
getDeviceVersion(cameraId, &facing) <= CAMERA_DEVICE_API_VERSION_2_1 ) {
/**
* Backwards compatibility mode for old HALs:
@@ -368,7 +373,7 @@ status_t CameraService::getCameraCharacteristics(int cameraId,
* Normal HAL 2.1+ codepath.
*/
struct camera_info info;
- ret = filterGetInfoErrorCode(mModule->get_camera_info(cameraId, &info));
+ ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
*cameraInfo = info.static_camera_characteristics;
}
@@ -387,12 +392,12 @@ status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescript
int CameraService::getDeviceVersion(int cameraId, int* facing) {
struct camera_info info;
- if (mModule->get_camera_info(cameraId, &info) != OK) {
+ if (mModule->getCameraInfo(cameraId, &info) != OK) {
return -1;
}
int deviceVersion;
- if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_0) {
+ if (mModule->getRawModule()->module_api_version >= CAMERA_MODULE_API_VERSION_2_0) {
deviceVersion = info.device_version;
} else {
deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
@@ -433,13 +438,13 @@ bool CameraService::setUpVendorTags() {
vendor_tag_ops_t vOps = vendor_tag_ops_t();
// Check if vendor operations have been implemented
- if (mModule->get_vendor_tag_ops == NULL) {
+ if (!mModule->isVendorTagDefined()) {
ALOGI("%s: No vendor tags defined for this device.", __FUNCTION__);
return false;
}
ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
- mModule->get_vendor_tag_ops(&vOps);
+ mModule->getVendorTagOps(&vOps);
ATRACE_END();
// Ensure all vendor operations are present
@@ -789,8 +794,9 @@ status_t CameraService::connectLegacy(
/*out*/
sp<ICamera>& device) {
+ int apiVersion = mModule->getRawModule()->module_api_version;
if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
- mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_3) {
+ apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
/*
* Either the HAL version is unspecified in which case this just creates
* a camera client selected by the latest device version, or
@@ -798,7 +804,7 @@ status_t CameraService::connectLegacy(
* the open_legacy call
*/
ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
- __FUNCTION__, mModule->common.module_api_version);
+ __FUNCTION__, apiVersion);
return INVALID_OPERATION;
}
@@ -1633,14 +1639,11 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
return NO_ERROR;
}
- result = String8::format("Camera module HAL API version: 0x%x\n",
- mModule->common.hal_api_version);
- result.appendFormat("Camera module API version: 0x%x\n",
- mModule->common.module_api_version);
- result.appendFormat("Camera module name: %s\n",
- mModule->common.name);
- result.appendFormat("Camera module author: %s\n",
- mModule->common.author);
+ const hw_module_t* common = mModule->getRawModule();
+ result = String8::format("Camera module HAL API version: 0x%x\n", common->hal_api_version);
+ result.appendFormat("Camera module API version: 0x%x\n", common->module_api_version);
+ result.appendFormat("Camera module name: %s\n", common->name);
+ result.appendFormat("Camera module author: %s\n", common->author);
result.appendFormat("Number of camera devices: %d\n\n", mNumberOfCameras);
sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
@@ -1660,7 +1663,7 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
result = String8::format("Camera %d static information:\n", i);
camera_info info;
- status_t rc = mModule->get_camera_info(i, &info);
+ status_t rc = mModule->getCameraInfo(i, &info);
if (rc != OK) {
result.appendFormat(" Error reading static information!\n");
write(fd, result.string(), result.size());
@@ -1669,8 +1672,7 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
info.facing == CAMERA_FACING_BACK ? "BACK" : "FRONT");
result.appendFormat(" Orientation: %d\n", info.orientation);
int deviceVersion;
- if (mModule->common.module_api_version <
- CAMERA_MODULE_API_VERSION_2_0) {
+ if (common->module_api_version < CAMERA_MODULE_API_VERSION_2_0) {
deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
} else {
deviceVersion = info.device_version;
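
Note on the CameraService.cpp hunks above: every raw access to the module struct (mModule->common.module_api_version, mModule->get_camera_info(), mModule->get_vendor_tag_ops) is now routed through the CameraModule wrapper introduced below. The version-selection rule that getDeviceVersion() and dump() both apply can be summarized by the following sketch; the helper name pickDeviceVersion is illustrative and not part of this change:

    // Illustrative only: modules older than CAMERA_MODULE_API_VERSION_2_0
    // can only expose HAL1 devices; newer modules report a per-device version.
    static int pickDeviceVersion(const hw_module_t* common, const camera_info& info) {
        if (common->module_api_version < CAMERA_MODULE_API_VERSION_2_0) {
            return CAMERA_DEVICE_API_VERSION_1_0;
        }
        return info.device_version;
    }
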
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 126d8d9..7d0df3a 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -37,6 +37,8 @@
#include <camera/ICameraServiceListener.h>
+#include "common/CameraModule.h"
+
/* This needs to be increased if we can have more cameras */
#define MAX_CAMERAS 2
@@ -153,7 +155,7 @@ public:
class BasicClient : public virtual RefBase {
public:
- virtual status_t initialize(camera_module_t *module) = 0;
+ virtual status_t initialize(CameraModule *module) = 0;
virtual void disconnect();
// because we can't virtually inherit IInterface, which breaks
@@ -385,7 +387,7 @@ private:
sp<MediaPlayer> mSoundPlayer[NUM_SOUNDS];
int mSoundRef; // reference count (release all MediaPlayer when 0)
- camera_module_t *mModule;
+ CameraModule* mModule;
Vector<sp<ICameraServiceListener> >
mListenerList;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 0ed5586..4ac5166 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -67,7 +67,7 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
mLegacyMode = legacyMode;
}
-status_t Camera2Client::initialize(camera_module_t *module)
+status_t Camera2Client::initialize(CameraModule *module)
{
ATRACE_CALL();
ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index d68bb29..5a8241f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -94,7 +94,7 @@ public:
virtual ~Camera2Client();
- status_t initialize(camera_module_t *module);
+ status_t initialize(CameraModule *module);
virtual status_t dump(int fd, const Vector<String16>& args);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index bbb2fe0..6bea3b6 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -59,7 +59,7 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService,
LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
-status_t CameraClient::initialize(camera_module_t *module) {
+status_t CameraClient::initialize(CameraModule *module) {
int callingPid = getCallingPid();
status_t res;
@@ -75,7 +75,7 @@ status_t CameraClient::initialize(camera_module_t *module) {
snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
mHardware = new CameraHardwareInterface(camera_device_name);
- res = mHardware->initialize(&module->common);
+ res = mHardware->initialize(module);
if (res != OK) {
ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 63a9d0f..95616b2 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -68,7 +68,7 @@ public:
bool legacyMode = false);
~CameraClient();
- status_t initialize(camera_module_t *module);
+ status_t initialize(CameraModule *module);
status_t dump(int fd, const Vector<String16>& args);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 6a1ee44..acc092c 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -71,7 +71,7 @@ CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
ALOGI("CameraDeviceClient %d: Opened", cameraId);
}
-status_t CameraDeviceClient::initialize(camera_module_t *module)
+status_t CameraDeviceClient::initialize(CameraModule *module)
{
ATRACE_CALL();
status_t res;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 84e46b7..e687175 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -119,7 +119,7 @@ public:
int servicePid);
virtual ~CameraDeviceClient();
- virtual status_t initialize(camera_module_t *module);
+ virtual status_t initialize(CameraModule *module);
virtual status_t dump(int fd, const Vector<String16>& args);
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
index 59e5083..30a89c2 100644
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
@@ -50,7 +50,7 @@ ProCamera2Client::ProCamera2Client(const sp<CameraService>& cameraService,
mExclusiveLock = false;
}
-status_t ProCamera2Client::initialize(camera_module_t *module)
+status_t ProCamera2Client::initialize(CameraModule *module)
{
ATRACE_CALL();
status_t res;
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
index 9d83122..7f5f6ac 100644
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.h
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
@@ -85,7 +85,7 @@ public:
int servicePid);
virtual ~ProCamera2Client();
- virtual status_t initialize(camera_module_t *module);
+ virtual status_t initialize(CameraModule *module);
virtual status_t dump(int fd, const Vector<String16>& args);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 453c8bd..0415d67 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -78,7 +78,7 @@ status_t Camera2ClientBase<TClientBase>::checkPid(const char* checkLocation)
}
template <typename TClientBase>
-status_t Camera2ClientBase<TClientBase>::initialize(camera_module_t *module) {
+status_t Camera2ClientBase<TClientBase>::initialize(CameraModule *module) {
ATRACE_CALL();
ALOGV("%s: Initializing client for camera %d", __FUNCTION__,
TClientBase::mCameraId);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index e09c1b5..eb21d55 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -18,6 +18,7 @@
#define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
#include "common/CameraDeviceBase.h"
+#include "common/CameraModule.h"
#include "camera/CaptureResult.h"
namespace android {
@@ -55,7 +56,7 @@ public:
int servicePid);
virtual ~Camera2ClientBase();
- virtual status_t initialize(camera_module_t *module);
+ virtual status_t initialize(CameraModule *module);
virtual status_t dump(int fd, const Vector<String16>& args);
/**
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index d26e20c..06615f6 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -29,6 +29,7 @@
#include "hardware/camera3.h"
#include "camera/CameraMetadata.h"
#include "camera/CaptureResult.h"
+#include "common/CameraModule.h"
namespace android {
@@ -45,7 +46,7 @@ class CameraDeviceBase : public virtual RefBase {
*/
virtual int getId() const = 0;
- virtual status_t initialize(camera_module_t *module) = 0;
+ virtual status_t initialize(CameraModule *module) = 0;
virtual status_t disconnect() = 0;
virtual status_t dump(int fd, const Vector<String16> &args) = 0;
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
new file mode 100644
index 0000000..bbf47e8
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraModule"
+//#define LOG_NDEBUG 0
+
+#include "CameraModule.h"
+
+namespace android {
+
+void CameraModule::deriveCameraCharacteristicsKeys(
+ uint32_t deviceVersion, CameraMetadata &chars) {
+ // HAL1 devices should not reach here
+ if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
+ ALOGV("%s: Cannot derive keys for HAL version < 2.0");
+ return;
+ }
+
+ // Keys added in HAL3.3
+ if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
+ Vector<uint8_t> controlModes;
+ uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
+ chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
+ data = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE;
+ chars.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &data, /*count*/1);
+ controlModes.push(ANDROID_CONTROL_MODE_OFF);
+ controlModes.push(ANDROID_CONTROL_MODE_AUTO);
+ camera_metadata_entry entry = chars.find(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+ if (entry.count > 1 || (entry.count == 1 && entry.data.u8[0] != ANDROID_CONTROL_SCENE_MODE_DISABLED)) {
+ controlModes.push(ANDROID_CONTROL_MODE_USE_SCENE_MODE);
+ }
+ chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);
+ }
+ return;
+}
+
+CameraModule::CameraModule(camera_module_t *module) {
+ if (module == NULL) {
+ ALOGE("%s: camera hardware module must not be null", __FUNCTION__);
+ assert(0);
+ }
+
+ mModule = module;
+ for (int i = 0; i < MAX_CAMERAS_PER_MODULE; i++) {
+ mCameraInfoCached[i] = false;
+ }
+}
+
+int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
+ Mutex::Autolock lock(mCameraInfoLock);
+ if (cameraId < 0 || cameraId >= MAX_CAMERAS_PER_MODULE) {
+ ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+ return -EINVAL;
+ }
+
+ camera_info &wrappedInfo = mCameraInfo[cameraId];
+ if (!mCameraInfoCached[cameraId]) {
+ camera_info rawInfo;
+ int ret = mModule->get_camera_info(cameraId, &rawInfo);
+ if (ret != 0) {
+ return ret;
+ }
+ CameraMetadata &m = mCameraCharacteristics[cameraId];
+ m = rawInfo.static_camera_characteristics;
+ int deviceVersion;
+ int apiVersion = mModule->common.module_api_version;
+ if (apiVersion >= CAMERA_MODULE_API_VERSION_2_0) {
+ deviceVersion = rawInfo.device_version;
+ } else {
+ deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+ }
+ deriveCameraCharacteristicsKeys(deviceVersion, m);
+ wrappedInfo = rawInfo;
+ wrappedInfo.static_camera_characteristics = m.getAndLock();
+ mCameraInfoCached[cameraId] = true;
+ }
+ *info = wrappedInfo;
+ return 0;
+}
+
+int CameraModule::open(const char* id, struct hw_device_t** device) {
+ return mModule->common.methods->open(&mModule->common, id, device);
+}
+
+int CameraModule::openLegacy(
+ const char* id, uint32_t halVersion, struct hw_device_t** device) {
+ return mModule->open_legacy(&mModule->common, id, halVersion, device);
+}
+
+const hw_module_t* CameraModule::getRawModule() {
+ return &mModule->common;
+}
+
+int CameraModule::getNumberOfCameras() {
+ return mModule->get_number_of_cameras();
+}
+
+int CameraModule::setCallbacks(const camera_module_callbacks_t *callbacks) {
+ return mModule->set_callbacks(callbacks);
+}
+
+bool CameraModule::isVendorTagDefined() {
+ return mModule->get_vendor_tag_ops != NULL;
+}
+
+void CameraModule::getVendorTagOps(vendor_tag_ops_t* ops) {
+ if (mModule->get_vendor_tag_ops) {
+ mModule->get_vendor_tag_ops(ops);
+ }
+}
+
+int CameraModule::setTorchMode(const char* camera_id, bool enable) {
+ return mModule->set_torch_mode(camera_id, enable);
+}
+
+}; // namespace android
+
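
For context, a minimal usage sketch of the wrapper added above, assuming the module has already been fetched with the standard libhardware hw_get_module() entry point (error handling elided):

    // Sketch only: wrap the raw HAL module and read the (cached) camera info.
    camera_module_t* rawModule;
    int err = hw_get_module(CAMERA_HARDWARE_MODULE_ID,
                            (const hw_module_t**)&rawModule);
    if (err == 0) {
        CameraModule module(rawModule);
        camera_info info;
        if (module.getCameraInfo(0, &info) == 0) {
            // For pre-3.3 HALs, info.static_camera_characteristics now also
            // contains the keys filled in by deriveCameraCharacteristicsKeys().
        }
    }
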
diff --git a/services/camera/libcameraservice/common/CameraModule.h b/services/camera/libcameraservice/common/CameraModule.h
new file mode 100644
index 0000000..31b9ae2
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraModule.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERAMODULE_H
+#define ANDROID_SERVERS_CAMERA_CAMERAMODULE_H
+
+#include <hardware/camera.h>
+#include <camera/CameraMetadata.h>
+#include <utils/Mutex.h>
+
+/* This needs to be increased if we can have more cameras */
+#define MAX_CAMERAS_PER_MODULE 2
+
+
+namespace android {
+/**
+ * A wrapper class for the HAL camera module.
+ *
+ * This class wraps the camera_module_t returned from the HAL so that its
+ * get_camera_info implementation can derive camera characteristics keys that
+ * are defined in newer HAL versions but missing from an older HAL.
+ */
+class CameraModule {
+public:
+ CameraModule(camera_module_t *module);
+
+ const hw_module_t* getRawModule();
+ int getCameraInfo(int cameraId, struct camera_info *info);
+ int getNumberOfCameras(void);
+ int open(const char* id, struct hw_device_t** device);
+ int openLegacy(const char* id, uint32_t halVersion, struct hw_device_t** device);
+ int setCallbacks(const camera_module_callbacks_t *callbacks);
+ bool isVendorTagDefined();
+ void getVendorTagOps(vendor_tag_ops_t* ops);
+ int setTorchMode(const char* camera_id, bool enable);
+
+private:
+ // Derive camera characteristics keys that were added after the given HAL device version
+ static void deriveCameraCharacteristicsKeys(uint32_t deviceVersion, CameraMetadata &chars);
+ camera_module_t *mModule;
+ CameraMetadata mCameraCharacteristics[MAX_CAMERAS_PER_MODULE];
+ camera_info mCameraInfo[MAX_CAMERAS_PER_MODULE];
+ bool mCameraInfoCached[MAX_CAMERAS_PER_MODULE];
+ Mutex mCameraInfoLock;
+};
+
+} // namespace android
+
+#endif
+
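
One design note on the declaration above: all three per-camera caches (mCameraInfo, mCameraCharacteristics, mCameraInfoCached) are guarded by mCameraInfoLock, so the HAL's get_camera_info is queried at most once per camera id no matter how many binder threads ask. The loop below is illustrative only, not part of this change:

    // Illustrative only: the second call is served entirely from the cache.
    for (int id = 0; id < module.getNumberOfCameras(); id++) {
        camera_info info;
        module.getCameraInfo(id, &info);  // first call: HAL query + key derivation
        module.getCameraInfo(id, &info);  // second call: cached camera_info
    }
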
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 1935c2b..9e1cdc9 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -89,24 +89,23 @@ public:
}
}
- status_t initialize(hw_module_t *module)
+ status_t initialize(CameraModule *module)
{
ALOGI("Opening camera %s", mName.string());
- camera_module_t *cameraModule = reinterpret_cast<camera_module_t *>(module);
camera_info info;
- status_t res = cameraModule->get_camera_info(atoi(mName.string()), &info);
+ status_t res = module->getCameraInfo(atoi(mName.string()), &info);
if (res != OK) return res;
int rc = OK;
- if (module->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 &&
+ if (module->getRawModule()->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 &&
info.device_version > CAMERA_DEVICE_API_VERSION_1_0) {
// Open higher version camera device as HAL1.0 device.
- rc = cameraModule->open_legacy(module, mName.string(),
- CAMERA_DEVICE_API_VERSION_1_0,
- (hw_device_t **)&mDevice);
+ rc = module->openLegacy(mName.string(),
+ CAMERA_DEVICE_API_VERSION_1_0,
+ (hw_device_t **)&mDevice);
} else {
- rc = CameraService::filterOpenErrorCode(module->methods->open(
- module, mName.string(), (hw_device_t **)&mDevice));
+ rc = CameraService::filterOpenErrorCode(module->open(
+ mName.string(), (hw_device_t **)&mDevice));
}
if (rc != OK) {
ALOGE("Could not open camera %s: %d", mName.string(), rc);
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index d1158d6..be66c4d 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -53,7 +53,7 @@ int Camera2Device::getId() const {
return mId;
}
-status_t Camera2Device::initialize(camera_module_t *module)
+status_t Camera2Device::initialize(CameraModule *module)
{
ATRACE_CALL();
ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
@@ -68,8 +68,8 @@ status_t Camera2Device::initialize(camera_module_t *module)
camera2_device_t *device;
- res = CameraService::filterOpenErrorCode(module->common.methods->open(
- &module->common, name, reinterpret_cast<hw_device_t**>(&device)));
+ res = CameraService::filterOpenErrorCode(module->open(
+ name, reinterpret_cast<hw_device_t**>(&device)));
if (res != OK) {
ALOGE("%s: Could not open camera %d: %s (%d)", __FUNCTION__,
@@ -87,7 +87,7 @@ status_t Camera2Device::initialize(camera_module_t *module)
}
camera_info info;
- res = module->get_camera_info(mId, &info);
+ res = module->getCameraInfo(mId, &info);
if (res != OK ) return res;
if (info.device_version != device->common.version) {
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 4def8ae..1cc5482 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -43,7 +43,7 @@ class Camera2Device: public CameraDeviceBase {
* CameraDevice interface
*/
virtual int getId() const;
- virtual status_t initialize(camera_module_t *module);
+ virtual status_t initialize(CameraModule *module);
virtual status_t disconnect();
virtual status_t dump(int fd, const Vector<String16>& args);
virtual const CameraMetadata& info() const;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 53e6fa9..9a4e5ac 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -86,7 +86,7 @@ int Camera3Device::getId() const {
* CameraDeviceBase interface
*/
-status_t Camera3Device::initialize(camera_module_t *module)
+status_t Camera3Device::initialize(CameraModule *module)
{
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
@@ -106,9 +106,8 @@ status_t Camera3Device::initialize(camera_module_t *module)
camera3_device_t *device;
ATRACE_BEGIN("camera3->open");
- res = CameraService::filterOpenErrorCode(module->common.methods->open(
- &module->common, deviceName.string(),
- reinterpret_cast<hw_device_t**>(&device)));
+ res = CameraService::filterOpenErrorCode(module->open(
+ deviceName.string(), reinterpret_cast<hw_device_t**>(&device)));
ATRACE_END();
if (res != OK) {
@@ -127,7 +126,7 @@ status_t Camera3Device::initialize(camera_module_t *module)
}
camera_info info;
- res = CameraService::filterGetInfoErrorCode(module->get_camera_info(
+ res = CameraService::filterGetInfoErrorCode(module->getCameraInfo(
mId, &info));
if (res != OK) return res;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ec8dc10..de10cfe 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -73,7 +73,7 @@ class Camera3Device :
virtual int getId() const;
// Transitions to idle state on success.
- virtual status_t initialize(camera_module_t *module);
+ virtual status_t initialize(CameraModule *module);
virtual status_t disconnect();
virtual status_t dump(int fd, const Vector<String16> &args);
virtual const CameraMetadata& info() const;
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 95f2fef..03438bf 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -10,4 +10,6 @@ LOCAL_MODULE:= libmedialogservice
LOCAL_32_BIT_ONLY := true
+LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+
include $(BUILD_SHARED_LIBRARY)