-rw-r--r--  camera/Android.mk | 1
-rw-r--r--  camera/ICameraService.cpp | 3
-rw-r--r--  camera/ICameraServiceProxy.cpp | 55
-rw-r--r--  include/camera/ICameraServiceProxy.h | 52
-rw-r--r--  media/libmedia/JetPlayer.cpp | 8
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 1
-rw-r--r--  media/libmediaplayerservice/nuplayer/GenericSource.cpp | 28
-rw-r--r--  media/libmediaplayerservice/nuplayer/GenericSource.h | 3
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp | 5
-rw-r--r--  media/libstagefright/ACodec.cpp | 6
-rw-r--r--  media/libstagefright/AudioSource.cpp | 3
-rw-r--r--  media/libstagefright/NuCachedSource2.cpp | 7
-rw-r--r--  media/libstagefright/rtsp/MyHandler.h | 2
-rw-r--r--  services/audioflinger/AudioFlinger.h | 9
-rw-r--r--  services/audioflinger/Threads.cpp | 107
-rw-r--r--  services/audiopolicy/managerdefault/AudioPolicyManager.cpp | 56
-rw-r--r--  services/camera/libcameraservice/CameraService.cpp | 19
-rw-r--r--  services/camera/libcameraservice/CameraService.h | 2
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.cpp | 10
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.h | 2
-rw-r--r--  services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp | 54
-rw-r--r--  services/camera/libcameraservice/api1/client2/CaptureSequencer.h | 9
22 files changed, 356 insertions(+), 86 deletions(-)
diff --git a/camera/Android.mk b/camera/Android.mk
index 4c4700b..471cb0d 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -28,6 +28,7 @@ LOCAL_SRC_FILES:= \
ICameraClient.cpp \
ICameraService.cpp \
ICameraServiceListener.cpp \
+ ICameraServiceProxy.cpp \
ICameraRecordingProxy.cpp \
ICameraRecordingProxyListener.cpp \
camera2/ICameraDeviceUser.cpp \
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index a02dbe2..0071e70 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -494,7 +494,8 @@ status_t BnCameraService::onTransact(
__FUNCTION__, len);
return FAILED_TRANSACTION;
}
- int32_t events[len] = {};
+ int32_t events[len];
+ memset(events, 0, sizeof(int32_t) * len);
status_t status = data.read(events, sizeof(int32_t) * len);
if (status != NO_ERROR) {
ALOGE("%s: Received poorly formatted binder request: notifySystemEvent.",
diff --git a/camera/ICameraServiceProxy.cpp b/camera/ICameraServiceProxy.cpp
new file mode 100644
index 0000000..06a5afb
--- /dev/null
+++ b/camera/ICameraServiceProxy.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BpCameraServiceProxy"
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+
+#include <camera/ICameraServiceProxy.h>
+
+namespace android {
+
+class BpCameraServiceProxy: public BpInterface<ICameraServiceProxy> {
+public:
+ BpCameraServiceProxy(const sp<IBinder>& impl) : BpInterface<ICameraServiceProxy>(impl) {}
+
+ virtual void pingForUserUpdate() {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
+ remote()->transact(BnCameraServiceProxy::PING_FOR_USER_UPDATE, data, &reply,
+ IBinder::FLAG_ONEWAY);
+ }
+};
+
+
+IMPLEMENT_META_INTERFACE(CameraServiceProxy, "android.hardware.ICameraServiceProxy");
+
+status_t BnCameraServiceProxy::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags) {
+ switch(code) {
+ case PING_FOR_USER_UPDATE: {
+ CHECK_INTERFACE(ICameraServiceProxy, data, reply);
+ pingForUserUpdate();
+ return NO_ERROR;
+ } break;
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+}; // namespace android
+
diff --git a/include/camera/ICameraServiceProxy.h b/include/camera/ICameraServiceProxy.h
new file mode 100644
index 0000000..12a555f
--- /dev/null
+++ b/include/camera/ICameraServiceProxy.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+#define ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class ICameraServiceProxy : public IInterface {
+public:
+ enum {
+ PING_FOR_USER_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
+ };
+
+ DECLARE_META_INTERFACE(CameraServiceProxy);
+
+ virtual void pingForUserUpdate() = 0;
+};
+
+class BnCameraServiceProxy: public BnInterface<ICameraServiceProxy>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+
+
+}; // namespace android
+
+#endif // ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+
+
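(The patch only adds the proxy (Bp) and stub (Bn) halves of the new interface; the service implementation itself lives outside this change. A minimal sketch of how a hosting process could implement and register it, assuming the "media.camera.proxy" name that CameraService looks up later in this patch, might be:

    // Sketch only; the real implementation of this service is not part of this patch.
    #include <binder/IServiceManager.h>
    #include <camera/ICameraServiceProxy.h>

    namespace android {

    class CameraServiceProxyStub : public BnCameraServiceProxy {
    public:
        virtual void pingForUserUpdate() {
            // Hypothetical handler: re-query the current user state and push
            // it back to the camera service, e.g. via notifySystemEvent().
        }
    };

    } // namespace android

    // Registration from the hosting process:
    //   android::defaultServiceManager()->addService(
    //           android::String16("media.camera.proxy"),
    //           new android::CameraServiceProxyStub());
)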
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 271be0c..34deb59 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -85,12 +85,18 @@ int JetPlayer::init()
// create the output AudioTrack
mAudioTrack = new AudioTrack();
- mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parameterize this
+ status_t status = mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parameterize this
pLibConfig->sampleRate,
AUDIO_FORMAT_PCM_16_BIT,
audio_channel_out_mask_from_count(pLibConfig->numChannels),
(size_t) mTrackBufferSize,
AUDIO_OUTPUT_FLAG_NONE);
+ if (status != OK) {
+ ALOGE("JetPlayer::init(): Error initializing JET library; AudioTrack error %d", status);
+ mAudioTrack.clear();
+ mState = EAS_STATE_ERROR;
+ return EAS_FAILURE;
+ }
// create render and playback thread
{
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 891a9e9..efbc0d6 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1628,6 +1628,7 @@ status_t MediaPlayerService::AudioOutput::open(
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
ALOGE("Unable to create audio track");
delete newcbd;
+ // t goes out of scope, so reference count drops to zero
return NO_INIT;
} else {
// successful AudioTrack initialization implies a legacy stream type was generated
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 5e7b644..88a7745 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1510,17 +1510,7 @@ void NuPlayer::GenericSource::readBuffer(
mVideoTimeUs = timeUs;
}
- // formatChange && seeking: track whose source is changed during selection
- // formatChange && !seeking: track whose source is not changed during selection
- // !formatChange: normal seek
- if ((seeking || formatChange)
- && (trackType == MEDIA_TRACK_TYPE_AUDIO
- || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
- ATSParser::DiscontinuityType type = (formatChange && seeking)
- ? ATSParser::DISCONTINUITY_FORMATCHANGE
- : ATSParser::DISCONTINUITY_NONE;
- track->mPackets->queueDiscontinuity( type, NULL, true /* discard */);
- }
+ queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
sp<ABuffer> buffer = mediaBufferToABuffer(
mbuf, trackType, seekTimeUs, actualTimeUs);
@@ -1538,10 +1528,26 @@ void NuPlayer::GenericSource::readBuffer(
false /* discard */);
#endif
} else {
+ queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
track->mPackets->signalEOS(err);
break;
}
}
}
+void NuPlayer::GenericSource::queueDiscontinuityIfNeeded(
+ bool seeking, bool formatChange, media_track_type trackType, Track *track) {
+ // formatChange && seeking: track whose source is changed during selection
+ // formatChange && !seeking: track whose source is not changed during selection
+ // !formatChange: normal seek
+ if ((seeking || formatChange)
+ && (trackType == MEDIA_TRACK_TYPE_AUDIO
+ || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
+ ATSParser::DiscontinuityType type = (formatChange && seeking)
+ ? ATSParser::DISCONTINUITY_FORMATCHANGE
+ : ATSParser::DISCONTINUITY_NONE;
+ track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */);
+ }
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 7fab051..0a75e4c 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -200,6 +200,9 @@ private:
media_track_type trackType,
int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
+ void queueDiscontinuityIfNeeded(
+ bool seeking, bool formatChange, media_track_type trackType, Track *track);
+
void schedulePollBuffering();
void cancelPollBuffering();
void restartPollBuffering();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 376c93a..d169964 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -414,6 +414,11 @@ bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
sp<ABuffer> buffer;
mCodec->getInputBuffer(index, &buffer);
+ if (buffer == NULL) {
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
if (index >= mInputBuffers.size()) {
for (size_t i = mInputBuffers.size(); i <= index; ++i) {
mInputBuffers.add();
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 34bd4c7..70480a2 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2857,7 +2857,9 @@ status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
break;
}
- ALOGI("setupVideoEncoder succeeded");
+ if (err == OK) {
+ ALOGI("setupVideoEncoder succeeded");
+ }
return err;
}
@@ -5124,7 +5126,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
sp<CodecObserver> observer = new CodecObserver;
IOMX::node_id node = 0;
- status_t err = OMX_ErrorComponentNotFound;
+ status_t err = NAME_NOT_FOUND;
for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
++matchIndex) {
componentName = matchingCodecs.itemAt(matchIndex).mName.string();
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index e5a6a9b..34f0148 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -85,6 +85,9 @@ AudioSource::AudioSource(
this,
frameCount /*notificationFrames*/);
mInitCheck = mRecord->initCheck();
+ if (mInitCheck != OK) {
+ mRecord.clear();
+ }
} else {
mInitCheck = status;
}
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 1c53b40..f82636b 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -583,6 +583,13 @@ ssize_t NuCachedSource2::readInternal(off64_t offset, void *data, size_t size) {
Mutex::Autolock autoLock(mLock);
+ // If we're disconnecting, return EOS and don't access *data pointer.
+ // data could be on the stack of the caller to NuCachedSource2::readAt(),
+ // which may have exited already.
+ if (mDisconnecting) {
+ return ERROR_END_OF_STREAM;
+ }
+
if (!mFetching) {
mLastAccessPos = offset;
restartPrefetcherIfNecessary_l(
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index ba17e90..e64a7a1 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1737,7 +1737,7 @@ private:
}
if (!mAllTracksHaveTime) {
- bool allTracksHaveTime = true;
+ bool allTracksHaveTime = (mTracks.size() > 0);
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *track = &mTracks.editItemAt(i);
if (track->mNTPAnchorUs < 0) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 73590ae..d087ced 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -359,6 +359,15 @@ private:
// check that channelMask is the "canonical" one we expect for the channelCount.
return channelMask == audio_channel_out_mask_from_count(channelCount);
}
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ if (kEnableExtendedChannels) {
+ const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ if (channelCount >= FCC_2 // mono is not supported at this time
+ && channelCount <= AudioMixer::MAX_NUM_CHANNELS) {
+ return true;
+ }
+ }
+ return false;
default:
return false;
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1ed4e18..9f08851 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -709,49 +709,62 @@ void AudioFlinger::ThreadBase::processConfigEvents_l()
String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
String8 s;
- if (output) {
- if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
- if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
- if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
- if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
- if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
- if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
- if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
- if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
- if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
- } else {
- if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
- if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
- if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
- if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
- if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
- if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
- if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
- if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
- if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
- if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
- if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
- if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
- if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
- if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
- if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
- }
- int len = s.length();
- if (s.length() > 2) {
- char *str = s.lockBuffer(len);
- s.unlockBuffer(len - 2);
- }
- return s;
+ const audio_channel_representation_t representation = audio_channel_mask_get_representation(mask);
+
+ switch (representation) {
+ case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
+ if (output) {
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+ if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
+ } else {
+ if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
+ if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
+ if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
+ if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
+ if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
+ if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
+ if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+ if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
+ if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
+ if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
+ }
+ const int len = s.length();
+ if (len > 2) {
+ char *str = s.lockBuffer(len); // needed?
+ s.unlockBuffer(len - 2); // remove trailing ", "
+ }
+ return s;
+ }
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
+ return s;
+ default:
+ s.appendFormat("unknown mask, representation:%d bits:%#x",
+ representation, audio_channel_mask_get_bits(mask));
+ return s;
+ }
}
void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
@@ -1597,10 +1610,12 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
) &&
// PCM data
audio_is_linear_pcm(format) &&
- // identical channel mask to sink, or mono in and stereo sink
+ // TODO: extract as a data library function that checks that a computationally
+ // expensive downmixer is not required: isFastOutputChannelConversion()
(channelMask == mChannelMask ||
- (channelMask == AUDIO_CHANNEL_OUT_MONO &&
- mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) &&
+ mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
+ (channelMask == AUDIO_CHANNEL_OUT_MONO
+ /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
// hardware sample rate
(sampleRate == mSampleRate) &&
// normal mixer has an associated fast mixer
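(The TODO above proposes moving this channel test into a shared helper. A hypothetical isFastOutputChannelConversion(), mirroring the condition the patch introduces, could look like:

    // Hypothetical helper named by the TODO; not part of this patch.
    #include <system/audio.h>

    static inline bool isFastOutputChannelConversion(
            audio_channel_mask_t trackMask, audio_channel_mask_t sinkMask) {
        return trackMask == sinkMask                        // identical masks: no conversion
                || sinkMask != AUDIO_CHANNEL_OUT_STEREO     // non-stereo sinks take the fast path
                || trackMask == AUDIO_CHANNEL_OUT_MONO;     // mono into a stereo sink is cheap
    }
)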
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index d1ee400..17060e9 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -579,24 +579,43 @@ sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
audio_channel_mask_t channelMask,
audio_output_flags_t flags)
{
+ // only retain flags that will drive the direct output profile selection
+ // if explicitly requested
+ static const uint32_t kRelevantFlags =
+ (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ flags =
+ (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+
+ sp<IOProfile> profile;
+
for (size_t i = 0; i < mHwModules.size(); i++) {
if (mHwModules[i]->mHandle == 0) {
continue;
}
for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
- bool found = profile->isCompatibleProfile(device, String8(""),
+ sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
+ if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
channelMask, NULL /*updatedChannelMask*/,
- flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ?
- AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT);
- if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
- return profile;
+ flags)) {
+ continue;
+ }
+ // reject profiles not corresponding to a device currently available
+ if ((mAvailableOutputDevices.types() & curProfile->mSupportedDevices.types()) == 0) {
+ continue;
+ }
+ // if several profiles are compatible, give priority to one with offload capability
+ if (profile != 0 && ((curProfile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
+ continue;
+ }
+ profile = curProfile;
+ if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ break;
}
}
}
- return 0;
+ return profile;
}
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
@@ -819,10 +838,27 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice(
if (outputDesc != NULL) {
closeOutput(outputDesc->mIoHandle);
}
+
+ // if the selected profile is offloaded and no offload info was specified,
+ // create a default one
+ audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
+ if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ defaultOffloadInfo.sample_rate = samplingRate;
+ defaultOffloadInfo.channel_mask = channelMask;
+ defaultOffloadInfo.format = format;
+ defaultOffloadInfo.stream_type = stream;
+ defaultOffloadInfo.bit_rate = 0;
+ defaultOffloadInfo.duration_us = -1;
+ defaultOffloadInfo.has_video = true; // conservative
+ defaultOffloadInfo.is_streaming = true; // likely
+ offloadInfo = &defaultOffloadInfo;
+ }
+
outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
outputDesc->mDevice = device;
outputDesc->mLatency = 0;
- outputDesc->mFlags =(audio_output_flags_t) (outputDesc->mFlags | flags);
+ outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = samplingRate;
config.channel_mask = channelMask;
@@ -854,10 +890,6 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice(
if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
goto non_direct_output;
}
- // fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
- goto non_direct_output;
- }
return AUDIO_IO_HANDLE_NONE;
}
outputDesc->mSamplingRate = config.sample_rate;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index fc9a332..79e73f9 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -33,6 +33,7 @@
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <binder/ProcessInfoService.h>
+#include <camera/ICameraServiceProxy.h>
#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <gui/Surface.h>
@@ -224,6 +225,18 @@ void CameraService::onFirstRef()
}
CameraDeviceFactory::registerService(this);
+
+ CameraService::pingCameraServiceProxy();
+}
+
+void CameraService::pingCameraServiceProxy() {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.camera.proxy"));
+ if (binder == nullptr) {
+ return;
+ }
+ sp<ICameraServiceProxy> proxyBinder = interface_cast<ICameraServiceProxy>(binder);
+ proxyBinder->pingForUserUpdate();
}
CameraService::~CameraService() {
@@ -959,6 +972,10 @@ status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clien
"(PID %" PRId32 ", priority %" PRId32 ")", i->getKey().string(),
String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
i->getPriority());
+ ALOGE(" Conflicts with: Device %s, client package %s (PID %"
+ PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+ String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
+ i->getPriority());
}
// Log the client's attempt
@@ -1975,7 +1992,7 @@ String8 CameraService::CameraClientManager::toString() const {
auto conflicting = i->getConflicting();
auto clientSp = i->getValue();
String8 packageName;
- userid_t clientUserId;
+ userid_t clientUserId = 0;
if (clientSp.get() != nullptr) {
packageName = String8{clientSp->getPackageName()};
uid_t clientUid = clientSp->getClientUid();
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 9b7163a..ce3cb44 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -702,6 +702,8 @@ private:
static String8 toString(std::set<userid_t> intSet);
+ static void pingCameraServiceProxy();
+
};
template<class Func>
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 05ede92..f2d6ab2 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1881,6 +1881,16 @@ void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
mCaptureSequencer->notifyAutoExposure(newState, triggerId);
}
+void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp) {
+ (void)resultExtras;
+ (void)timestamp;
+
+ ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
+ __FUNCTION__, resultExtras.requestId, timestamp);
+ mCaptureSequencer->notifyShutter(resultExtras, timestamp);
+}
+
camera2::SharedParameters& Camera2Client::getParameters() {
return mParameters;
}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index a988037..3784aab 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -106,6 +106,8 @@ public:
virtual void notifyAutoFocus(uint8_t newState, int triggerId);
virtual void notifyAutoExposure(uint8_t newState, int triggerId);
+ virtual void notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp);
/**
* Interface used by independent components of Camera2Client.
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 9849f4d..d847e0f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -43,6 +43,8 @@ CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
mNewFrameReceived(false),
mNewCaptureReceived(false),
mShutterNotified(false),
+ mHalNotifiedShutter(false),
+ mShutterCaptureId(-1),
mClient(client),
mCaptureState(IDLE),
mStateTransitionCount(0),
@@ -106,6 +108,16 @@ void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
}
}
+void CaptureSequencer::notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ if (!mHalNotifiedShutter && resultExtras.requestId == mShutterCaptureId) {
+ mHalNotifiedShutter = true;
+ mShutterNotifySignal.signal();
+ }
+}
+
void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
ATRACE_CALL();
ALOGV("%s: New result available.", __FUNCTION__);
@@ -335,6 +347,11 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStart(
} else {
nextState = STANDARD_START;
}
+ {
+ Mutex::Autolock l(mInputMutex);
+ mShutterCaptureId = mCaptureId;
+ mHalNotifiedShutter = false;
+ }
mShutterNotified = false;
return nextState;
@@ -541,6 +558,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
return DONE;
}
}
+
// TODO: Capture should be atomic with setStreamingRequest here
res = client->getCameraDevice()->capture(captureCopy);
if (res != OK) {
@@ -560,6 +578,31 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
+
+ // Wait for shutter callback
+ while (!mHalNotifiedShutter) {
+ if (mTimeoutCount <= 0) {
+ break;
+ }
+ res = mShutterNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+ if (res == TIMED_OUT) {
+ mTimeoutCount--;
+ return STANDARD_CAPTURE_WAIT;
+ }
+ }
+
+ if (mHalNotifiedShutter) {
+ if (!mShutterNotified) {
+ SharedParameters::Lock l(client->getParameters());
+ /* warning: this also locks a SharedCameraCallbacks */
+ shutterNotifyLocked(l.mParameters, client, mMsgType);
+ mShutterNotified = true;
+ }
+ } else if (mTimeoutCount <= 0) {
+ ALOGW("Timed out waiting for shutter notification");
+ return DONE;
+ }
+
// Wait for new metadata result (mNewFrame)
while (!mNewFrameReceived) {
res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -569,15 +612,6 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
}
}
- // Approximation of the shutter being closed
- // - TODO: use the hal3 exposure callback in Camera3Device instead
- if (mNewFrameReceived && !mShutterNotified) {
- SharedParameters::Lock l(client->getParameters());
- /* warning: this also locks a SharedCameraCallbacks */
- shutterNotifyLocked(l.mParameters, client, mMsgType);
- mShutterNotified = true;
- }
-
// Wait until jpeg was captured by JpegProcessor
while (mNewFrameReceived && !mNewCaptureReceived) {
res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -591,6 +625,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
return DONE;
}
if (mNewFrameReceived && mNewCaptureReceived) {
+
if (mNewFrameId != mCaptureId) {
ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
mCaptureId, mNewFrameId);
@@ -667,7 +702,6 @@ CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
sp<Camera2Client> &/*client*/) {
status_t res;
ATRACE_CALL();
-
while (!mNewCaptureReceived) {
res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
if (res == TIMED_OUT) {
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index d42ab13..10252fb 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -62,6 +62,10 @@ class CaptureSequencer:
// Notifications about AE state changes
void notifyAutoExposure(uint8_t newState, int triggerId);
+ // Notifications about shutter (capture start)
+ void notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp);
+
// Notification from the frame processor
virtual void onResultAvailable(const CaptureResult &result);
@@ -95,7 +99,10 @@ class CaptureSequencer:
sp<MemoryBase> mCaptureBuffer;
Condition mNewCaptureSignal;
- bool mShutterNotified;
+ bool mShutterNotified; // Has CaptureSequencer sent shutter to Client
+ bool mHalNotifiedShutter; // Has HAL sent shutter to CaptureSequencer
+ int32_t mShutterCaptureId; // The captureId which is waiting for shutter notification
+ Condition mShutterNotifySignal;
/**
* Internal to CaptureSequencer