-rw-r--r--  include/media/stagefright/ACodec.h                                2
-rw-r--r--  media/libmedia/Android.mk                                         2
-rw-r--r--  media/libstagefright/ACodec.cpp                                   8
-rw-r--r--  media/libstagefright/data/media_codecs_google_video.xml          91
-rw-r--r--  services/audiopolicy/AudioPolicyEffects.cpp                      36
-rw-r--r--  services/audiopolicy/AudioPolicyEffects.h                         4
-rw-r--r--  services/audiopolicy/AudioPolicyManager.cpp                      20
-rw-r--r--  services/camera/libcameraservice/Android.mk                       1
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Device.cpp       76
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Device.h         15
-rw-r--r--  services/camera/libcameraservice/device3/Camera3DummyStream.cpp  97
-rw-r--r--  services/camera/libcameraservice/device3/Camera3DummyStream.h    98
-rw-r--r--  services/soundtrigger/SoundTriggerHwService.cpp                  58
13 files changed, 458 insertions, 50 deletions
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index eb31c77..da4c20c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -234,7 +234,7 @@ private:
status_t setComponentRole(bool isEncoder, const char *mime);
status_t configureCodec(const char *mime, const sp<AMessage> &msg);
- status_t configureTunneledVideoPlayback(int64_t audioHwSync,
+ status_t configureTunneledVideoPlayback(int32_t audioHwSync,
const sp<ANativeWindow> &nativeWindow);
status_t setVideoPortFormatType(
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 3be0651..37bc418 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -70,7 +70,7 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_STATIC_LIBRARIES += libinstantssq
-LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
LOCAL_MODULE:= libmedia
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e4e463a..19a5908 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1245,13 +1245,13 @@ status_t ACodec::configureCodec(
tunneled != 0) {
ALOGI("Configuring TUNNELED video playback.");
- int64_t audioHwSync = 0;
- if (!msg->findInt64("audio-hw-sync", &audioHwSync)) {
+ int32_t audioHwSync = 0;
+ if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
ALOGW("No Audio HW Sync provided for video tunnel");
}
err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
if (err != OK) {
- ALOGE("configureTunneledVideoPlayback(%" PRId64 ",%p) failed!",
+ ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
audioHwSync, nativeWindow.get());
return err;
}
@@ -1898,7 +1898,7 @@ status_t ACodec::setupRawAudioFormat(
}
status_t ACodec::configureTunneledVideoPlayback(
- int64_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
+ int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
native_handle_t* sidebandHandle;
status_t err = mOMX->configureVideoTunnelMode(
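Note on the ACodec change above: the audio HW sync handle is now carried as a 32-bit value and read with findInt32() instead of findInt64(). A minimal sketch, not part of this patch, of how a caller might populate the configuration message ACodec consumes; the "feature-tunneled-playback" key usage and the audioHwSyncId value are illustrative assumptions:

    // Sketch: build an AMessage format that ACodec::configureCodec() would
    // accept for tunneled playback after this change.
    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1920);
    format->setInt32("height", 1080);
    format->setInt32("feature-tunneled-playback", 1);
    int32_t audioHwSyncId = 0;                           // assumed to come from the audio framework
    format->setInt32("audio-hw-sync", audioHwSyncId);    // 32-bit key now read by findInt32()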
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 9b930bc..c97be28 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,18 +16,89 @@
<Included>
<Decoders>
- <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
- <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" />
- <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
- <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+ <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileSimple : Level3 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-11880" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
+ ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-983040" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-139264" />
+ <Limit name="blocks-per-second" range="1-2000000" />
+ <Limit name="bitrate" range="1-10000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-1000000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-500000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
</Decoders>
<Encoders>
- <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
- <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+ <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level45 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-128000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level2 -->
+ <Limit name="size" min="2x2" max="896x896" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-11880" />
+ <Limit name="bitrate" range="1-2000000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileCore : Level2 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-1485" />
+ <Limit name="bitrate" range="1-64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
</Encoders>
</Included>
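The <Limit> entries added above let callers reason about what each software codec can sustain. A standalone sketch (not framework code) that checks a hypothetical 1920x1088 @ 30 fps stream against the OMX.google.h264.decoder limits declared above:

    #include <cstdio>

    int main() {
        // Values copied from the OMX.google.h264.decoder entry above.
        const int blockW = 16, blockH = 16;        // block-size 16x16
        const long maxBlocksPerSec = 983040;       // blocks-per-second upper bound
        const int width = 1920, height = 1088, fps = 30;   // hypothetical use case
        long blocks = ((width + blockW - 1) / blockW) *
                      (long)((height + blockH - 1) / blockH);   // 120 * 68 = 8160 blocks
        long blocksPerSec = blocks * fps;                        // 244800 blocks/s
        std::printf("%ld blocks/s -> %s the declared limit\n", blocksPerSec,
                    blocksPerSec <= maxBlocksPerSec ? "within" : "over");
        return 0;
    }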
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index cc0e965..c45acd0 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -98,8 +98,12 @@ status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input,
inputDesc = new EffectVector(audioSession);
mInputs.add(input, inputDesc);
} else {
+ // EffectVector already exists; just increase its ref count
inputDesc = mInputs.valueAt(idx);
}
+ inputDesc->mRefCount++;
+
+ ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -133,10 +137,14 @@ status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input)
return status;
}
EffectVector *inputDesc = mInputs.valueAt(index);
- setProcessorEnabled(inputDesc, false);
- delete inputDesc;
- mInputs.removeItemsAt(index);
- ALOGV("releaseInputEffects(): all effects released");
+ inputDesc->mRefCount--;
+ ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
+ if (inputDesc->mRefCount == 0) {
+ setProcessorEnabled(inputDesc, false);
+ delete inputDesc;
+ mInputs.removeItemsAt(index);
+ ALOGV("releaseInputEffects(): all effects released");
+ }
return status;
}
@@ -223,8 +231,12 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output,
procDesc = new EffectVector(audioSession);
mOutputSessions.add(audioSession, procDesc);
} else {
+ // EffectVector already exists; just increase its ref count
procDesc = mOutputSessions.valueAt(idx);
}
+ procDesc->mRefCount++;
+
+ ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -262,12 +274,16 @@ status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t outpu
}
EffectVector *procDesc = mOutputSessions.valueAt(index);
- setProcessorEnabled(procDesc, false);
- procDesc->mEffects.clear();
- delete procDesc;
- mOutputSessions.removeItemsAt(index);
- ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
- audioSession);
+ procDesc->mRefCount--;
+ ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+ if (procDesc->mRefCount == 0) {
+ setProcessorEnabled(procDesc, false);
+ procDesc->mEffects.clear();
+ delete procDesc;
+ mOutputSessions.removeItemsAt(index);
+ ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
+ audioSession);
+ }
return status;
}
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
index 351cb1a..dbe0d0e 100644
--- a/services/audiopolicy/AudioPolicyEffects.h
+++ b/services/audiopolicy/AudioPolicyEffects.h
@@ -131,9 +131,11 @@ private:
// class to store vector of AudioEffects
class EffectVector {
public:
- EffectVector(int session) : mSessionId(session) {}
+ EffectVector(int session) : mSessionId(session), mRefCount(0) {}
/*virtual*/ ~EffectVector() {}
const int mSessionId;
+ // AudioPolicyManager keeps mLock, no need for lock on reference count here
+ int mRefCount;
Vector< sp<AudioEffect> >mEffects;
};
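The new mRefCount field turns the add/release calls above from one-shot into reference counted: effects attached to a session are created on the first add and torn down only on the last matching release. A standalone sketch (not the framework class) of that pattern:

    #include <cassert>

    struct EffectVectorSketch {          // stands in for AudioPolicyEffects::EffectVector
        int refCount = 0;
        bool effectsAlive = false;
    };

    void addEffects(EffectVectorSketch &v) {
        if (v.refCount == 0) v.effectsAlive = true;     // create effects on first add
        v.refCount++;
    }

    void releaseEffects(EffectVectorSketch &v) {
        if (--v.refCount == 0) v.effectsAlive = false;  // free only on last release
    }

    int main() {
        EffectVectorSketch v;
        addEffects(v);
        addEffects(v);                   // second user of the same session
        releaseEffects(v);
        assert(v.effectsAlive);          // one user left: effects kept
        releaseEffects(v);
        assert(!v.effectsAlive);         // last release: effects freed
        return 0;
    }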
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 06dd22c..a805923 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -1297,21 +1297,23 @@ audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource,
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
bool isSoundTrigger = false;
+ audio_source_t halInputSource = inputSource;
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
ALOGV("SoundTrigger capture on session %d input %d", session, input);
+ } else {
+ halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
-
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
&input,
&config,
&device,
String8(""),
- inputSource,
+ halInputSource,
flags);
// only accept input with the exact requested set of parameters
@@ -4317,6 +4319,20 @@ uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output,
mpClientInterface->onAudioPatchListUpdate();
}
}
+
+ // inform all input as well
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i);
+ if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
+ AudioParameter inputCmd = AudioParameter();
+ ALOGV("%s: inform input %d of device:%d", __func__,
+ inputDescriptor->mIoHandle, device);
+ inputCmd.addInt(String8(AudioParameter::keyRouting),device);
+ mpClientInterface->setParameters(inputDescriptor->mIoHandle,
+ inputCmd.toString(),
+ delayMs);
+ }
+ }
}
// update stream volumes according to new device
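The loop added above forwards the selected output device to every non-virtual input stream as a keyRouting parameter. A sketch that mimics (does not use) AudioParameter to show the key/value string each input HAL stream would receive, for a hypothetical device value of 2 (AUDIO_DEVICE_OUT_SPEAKER):

    #include <cstdio>
    #include <string>

    int main() {
        const int device = 2;   // hypothetical device bitmask (AUDIO_DEVICE_OUT_SPEAKER)
        // AudioParameter::addInt(keyRouting, device) followed by toString()
        // yields a "routing=<value>" string; this mimics that result.
        std::string cmd = "routing=" + std::to_string(device);
        std::printf("setParameters() to each non-virtual input: %s\n", cmd.c_str());
        return 0;
    }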
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 2f485b9..9d6ab23 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -47,6 +47,7 @@ LOCAL_SRC_FILES:= \
device3/Camera3InputStream.cpp \
device3/Camera3OutputStream.cpp \
device3/Camera3ZslStream.cpp \
+ device3/Camera3DummyStream.cpp \
device3/StatusTracker.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9b51b99..6f78db5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -48,6 +48,7 @@
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
#include "device3/Camera3ZslStream.h"
+#include "device3/Camera3DummyStream.h"
#include "CameraService.h"
using namespace android::camera3;
@@ -181,6 +182,7 @@ status_t Camera3Device::initialize(camera_module_t *module)
mHal3Device = device;
mStatus = STATUS_UNCONFIGURED;
mNextStreamId = 0;
+ mDummyStreamId = NO_STREAM;
mNeedConfig = true;
mPauseStateNotify = false;
@@ -1418,6 +1420,15 @@ status_t Camera3Device::configureStreamsLocked() {
return OK;
}
+ // Workaround for device HALv3.2 or older spec bug - zero streams requires
+ // adding a dummy stream instead.
+ // TODO: Bug: 17321404 for fixing the HAL spec and removing this workaround.
+ if (mOutputStreams.size() == 0) {
+ addDummyStreamLocked();
+ } else {
+ tryRemoveDummyStreamLocked();
+ }
+
// Start configuring the streams
ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
@@ -1540,7 +1551,7 @@ status_t Camera3Device::configureStreamsLocked() {
mNeedConfig = false;
- if (config.num_streams > 0) {
+ if (mDummyStreamId == NO_STREAM) {
mStatus = STATUS_CONFIGURED;
} else {
mStatus = STATUS_UNCONFIGURED;
@@ -1554,6 +1565,69 @@ status_t Camera3Device::configureStreamsLocked() {
return OK;
}
+status_t Camera3Device::addDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId != NO_STREAM) {
+ // Should never be adding a second dummy stream when one is already
+ // active
+ SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+
+ sp<Camera3OutputStreamInterface> dummyStream =
+ new Camera3DummyStream(mNextStreamId);
+
+ res = mOutputStreams.add(mNextStreamId, dummyStream);
+ if (res < 0) {
+ SET_ERR_L("Can't add dummy stream to set: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ mDummyStreamId = mNextStreamId;
+ mNextStreamId++;
+
+ return OK;
+}
+
+status_t Camera3Device::tryRemoveDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId == NO_STREAM) return OK;
+ if (mOutputStreams.size() == 1) return OK;
+
+ ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+
+ // Ok, have a dummy stream and there's at least one other output stream,
+ // so remove the dummy
+
+ sp<Camera3StreamInterface> deletedStream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(mDummyStreamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
+ return INVALID_OPERATION;
+ }
+
+ deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
+ mOutputStreams.removeItemsAt(outputStreamIdx);
+
+ // Free up the stream endpoint so that it can be used by some other stream
+ res = deletedStream->disconnect();
+ if (res != OK) {
+ SET_ERR_L("Can't disconnect deleted dummy stream %d", mDummyStreamId);
+ // fall through since we want to still list the stream as deleted.
+ }
+ mDeletedStreams.add(deletedStream);
+ mDummyStreamId = NO_STREAM;
+
+ return res;
+}
+
void Camera3Device::setErrorState(const char *fmt, ...) {
Mutex::Autolock l(mLock);
va_list args;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e3c98ef..b99ed7e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -151,6 +151,8 @@ class Camera3Device :
struct RequestTrigger;
// minimal jpeg buffer size: 256KB + blob header
static const ssize_t kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
+ // Constant to use for stream ID when one doesn't exist
+ static const int NO_STREAM = -1;
// A lock to enforce serialization on the input/configure side
// of the public interface.
@@ -196,6 +198,8 @@ class Camera3Device :
int mNextStreamId;
bool mNeedConfig;
+ int mDummyStreamId;
+
// Whether to send state updates upstream
// Pause when doing transparent reconfiguration
bool mPauseStateNotify;
@@ -291,6 +295,17 @@ class Camera3Device :
status_t configureStreamsLocked();
/**
+ * Add a dummy stream to the current stream set as a workaround for
+ * not allowing 0 streams in the camera HAL spec.
+ */
+ status_t addDummyStreamLocked();
+
+ /**
+ * Remove a dummy stream if the current config includes real streams.
+ */
+ status_t tryRemoveDummyStreamLocked();
+
+ /**
* Set device into an error state due to some fatal failure, and set an
* error message to indicate why. Only the first call's message will be
* used. The message is also sent to the log.
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
new file mode 100644
index 0000000..6656b09
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DummyStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3DummyStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3DummyStream::Camera3DummyStream(int id) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
+ /*maxSize*/0, DUMMY_FORMAT) {
+
+}
+
+Camera3DummyStream::~Camera3DummyStream() {
+
+}
+
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+void Camera3DummyStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Dummy\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3DummyStream::setTransform(int transform) {
+ ATRACE_CALL();
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::configureQueueLocked() {
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::disconnectLocked() {
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) {
+ *usage = DUMMY_USAGE;
+ return OK;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
new file mode 100644
index 0000000..3e42623
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+#include "Camera3IOStreamBase.h"
+#include "Camera3OutputStreamInterface.h"
+
+namespace android {
+namespace camera3 {
+
+/**
+ * A dummy output stream class, to be used as a placeholder when no valid
+ * streams are configured by the client.
+ * This is necessary because camera HAL v3.2 or older disallow configuring
+ * 0 output streams, while the public camera2 API allows for it.
+ */
+class Camera3DummyStream :
+ public Camera3IOStreamBase,
+ public Camera3OutputStreamInterface {
+
+ public:
+ /**
+ * Set up a dummy stream; doesn't actually connect to anything, and uses
+ * a default dummy format and size.
+ */
+ Camera3DummyStream(int id);
+
+ virtual ~Camera3DummyStream();
+
+ /**
+ * Camera3Stream interface
+ */
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ status_t setTransform(int transform);
+
+ protected:
+
+ /**
+ * Note that we release the lock briefly in this function
+ */
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut);
+
+ virtual status_t disconnectLocked();
+
+ private:
+
+ // Default dummy parameters; 320x240 is a required size for all devices,
+ // otherwise act like a SurfaceView would.
+ static const int DUMMY_WIDTH = 320;
+ static const int DUMMY_HEIGHT = 240;
+ static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+
+ /**
+ * Internal Camera3Stream interface
+ */
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp);
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3DummyStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 2502e0d..b5aaee3 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -249,7 +249,7 @@ sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent_l(
event->data_offset = sizeof(struct sound_trigger_recognition_event);
break;
default:
- return eventMemory;
+ return eventMemory;
}
size_t size = event->data_offset + event->data_size;
@@ -653,7 +653,6 @@ void SoundTriggerHwService::Module::onCallbackEvent(const sp<CallbackEvent>& eve
{
ALOGV("onCallbackEvent type %d", event->mType);
- AutoMutex lock(mLock);
sp<IMemory> eventMemory = event->mMemory;
if (eventMemory == 0 || eventMemory->pointer() == NULL) {
@@ -668,34 +667,53 @@ void SoundTriggerHwService::Module::onCallbackEvent(const sp<CallbackEvent>& eve
case CallbackEvent::TYPE_RECOGNITION: {
struct sound_trigger_recognition_event *recognitionEvent =
(struct sound_trigger_recognition_event *)eventMemory->pointer();
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(recognitionEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ if (model->mState != Model::STATE_ACTIVE) {
+ ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+ return;
+ }
- sp<Model> model = getModel(recognitionEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ recognitionEvent->capture_session = model->mCaptureSession;
+ model->mState = Model::STATE_IDLE;
+ client = mClient;
}
- if (model->mState != Model::STATE_ACTIVE) {
- ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
- return;
+ if (client != 0) {
+ client->onRecognitionEvent(eventMemory);
}
-
- recognitionEvent->capture_session = model->mCaptureSession;
- mClient->onRecognitionEvent(eventMemory);
- model->mState = Model::STATE_IDLE;
} break;
case CallbackEvent::TYPE_SOUNDMODEL: {
struct sound_trigger_model_event *soundmodelEvent =
(struct sound_trigger_model_event *)eventMemory->pointer();
-
- sp<Model> model = getModel(soundmodelEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(soundmodelEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ client = mClient;
+ }
+ if (client != 0) {
+ client->onSoundModelEvent(eventMemory);
}
- mClient->onSoundModelEvent(eventMemory);
} break;
case CallbackEvent::TYPE_SERVICE_STATE: {
- mClient->onServiceStateChange(eventMemory);
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ client = mClient;
+ }
+ if (client != 0) {
+ client->onServiceStateChange(eventMemory);
+ }
} break;
default:
LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
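The reworked onCallbackEvent() above no longer holds mLock while calling into the client: model state is updated and the client reference is copied under the lock, then the binder callback runs with the lock released, so a client that re-enters the service from inside the callback cannot deadlock. A generic standalone sketch of that pattern (not the service code):

    #include <functional>
    #include <mutex>

    struct Notifier {
        std::mutex lock;
        std::function<void(int)> client;  // stands in for sp<ISoundTriggerClient>
        int lastEvent = 0;

        void onEvent(int event) {
            std::function<void(int)> cb;
            {
                std::lock_guard<std::mutex> guard(lock);
                lastEvent = event;        // mutate state while holding the lock
                cb = client;              // snapshot the callback reference
            }                             // lock released here
            if (cb) {
                cb(event);                // call out unlocked: the callback may
            }                             // safely re-enter this object
        }
    };

    int main() {
        Notifier n;
        n.client = [&n](int e) { (void)e; /* may call back into n without deadlock */ };
        n.onEvent(42);
        return 0;
    }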