summaryrefslogtreecommitdiffstats
path: root/services/camera
diff options
context:
space:
mode:
Diffstat (limited to 'services/camera')
-rw-r--r--services/camera/libcameraservice/Android.mk46
-rw-r--r--services/camera/libcameraservice/CameraDeviceFactory.cpp71
-rw-r--r--services/camera/libcameraservice/CameraDeviceFactory.h45
-rw-r--r--services/camera/libcameraservice/CameraService.cpp910
-rw-r--r--services/camera/libcameraservice/CameraService.h292
-rw-r--r--services/camera/libcameraservice/api1/Camera2Client.cpp (renamed from services/camera/libcameraservice/Camera2Client.cpp)649
-rw-r--r--services/camera/libcameraservice/api1/Camera2Client.h (renamed from services/camera/libcameraservice/Camera2Client.h)106
-rw-r--r--services/camera/libcameraservice/api1/CameraClient.cpp (renamed from services/camera/libcameraservice/CameraClient.cpp)89
-rw-r--r--services/camera/libcameraservice/api1/CameraClient.h (renamed from services/camera/libcameraservice/CameraClient.h)14
-rw-r--r--services/camera/libcameraservice/api1/client2/BurstCapture.cpp (renamed from services/camera/libcameraservice/camera2/BurstCapture.cpp)13
-rw-r--r--services/camera/libcameraservice/api1/client2/BurstCapture.h (renamed from services/camera/libcameraservice/camera2/BurstCapture.h)5
-rw-r--r--services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp548
-rw-r--r--services/camera/libcameraservice/api1/client2/CallbackProcessor.h (renamed from services/camera/libcameraservice/camera2/CallbackProcessor.h)27
-rw-r--r--services/camera/libcameraservice/api1/client2/Camera2Heap.h (renamed from services/camera/libcameraservice/camera2/Camera2Heap.h)0
-rw-r--r--services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp (renamed from services/camera/libcameraservice/camera2/CaptureSequencer.cpp)142
-rw-r--r--services/camera/libcameraservice/api1/client2/CaptureSequencer.h (renamed from services/camera/libcameraservice/camera2/CaptureSequencer.h)18
-rw-r--r--services/camera/libcameraservice/api1/client2/FrameProcessor.cpp356
-rw-r--r--services/camera/libcameraservice/api1/client2/FrameProcessor.h111
-rw-r--r--services/camera/libcameraservice/api1/client2/JpegCompressor.cpp (renamed from services/camera/libcameraservice/camera2/JpegCompressor.cpp)5
-rw-r--r--services/camera/libcameraservice/api1/client2/JpegCompressor.h (renamed from services/camera/libcameraservice/camera2/JpegCompressor.h)0
-rw-r--r--services/camera/libcameraservice/api1/client2/JpegProcessor.cpp (renamed from services/camera/libcameraservice/camera2/JpegProcessor.cpp)73
-rw-r--r--services/camera/libcameraservice/api1/client2/JpegProcessor.h (renamed from services/camera/libcameraservice/camera2/JpegProcessor.h)14
-rw-r--r--services/camera/libcameraservice/api1/client2/Parameters.cpp (renamed from services/camera/libcameraservice/camera2/Parameters.cpp)788
-rw-r--r--services/camera/libcameraservice/api1/client2/Parameters.h (renamed from services/camera/libcameraservice/camera2/Parameters.h)53
-rw-r--r--services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp (renamed from services/camera/libcameraservice/camera2/StreamingProcessor.cpp)432
-rw-r--r--services/camera/libcameraservice/api1/client2/StreamingProcessor.h (renamed from services/camera/libcameraservice/camera2/StreamingProcessor.h)35
-rw-r--r--services/camera/libcameraservice/api1/client2/ZslProcessor.cpp (renamed from services/camera/libcameraservice/camera2/ZslProcessor.cpp)109
-rw-r--r--services/camera/libcameraservice/api1/client2/ZslProcessor.h (renamed from services/camera/libcameraservice/camera2/ZslProcessor.h)29
-rw-r--r--services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp482
-rw-r--r--services/camera/libcameraservice/api1/client2/ZslProcessor3.h136
-rw-r--r--services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h59
-rw-r--r--services/camera/libcameraservice/api2/CameraDeviceClient.cpp680
-rw-r--r--services/camera/libcameraservice/api2/CameraDeviceClient.h154
-rw-r--r--services/camera/libcameraservice/api_pro/ProCamera2Client.cpp446
-rw-r--r--services/camera/libcameraservice/api_pro/ProCamera2Client.h123
-rw-r--r--services/camera/libcameraservice/camera2/CallbackProcessor.cpp301
-rw-r--r--services/camera/libcameraservice/camera2/CameraMetadata.cpp296
-rw-r--r--services/camera/libcameraservice/camera2/CameraMetadata.h173
-rw-r--r--services/camera/libcameraservice/camera2/FrameProcessor.cpp308
-rw-r--r--services/camera/libcameraservice/common/Camera2ClientBase.cpp331
-rw-r--r--services/camera/libcameraservice/common/Camera2ClientBase.h134
-rw-r--r--services/camera/libcameraservice/common/CameraDeviceBase.cpp30
-rw-r--r--services/camera/libcameraservice/common/CameraDeviceBase.h234
-rw-r--r--services/camera/libcameraservice/common/FrameProcessorBase.cpp195
-rw-r--r--services/camera/libcameraservice/common/FrameProcessorBase.h (renamed from services/camera/libcameraservice/camera2/FrameProcessor.h)51
-rw-r--r--services/camera/libcameraservice/device1/CameraHardwareInterface.h (renamed from services/camera/libcameraservice/CameraHardwareInterface.h)24
-rw-r--r--services/camera/libcameraservice/device2/Camera2Device.cpp (renamed from services/camera/libcameraservice/Camera2Device.cpp)129
-rw-r--r--services/camera/libcameraservice/device2/Camera2Device.h (renamed from services/camera/libcameraservice/Camera2Device.h)212
-rw-r--r--services/camera/libcameraservice/device3/Camera3Device.cpp2557
-rw-r--r--services/camera/libcameraservice/device3/Camera3Device.h545
-rw-r--r--services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp249
-rw-r--r--services/camera/libcameraservice/device3/Camera3IOStreamBase.h103
-rw-r--r--services/camera/libcameraservice/device3/Camera3InputStream.cpp239
-rw-r--r--services/camera/libcameraservice/device3/Camera3InputStream.h83
-rw-r--r--services/camera/libcameraservice/device3/Camera3OutputStream.cpp394
-rw-r--r--services/camera/libcameraservice/device3/Camera3OutputStream.h104
-rw-r--r--services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h43
-rw-r--r--services/camera/libcameraservice/device3/Camera3Stream.cpp426
-rw-r--r--services/camera/libcameraservice/device3/Camera3Stream.h291
-rw-r--r--services/camera/libcameraservice/device3/Camera3StreamBufferListener.h48
-rw-r--r--services/camera/libcameraservice/device3/Camera3StreamInterface.h162
-rw-r--r--services/camera/libcameraservice/device3/Camera3ZslStream.cpp328
-rw-r--r--services/camera/libcameraservice/device3/Camera3ZslStream.h105
-rw-r--r--services/camera/libcameraservice/device3/StatusTracker.cpp219
-rw-r--r--services/camera/libcameraservice/device3/StatusTracker.h130
-rw-r--r--services/camera/libcameraservice/gui/RingBufferConsumer.cpp359
-rw-r--r--services/camera/libcameraservice/gui/RingBufferConsumer.h187
-rw-r--r--services/camera/libcameraservice/utils/CameraTraces.cpp94
-rw-r--r--services/camera/libcameraservice/utils/CameraTraces.h66
-rw-r--r--services/camera/tests/CameraServiceTest/Android.mk26
-rw-r--r--services/camera/tests/CameraServiceTest/CameraServiceTest.cpp924
-rw-r--r--services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE20
-rw-r--r--services/camera/tests/CameraServiceTest/NOTICE190
73 files changed, 13905 insertions, 3445 deletions
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index eff47c8..51ba698 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -8,27 +8,42 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
CameraService.cpp \
- CameraClient.cpp \
- Camera2Client.cpp \
- Camera2Device.cpp \
- camera2/CameraMetadata.cpp \
- camera2/Parameters.cpp \
- camera2/FrameProcessor.cpp \
- camera2/StreamingProcessor.cpp \
- camera2/JpegProcessor.cpp \
- camera2/CallbackProcessor.cpp \
- camera2/ZslProcessor.cpp \
- camera2/BurstCapture.cpp \
- camera2/JpegCompressor.cpp \
- camera2/CaptureSequencer.cpp
+ CameraDeviceFactory.cpp \
+ common/Camera2ClientBase.cpp \
+ common/CameraDeviceBase.cpp \
+ common/FrameProcessorBase.cpp \
+ api1/CameraClient.cpp \
+ api1/Camera2Client.cpp \
+ api1/client2/Parameters.cpp \
+ api1/client2/FrameProcessor.cpp \
+ api1/client2/StreamingProcessor.cpp \
+ api1/client2/JpegProcessor.cpp \
+ api1/client2/CallbackProcessor.cpp \
+ api1/client2/ZslProcessor.cpp \
+ api1/client2/BurstCapture.cpp \
+ api1/client2/JpegCompressor.cpp \
+ api1/client2/CaptureSequencer.cpp \
+ api1/client2/ZslProcessor3.cpp \
+ api2/CameraDeviceClient.cpp \
+ api_pro/ProCamera2Client.cpp \
+ device2/Camera2Device.cpp \
+ device3/Camera3Device.cpp \
+ device3/Camera3Stream.cpp \
+ device3/Camera3IOStreamBase.cpp \
+ device3/Camera3InputStream.cpp \
+ device3/Camera3OutputStream.cpp \
+ device3/Camera3ZslStream.cpp \
+ device3/StatusTracker.cpp \
+ gui/RingBufferConsumer.cpp \
+ utils/CameraTraces.cpp \
LOCAL_SHARED_LIBRARIES:= \
libui \
+ liblog \
libutils \
libbinder \
libcutils \
libmedia \
- libmedia_native \
libcamera_client \
libgui \
libhardware \
@@ -40,6 +55,9 @@ LOCAL_C_INCLUDES += \
system/media/camera/include \
external/jpeg
+
+LOCAL_CFLAGS += -Wall -Wextra
+
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
new file mode 100644
index 0000000..7fdf304
--- /dev/null
+++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "CameraDeviceFactory"
+#include <utils/Log.h>
+
+#include "CameraService.h"
+#include "CameraDeviceFactory.h"
+#include "common/CameraDeviceBase.h"
+#include "device2/Camera2Device.h"
+#include "device3/Camera3Device.h"
+
+namespace android {
+
+wp<CameraService> CameraDeviceFactory::sService;
+
+sp<CameraDeviceBase> CameraDeviceFactory::createDevice(int cameraId) {
+
+ sp<CameraService> svc = sService.promote();
+ if (svc == 0) {
+ ALOGE("%s: No service registered", __FUNCTION__);
+ return NULL;
+ }
+
+ int deviceVersion = svc->getDeviceVersion(cameraId, /*facing*/NULL);
+
+ sp<CameraDeviceBase> device;
+
+ switch (deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ device = new Camera2Device(cameraId);
+ break;
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ device = new Camera3Device(cameraId);
+ break;
+ default:
+ ALOGE("%s: Camera %d: Unknown HAL device version %d",
+ __FUNCTION__, cameraId, deviceVersion);
+ device = NULL;
+ break;
+ }
+
+ ALOGV_IF(device != 0, "Created a new camera device for version %d",
+ deviceVersion);
+
+ return device;
+}
+
+void CameraDeviceFactory::registerService(wp<CameraService> service) {
+ ALOGV("%s: Registered service %p", __FUNCTION__,
+ service.promote().get());
+
+ sService = service;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.h b/services/camera/libcameraservice/CameraDeviceFactory.h
new file mode 100644
index 0000000..236dc56
--- /dev/null
+++ b/services/camera/libcameraservice/CameraDeviceFactory.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
+#define ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
+
+#include <utils/RefBase.h>
+
+namespace android {
+
+class CameraDeviceBase;
+class CameraService;
+
+/**
+ * Create the right instance of Camera2Device or Camera3Device
+ * automatically based on the device version.
+ */
+class CameraDeviceFactory : public virtual RefBase {
+ public:
+ static void registerService(wp<CameraService> service);
+
+ // Prerequisite: Call registerService.
+ static sp<CameraDeviceBase> createDevice(int cameraId);
+ private:
+ CameraDeviceFactory(wp<CameraService> service);
+
+ static wp<CameraService> sService;
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 124d24d..87027f7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -22,13 +22,13 @@
#include <sys/types.h>
#include <pthread.h>
+#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <cutils/atomic.h>
#include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
#include <gui/Surface.h>
#include <hardware/hardware.h>
#include <media/AudioSystem.h>
@@ -38,8 +38,12 @@
#include <utils/String16.h>
#include "CameraService.h"
-#include "CameraClient.h"
-#include "Camera2Client.h"
+#include "api1/CameraClient.h"
+#include "api1/Camera2Client.h"
+#include "api_pro/ProCamera2Client.h"
+#include "api2/CameraDeviceClient.h"
+#include "utils/CameraTraces.h"
+#include "CameraDeviceFactory.h"
namespace android {
@@ -65,6 +69,20 @@ static int getCallingUid() {
return IPCThreadState::self()->getCallingUid();
}
+extern "C" {
+static void camera_device_status_change(
+ const struct camera_module_callbacks* callbacks,
+ int camera_id,
+ int new_status) {
+ sp<CameraService> cs = const_cast<CameraService*>(
+ static_cast<const CameraService*>(callbacks));
+
+ cs->onDeviceStatusChanged(
+ camera_id,
+ new_status);
+}
+} // extern "C"
+
// ----------------------------------------------------------------------------
// This is ugly and only safe if we never re-create the CameraService, but
@@ -72,14 +90,22 @@ static int getCallingUid() {
static CameraService *gCameraService;
CameraService::CameraService()
-:mSoundRef(0), mModule(0)
+ :mSoundRef(0), mModule(0)
{
ALOGI("CameraService started (pid=%d)", getpid());
gCameraService = this;
+
+ for (size_t i = 0; i < MAX_CAMERAS; ++i) {
+ mStatusList[i] = ICameraServiceListener::STATUS_PRESENT;
+ }
+
+ this->camera_device_status_change = android::camera_device_status_change;
}
void CameraService::onFirstRef()
{
+ LOG1("CameraService::onFirstRef");
+
BnCameraService::onFirstRef();
if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,
@@ -88,6 +114,7 @@ void CameraService::onFirstRef()
mNumberOfCameras = 0;
}
else {
+ ALOGI("Loaded \"%s\" camera module", mModule->common.name);
mNumberOfCameras = mModule->get_number_of_cameras();
if (mNumberOfCameras > MAX_CAMERAS) {
ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
@@ -97,6 +124,13 @@ void CameraService::onFirstRef()
for (int i = 0; i < mNumberOfCameras; i++) {
setCameraFree(i);
}
+
+ if (mModule->common.module_api_version >=
+ CAMERA_MODULE_API_VERSION_2_1) {
+ mModule->set_callbacks(this);
+ }
+
+ CameraDeviceFactory::registerService(this);
}
}
@@ -110,6 +144,67 @@ CameraService::~CameraService() {
gCameraService = NULL;
}
+void CameraService::onDeviceStatusChanged(int cameraId,
+ int newStatus)
+{
+ ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
+ cameraId, newStatus);
+
+ if (cameraId < 0 || cameraId >= MAX_CAMERAS) {
+ ALOGE("%s: Bad camera ID %d", __FUNCTION__, cameraId);
+ return;
+ }
+
+ if ((int)getStatus(cameraId) == newStatus) {
+ ALOGE("%s: State transition to the same status 0x%x not allowed",
+ __FUNCTION__, (uint32_t)newStatus);
+ return;
+ }
+
+ /* don't do this in updateStatus
+ since it is also called from connect and we could get into a deadlock */
+ if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+ Vector<sp<BasicClient> > clientsToDisconnect;
+ {
+ Mutex::Autolock al(mServiceLock);
+
+ /* Find all clients that we need to disconnect */
+ sp<BasicClient> client = mClient[cameraId].promote();
+ if (client.get() != NULL) {
+ clientsToDisconnect.push_back(client);
+ }
+
+ int i = cameraId;
+ for (size_t j = 0; j < mProClientList[i].size(); ++j) {
+ sp<ProClient> cl = mProClientList[i][j].promote();
+ if (cl != NULL) {
+ clientsToDisconnect.push_back(cl);
+ }
+ }
+ }
+
+ /* now disconnect them. don't hold the lock
+ or we can get into a deadlock */
+
+ for (size_t i = 0; i < clientsToDisconnect.size(); ++i) {
+ sp<BasicClient> client = clientsToDisconnect[i];
+
+ client->disconnect();
+ /**
+ * The remote app will no longer be able to call methods on the
+ * client since the client PID will be reset to 0
+ */
+ }
+
+ ALOGV("%s: After unplug, disconnected %d clients",
+ __FUNCTION__, clientsToDisconnect.size());
+ }
+
+ updateStatus(
+ static_cast<ICameraServiceListener::Status>(newStatus), cameraId);
+
+}
+
int32_t CameraService::getNumberOfCameras() {
return mNumberOfCameras;
}
@@ -117,7 +212,7 @@ int32_t CameraService::getNumberOfCameras() {
status_t CameraService::getCameraInfo(int cameraId,
struct CameraInfo* cameraInfo) {
if (!mModule) {
- return NO_INIT;
+ return -ENODEV;
}
if (cameraId < 0 || cameraId >= mNumberOfCameras) {
@@ -131,22 +226,112 @@ status_t CameraService::getCameraInfo(int cameraId,
return rc;
}
-sp<ICamera> CameraService::connect(
- const sp<ICameraClient>& cameraClient, int cameraId) {
+status_t CameraService::getCameraCharacteristics(int cameraId,
+ CameraMetadata* cameraInfo) {
+ if (!cameraInfo) {
+ ALOGE("%s: cameraInfo is NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (!mModule) {
+ ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ if (mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_0) {
+ // TODO: Remove this check once HAL1 shim is in place.
+ ALOGE("%s: Only HAL module version V2 or higher supports static metadata", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (cameraId < 0 || cameraId >= mNumberOfCameras) {
+ ALOGE("%s: Invalid camera id: %d", __FUNCTION__, cameraId);
+ return BAD_VALUE;
+ }
+
+ int facing;
+ if (getDeviceVersion(cameraId, &facing) == CAMERA_DEVICE_API_VERSION_1_0) {
+ // TODO: Remove this check once HAL1 shim is in place.
+ ALOGE("%s: HAL1 doesn't support static metadata yet", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (getDeviceVersion(cameraId, &facing) <= CAMERA_DEVICE_API_VERSION_2_1) {
+ // Disable HAL2.x support for camera2 API for now.
+ ALOGW("%s: HAL2.x doesn't support getCameraCharacteristics for now", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ struct camera_info info;
+ status_t ret = mModule->get_camera_info(cameraId, &info);
+ *cameraInfo = info.static_camera_characteristics;
+
+ return ret;
+}
+
+int CameraService::getDeviceVersion(int cameraId, int* facing) {
+ struct camera_info info;
+ if (mModule->get_camera_info(cameraId, &info) != OK) {
+ return -1;
+ }
+
+ int deviceVersion;
+ if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_0) {
+ deviceVersion = info.device_version;
+ } else {
+ deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+ }
+
+ if (facing) {
+ *facing = info.facing;
+ }
+
+ return deviceVersion;
+}
+
+bool CameraService::isValidCameraId(int cameraId) {
+ int facing;
+ int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+status_t CameraService::validateConnect(int cameraId,
+ /*inout*/
+ int& clientUid) const {
+
int callingPid = getCallingPid();
- LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId);
+ if (clientUid == USE_CALLING_UID) {
+ clientUid = getCallingUid();
+ } else {
+ // We only trust our own process to forward client UIDs
+ if (callingPid != getpid()) {
+ ALOGE("CameraService::connect X (pid %d) rejected (don't trust clientUid)",
+ callingPid);
+ return PERMISSION_DENIED;
+ }
+ }
if (!mModule) {
ALOGE("Camera HAL module not loaded");
- return NULL;
+ return -ENODEV;
}
- sp<Client> client;
if (cameraId < 0 || cameraId >= mNumberOfCameras) {
ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
callingPid, cameraId);
- return NULL;
+ return -ENODEV;
}
char value[PROPERTY_VALUE_MAX];
@@ -154,95 +339,432 @@ sp<ICamera> CameraService::connect(
if (strcmp(value, "1") == 0) {
// Camera is disabled by DevicePolicyManager.
ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
- return NULL;
+ return -EACCES;
}
- Mutex::Autolock lock(mServiceLock);
+ ICameraServiceListener::Status currentStatus = getStatus(cameraId);
+ if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
+ ALOGI("Camera is not plugged in,"
+ " connect X (pid %d) rejected", callingPid);
+ return -ENODEV;
+ } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) {
+ ALOGI("Camera is enumerating,"
+ " connect X (pid %d) rejected", callingPid);
+ return -EBUSY;
+ }
+ // Else don't check for STATUS_NOT_AVAILABLE.
+ // -- It's done implicitly in canConnectUnsafe /w the mBusy array
+
+ return OK;
+}
+
+bool CameraService::canConnectUnsafe(int cameraId,
+ const String16& clientPackageName,
+ const sp<IBinder>& remoteCallback,
+ sp<BasicClient> &client) {
+ String8 clientName8(clientPackageName);
+ int callingPid = getCallingPid();
+
if (mClient[cameraId] != 0) {
client = mClient[cameraId].promote();
if (client != 0) {
- if (cameraClient->asBinder() == client->getCameraClient()->asBinder()) {
+ if (remoteCallback == client->getRemote()) {
LOG1("CameraService::connect X (pid %d) (the same client)",
callingPid);
- return client;
+ return true;
} else {
- ALOGW("CameraService::connect X (pid %d) rejected (existing client).",
- callingPid);
- return NULL;
+ // TODOSC: need to support 1 regular client,
+ // multiple shared clients here
+ ALOGW("CameraService::connect X (pid %d) rejected"
+ " (existing client).", callingPid);
+ return false;
}
}
mClient[cameraId].clear();
}
+ /*
+ mBusy is set to false as the last step of the Client destructor,
+ after which it is guaranteed that the Client destructor has finished (
+ including any inherited destructors)
+
+ We only need this for a Client subclasses since we don't allow
+ multiple Clents to be opened concurrently, but multiple BasicClient
+ would be fine
+ */
if (mBusy[cameraId]) {
- ALOGW("CameraService::connect X (pid %d) rejected"
- " (camera %d is still busy).", callingPid, cameraId);
- return NULL;
+ ALOGW("CameraService::connect X (pid %d, \"%s\") rejected"
+ " (camera %d is still busy).", callingPid,
+ clientName8.string(), cameraId);
+ return false;
}
- struct camera_info info;
- if (mModule->get_camera_info(cameraId, &info) != OK) {
- ALOGE("Invalid camera id %d", cameraId);
- return NULL;
+ return true;
+}
+
+status_t CameraService::connect(
+ const sp<ICameraClient>& cameraClient,
+ int cameraId,
+ const String16& clientPackageName,
+ int clientUid,
+ /*out*/
+ sp<ICamera>& device) {
+
+ String8 clientName8(clientPackageName);
+ int callingPid = getCallingPid();
+
+ LOG1("CameraService::connect E (pid %d \"%s\", id %d)", callingPid,
+ clientName8.string(), cameraId);
+
+ status_t status = validateConnect(cameraId, /*inout*/clientUid);
+ if (status != OK) {
+ return status;
}
- int deviceVersion;
- if (mModule->common.module_api_version == CAMERA_MODULE_API_VERSION_2_0) {
- deviceVersion = info.device_version;
- } else {
- deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+
+ sp<Client> client;
+ {
+ Mutex::Autolock lock(mServiceLock);
+ sp<BasicClient> clientTmp;
+ if (!canConnectUnsafe(cameraId, clientPackageName,
+ cameraClient->asBinder(),
+ /*out*/clientTmp)) {
+ return -EBUSY;
+ } else if (client.get() != NULL) {
+ device = static_cast<Client*>(clientTmp.get());
+ return OK;
+ }
+
+ int facing = -1;
+ int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+ // If there are other non-exclusive users of the camera,
+ // this will tear them down before we can reuse the camera
+ if (isValidCameraId(cameraId)) {
+ // transition from PRESENT -> NOT_AVAILABLE
+ updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
+ cameraId);
+ }
+
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ client = new CameraClient(this, cameraClient,
+ clientPackageName, cameraId,
+ facing, callingPid, clientUid, getpid());
+ break;
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ client = new Camera2Client(this, cameraClient,
+ clientPackageName, cameraId,
+ facing, callingPid, clientUid, getpid(),
+ deviceVersion);
+ break;
+ case -1:
+ ALOGE("Invalid camera id %d", cameraId);
+ return BAD_VALUE;
+ default:
+ ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+ return INVALID_OPERATION;
+ }
+
+ status_t status = connectFinishUnsafe(client, client->getRemote());
+ if (status != OK) {
+ // this is probably not recoverable.. maybe the client can try again
+ // OK: we can only get here if we were originally in PRESENT state
+ updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
+ return status;
+ }
+
+ mClient[cameraId] = client;
+ LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId,
+ getpid());
}
+ // important: release the mutex here so the client can call back
+ // into the service from its destructor (can be at the end of the call)
- switch(deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- client = new CameraClient(this, cameraClient, cameraId,
- info.facing, callingPid, getpid());
- break;
- case CAMERA_DEVICE_API_VERSION_2_0:
- client = new Camera2Client(this, cameraClient, cameraId,
- info.facing, callingPid, getpid());
- break;
- default:
- ALOGE("Unknown camera device HAL version: %d", deviceVersion);
- return NULL;
+ device = client;
+ return OK;
+}
+
+status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client,
+ const sp<IBinder>& remoteCallback) {
+ status_t status = client->initialize(mModule);
+ if (status != OK) {
+ return status;
}
- if (client->initialize(mModule) != OK) {
- return NULL;
+ remoteCallback->linkToDeath(this);
+
+ return OK;
+}
+
+status_t CameraService::connectPro(
+ const sp<IProCameraCallbacks>& cameraCb,
+ int cameraId,
+ const String16& clientPackageName,
+ int clientUid,
+ /*out*/
+ sp<IProCameraUser>& device)
+{
+ String8 clientName8(clientPackageName);
+ int callingPid = getCallingPid();
+
+ LOG1("CameraService::connectPro E (pid %d \"%s\", id %d)", callingPid,
+ clientName8.string(), cameraId);
+ status_t status = validateConnect(cameraId, /*inout*/clientUid);
+ if (status != OK) {
+ return status;
}
- cameraClient->asBinder()->linkToDeath(this);
+ sp<ProClient> client;
+ {
+ Mutex::Autolock lock(mServiceLock);
+ {
+ sp<BasicClient> client;
+ if (!canConnectUnsafe(cameraId, clientPackageName,
+ cameraCb->asBinder(),
+ /*out*/client)) {
+ return -EBUSY;
+ }
+ }
- mClient[cameraId] = client;
- LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid());
- return client;
+ int facing = -1;
+ int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ ALOGE("Camera id %d uses HALv1, doesn't support ProCamera",
+ cameraId);
+ return -EOPNOTSUPP;
+ break;
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ client = new ProCamera2Client(this, cameraCb, String16(),
+ cameraId, facing, callingPid, USE_CALLING_UID, getpid());
+ break;
+ case -1:
+ ALOGE("Invalid camera id %d", cameraId);
+ return BAD_VALUE;
+ default:
+ ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+ return INVALID_OPERATION;
+ }
+
+ status_t status = connectFinishUnsafe(client, client->getRemote());
+ if (status != OK) {
+ return status;
+ }
+
+ mProClientList[cameraId].push(client);
+
+ LOG1("CameraService::connectPro X (id %d, this pid is %d)", cameraId,
+ getpid());
+ }
+ // important: release the mutex here so the client can call back
+ // into the service from its destructor (can be at the end of the call)
+ device = client;
+ return OK;
}
-void CameraService::removeClient(const sp<ICameraClient>& cameraClient) {
+status_t CameraService::connectDevice(
+ const sp<ICameraDeviceCallbacks>& cameraCb,
+ int cameraId,
+ const String16& clientPackageName,
+ int clientUid,
+ /*out*/
+ sp<ICameraDeviceUser>& device)
+{
+
+ String8 clientName8(clientPackageName);
int callingPid = getCallingPid();
- LOG1("CameraService::removeClient E (pid %d)", callingPid);
+
+ LOG1("CameraService::connectDevice E (pid %d \"%s\", id %d)", callingPid,
+ clientName8.string(), cameraId);
+
+ status_t status = validateConnect(cameraId, /*inout*/clientUid);
+ if (status != OK) {
+ return status;
+ }
+
+ sp<CameraDeviceClient> client;
+ {
+ Mutex::Autolock lock(mServiceLock);
+ {
+ sp<BasicClient> client;
+ if (!canConnectUnsafe(cameraId, clientPackageName,
+ cameraCb->asBinder(),
+ /*out*/client)) {
+ return -EBUSY;
+ }
+ }
+
+ int facing = -1;
+ int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+ // If there are other non-exclusive users of the camera,
+ // this will tear them down before we can reuse the camera
+ if (isValidCameraId(cameraId)) {
+ // transition from PRESENT -> NOT_AVAILABLE
+ updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
+ cameraId);
+ }
+
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ ALOGW("Camera using old HAL version: %d", deviceVersion);
+ return -EOPNOTSUPP;
+ // TODO: don't allow 2.0 Only allow 2.1 and higher
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ client = new CameraDeviceClient(this, cameraCb, String16(),
+ cameraId, facing, callingPid, USE_CALLING_UID, getpid());
+ break;
+ case -1:
+ ALOGE("Invalid camera id %d", cameraId);
+ return BAD_VALUE;
+ default:
+ ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+ return INVALID_OPERATION;
+ }
+
+ status_t status = connectFinishUnsafe(client, client->getRemote());
+ if (status != OK) {
+ // this is probably not recoverable.. maybe the client can try again
+ // OK: we can only get here if we were originally in PRESENT state
+ updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
+ return status;
+ }
+
+ LOG1("CameraService::connectDevice X (id %d, this pid is %d)", cameraId,
+ getpid());
+
+ mClient[cameraId] = client;
+ }
+ // important: release the mutex here so the client can call back
+ // into the service from its destructor (can be at the end of the call)
+
+ device = client;
+ return OK;
+}
+
+
+status_t CameraService::addListener(
+ const sp<ICameraServiceListener>& listener) {
+ ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
+
+ Mutex::Autolock lock(mServiceLock);
+
+ Vector<sp<ICameraServiceListener> >::iterator it, end;
+ for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
+ if ((*it)->asBinder() == listener->asBinder()) {
+ ALOGW("%s: Tried to add listener %p which was already subscribed",
+ __FUNCTION__, listener.get());
+ return ALREADY_EXISTS;
+ }
+ }
+
+ mListenerList.push_back(listener);
+
+ /* Immediately signal current status to this listener only */
+ {
+ Mutex::Autolock m(mStatusMutex) ;
+ int numCams = getNumberOfCameras();
+ for (int i = 0; i < numCams; ++i) {
+ listener->onStatusChanged(mStatusList[i], i);
+ }
+ }
+
+ return OK;
+}
+status_t CameraService::removeListener(
+ const sp<ICameraServiceListener>& listener) {
+ ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get());
+
+ Mutex::Autolock lock(mServiceLock);
+
+ Vector<sp<ICameraServiceListener> >::iterator it;
+ for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
+ if ((*it)->asBinder() == listener->asBinder()) {
+ mListenerList.erase(it);
+ return OK;
+ }
+ }
+
+ ALOGW("%s: Tried to remove a listener %p which was not subscribed",
+ __FUNCTION__, listener.get());
+
+ return BAD_VALUE;
+}
+
+void CameraService::removeClientByRemote(const wp<IBinder>& remoteBinder) {
+ int callingPid = getCallingPid();
+ LOG1("CameraService::removeClientByRemote E (pid %d)", callingPid);
// Declare this before the lock to make absolutely sure the
// destructor won't be called with the lock held.
Mutex::Autolock lock(mServiceLock);
int outIndex;
- sp<Client> client = findClientUnsafe(cameraClient->asBinder(), outIndex);
+ sp<BasicClient> client = findClientUnsafe(remoteBinder, outIndex);
if (client != 0) {
// Found our camera, clear and leave.
LOG1("removeClient: clear camera %d", outIndex);
mClient[outIndex].clear();
- client->unlinkToDeath(this);
+ client->getRemote()->unlinkToDeath(this);
+ } else {
+
+ sp<ProClient> clientPro = findProClientUnsafe(remoteBinder);
+
+ if (clientPro != NULL) {
+ // Found our camera, clear and leave.
+ LOG1("removeClient: clear pro %p", clientPro.get());
+
+ clientPro->getRemoteCallback()->asBinder()->unlinkToDeath(this);
+ }
+ }
+
+ LOG1("CameraService::removeClientByRemote X (pid %d)", callingPid);
+}
+
+sp<CameraService::ProClient> CameraService::findProClientUnsafe(
+ const wp<IBinder>& cameraCallbacksRemote)
+{
+ sp<ProClient> clientPro;
+
+ for (int i = 0; i < mNumberOfCameras; ++i) {
+ Vector<size_t> removeIdx;
+
+ for (size_t j = 0; j < mProClientList[i].size(); ++j) {
+ wp<ProClient> cl = mProClientList[i][j];
+
+ sp<ProClient> clStrong = cl.promote();
+ if (clStrong != NULL && clStrong->getRemote() == cameraCallbacksRemote) {
+ clientPro = clStrong;
+ break;
+ } else if (clStrong == NULL) {
+ // mark to clean up dead ptr
+ removeIdx.push(j);
+ }
+ }
+
+        // remove stale ptrs (in reverse so the indices don't change)
+ for (ssize_t j = (ssize_t)removeIdx.size() - 1; j >= 0; --j) {
+ mProClientList[i].removeAt(removeIdx[j]);
+ }
+
}
- LOG1("CameraService::removeClient X (pid %d)", callingPid);
+ return clientPro;
}
-sp<CameraService::Client> CameraService::findClientUnsafe(
+sp<CameraService::BasicClient> CameraService::findClientUnsafe(
const wp<IBinder>& cameraClient, int& outIndex) {
- sp<Client> client;
+ sp<BasicClient> client;
for (int i = 0; i < mNumberOfCameras; i++) {
@@ -251,7 +773,7 @@ sp<CameraService::Client> CameraService::findClientUnsafe(
if (mClient[i] == 0) continue;
// Promote mClient. It can fail if we are called from this path:
- // Client::~Client() -> disconnect() -> removeClient().
+ // Client::~Client() -> disconnect() -> removeClientByRemote().
client = mClient[i].promote();
// Clean up stale client entry
@@ -260,7 +782,7 @@ sp<CameraService::Client> CameraService::findClientUnsafe(
continue;
}
- if (cameraClient == client->getCameraClient()->asBinder()) {
+ if (cameraClient == client->getRemote()) {
// Found our camera
outIndex = i;
return client;
@@ -271,7 +793,7 @@ sp<CameraService::Client> CameraService::findClientUnsafe(
return NULL;
}
-CameraService::Client* CameraService::getClientByIdUnsafe(int cameraId) {
+CameraService::BasicClient* CameraService::getClientByIdUnsafe(int cameraId) {
if (cameraId < 0 || cameraId >= mNumberOfCameras) return NULL;
return mClient[cameraId].unsafe_get();
}
@@ -281,12 +803,12 @@ Mutex* CameraService::getClientLockById(int cameraId) {
return &mClientLock[cameraId];
}
-sp<CameraService::Client> CameraService::getClientByRemote(
+sp<CameraService::BasicClient> CameraService::getClientByRemote(
const wp<IBinder>& cameraClient) {
// Declare this before the lock to make absolutely sure the
// destructor won't be called with the lock held.
- sp<Client> client;
+ sp<BasicClient> client;
Mutex::Autolock lock(mServiceLock);
@@ -301,6 +823,7 @@ status_t CameraService::onTransact(
// Permission checks
switch (code) {
case BnCameraService::CONNECT:
+ case BnCameraService::CONNECT_PRO:
const int pid = getCallingPid();
const int self_pid = getpid();
if (pid != self_pid) {
@@ -389,41 +912,145 @@ void CameraService::playSound(sound_kind kind) {
CameraService::Client::Client(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
- int cameraId, int cameraFacing, int clientPid, int servicePid) {
+ const String16& clientPackageName,
+ int cameraId, int cameraFacing,
+ int clientPid, uid_t clientUid,
+ int servicePid) :
+ CameraService::BasicClient(cameraService, cameraClient->asBinder(),
+ clientPackageName,
+ cameraId, cameraFacing,
+ clientPid, clientUid,
+ servicePid)
+{
int callingPid = getCallingPid();
LOG1("Client::Client E (pid %d, id %d)", callingPid, cameraId);
- mCameraService = cameraService;
- mCameraClient = cameraClient;
- mCameraId = cameraId;
- mCameraFacing = cameraFacing;
- mClientPid = clientPid;
- mServicePid = servicePid;
- mDestructionStarted = false;
+ mRemoteCallback = cameraClient;
cameraService->setCameraBusy(cameraId);
cameraService->loadSound();
+
LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId);
}
// tear down the client
CameraService::Client::~Client() {
- mCameraService->releaseSound();
+ ALOGV("~Client");
+ mDestructionStarted = true;
+ mCameraService->releaseSound();
// unconditionally disconnect. function is idempotent
Client::disconnect();
}
+CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
+ const sp<IBinder>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId, int cameraFacing,
+ int clientPid, uid_t clientUid,
+ int servicePid):
+ mClientPackageName(clientPackageName)
+{
+ mCameraService = cameraService;
+ mRemoteBinder = remoteCallback;
+ mCameraId = cameraId;
+ mCameraFacing = cameraFacing;
+ mClientPid = clientPid;
+ mClientUid = clientUid;
+ mServicePid = servicePid;
+ mOpsActive = false;
+ mDestructionStarted = false;
+}
+
+CameraService::BasicClient::~BasicClient() {
+ ALOGV("~BasicClient");
+ mDestructionStarted = true;
+}
+
+void CameraService::BasicClient::disconnect() {
+ ALOGV("BasicClient::disconnect");
+ mCameraService->removeClientByRemote(mRemoteBinder);
+ // client shouldn't be able to call into us anymore
+ mClientPid = 0;
+}
+
+status_t CameraService::BasicClient::startCameraOps() {
+ int32_t res;
+
+ mOpsCallback = new OpsCallback(this);
+
+ {
+ ALOGV("%s: Start camera ops, package name = %s, client UID = %d",
+ __FUNCTION__, String8(mClientPackageName).string(), mClientUid);
+ }
+
+ mAppOpsManager.startWatchingMode(AppOpsManager::OP_CAMERA,
+ mClientPackageName, mOpsCallback);
+ res = mAppOpsManager.startOp(AppOpsManager::OP_CAMERA,
+ mClientUid, mClientPackageName);
+
+ if (res != AppOpsManager::MODE_ALLOWED) {
+ ALOGI("Camera %d: Access for \"%s\" has been revoked",
+ mCameraId, String8(mClientPackageName).string());
+ return PERMISSION_DENIED;
+ }
+ mOpsActive = true;
+ return OK;
+}
+
+status_t CameraService::BasicClient::finishCameraOps() {
+ if (mOpsActive) {
+ mAppOpsManager.finishOp(AppOpsManager::OP_CAMERA, mClientUid,
+ mClientPackageName);
+ mOpsActive = false;
+ }
+ mAppOpsManager.stopWatchingMode(mOpsCallback);
+ mOpsCallback.clear();
+
+ return OK;
+}
+
+void CameraService::BasicClient::opChanged(int32_t op, const String16& packageName) {
+ String8 name(packageName);
+ String8 myName(mClientPackageName);
+
+ if (op != AppOpsManager::OP_CAMERA) {
+ ALOGW("Unexpected app ops notification received: %d", op);
+ return;
+ }
+
+ int32_t res;
+ res = mAppOpsManager.checkOp(AppOpsManager::OP_CAMERA,
+ mClientUid, mClientPackageName);
+ ALOGV("checkOp returns: %d, %s ", res,
+ res == AppOpsManager::MODE_ALLOWED ? "ALLOWED" :
+ res == AppOpsManager::MODE_IGNORED ? "IGNORED" :
+ res == AppOpsManager::MODE_ERRORED ? "ERRORED" :
+ "UNKNOWN");
+
+ if (res != AppOpsManager::MODE_ALLOWED) {
+ ALOGI("Camera %d: Access for \"%s\" revoked", mCameraId,
+ myName.string());
+ // Reset the client PID to allow server-initiated disconnect,
+ // and to prevent further calls by client.
+ mClientPid = getCallingPid();
+ notifyError();
+ disconnect();
+ }
+}
+
// ----------------------------------------------------------------------------
Mutex* CameraService::Client::getClientLockFromCookie(void* user) {
- return gCameraService->getClientLockById((int) user);
+ return gCameraService->getClientLockById((int)(intptr_t) user);
}
// Provide client pointer for callbacks. Client lock returned from getClientLockFromCookie should
// be acquired for this to be safe
CameraService::Client* CameraService::Client::getClientFromCookie(void* user) {
- Client* client = gCameraService->getClientByIdUnsafe((int) user);
+ BasicClient *basicClient = gCameraService->getClientByIdUnsafe((int)(intptr_t) user);
+ // OK: only CameraClient calls this, and they already cast anyway.
+ Client* client = static_cast<Client*>(basicClient);
// This could happen if the Client is in the process of shutting down (the
// last strong reference is gone, but the destructor hasn't finished
@@ -436,10 +1063,62 @@ CameraService::Client* CameraService::Client::getClientFromCookie(void* user) {
return client;
}
+void CameraService::Client::notifyError() {
+ mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+}
+
// NOTE: function is idempotent
void CameraService::Client::disconnect() {
- mCameraService->removeClient(mCameraClient);
+ ALOGV("Client::disconnect");
+ BasicClient::disconnect();
mCameraService->setCameraFree(mCameraId);
+
+ StatusVector rejectSourceStates;
+ rejectSourceStates.push_back(ICameraServiceListener::STATUS_NOT_PRESENT);
+ rejectSourceStates.push_back(ICameraServiceListener::STATUS_ENUMERATING);
+
+ // Transition to PRESENT if the camera is not in either of above 2 states
+ mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
+ mCameraId,
+ &rejectSourceStates);
+}
+
+CameraService::Client::OpsCallback::OpsCallback(wp<BasicClient> client):
+ mClient(client) {
+}
+
+void CameraService::Client::OpsCallback::opChanged(int32_t op,
+ const String16& packageName) {
+ sp<BasicClient> client = mClient.promote();
+ if (client != NULL) {
+ client->opChanged(op, packageName);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// IProCamera
+// ----------------------------------------------------------------------------
+
+CameraService::ProClient::ProClient(const sp<CameraService>& cameraService,
+ const sp<IProCameraCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid)
+ : CameraService::BasicClient(cameraService, remoteCallback->asBinder(),
+ clientPackageName, cameraId, cameraFacing,
+ clientPid, clientUid, servicePid)
+{
+ mRemoteCallback = remoteCallback;
+}
+
+CameraService::ProClient::~ProClient() {
+}
+
+void CameraService::ProClient::notifyError() {
+ mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
}
// ----------------------------------------------------------------------------
@@ -523,7 +1202,7 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
}
}
- sp<Client> client = mClient[i].promote();
+ sp<BasicClient> client = mClient[i].promote();
if (client == 0) {
result = String8::format(" Device is closed, no client instance\n");
write(fd, result.string(), result.size());
@@ -541,6 +1220,10 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
if (locked) mServiceLock.unlock();
+ // Dump camera traces if there were any
+ write(fd, "\n", 1);
+ camera3::CameraTraces::dump(fd, args);
+
// change logging level
int n = args.size();
for (int i = 0; i + 1 < n; i++) {
@@ -568,7 +1251,7 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
ALOGV("java clients' binder died");
- sp<Client> cameraClient = getClientByRemote(who);
+ sp<BasicClient> cameraClient = getClientByRemote(who);
if (cameraClient == 0) {
ALOGV("java clients' binder death already cleaned up (normal case)");
@@ -582,4 +1265,83 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) {
}
+void CameraService::updateStatus(ICameraServiceListener::Status status,
+ int32_t cameraId,
+ const StatusVector *rejectSourceStates) {
+ // do not lock mServiceLock here or can get into a deadlock from
+ // connect() -> ProClient::disconnect -> updateStatus
+ Mutex::Autolock lock(mStatusMutex);
+
+ ICameraServiceListener::Status oldStatus = mStatusList[cameraId];
+
+ mStatusList[cameraId] = status;
+
+ if (oldStatus != status) {
+ ALOGV("%s: Status has changed for camera ID %d from 0x%x to 0x%x",
+ __FUNCTION__, cameraId, (uint32_t)oldStatus, (uint32_t)status);
+
+ if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
+ (status != ICameraServiceListener::STATUS_PRESENT &&
+ status != ICameraServiceListener::STATUS_ENUMERATING)) {
+
+ ALOGW("%s: From NOT_PRESENT can only transition into PRESENT"
+ " or ENUMERATING", __FUNCTION__);
+ mStatusList[cameraId] = oldStatus;
+ return;
+ }
+
+ if (rejectSourceStates != NULL) {
+ const StatusVector &rejectList = *rejectSourceStates;
+ StatusVector::const_iterator it = rejectList.begin();
+
+ /**
+ * Sometimes we want to conditionally do a transition.
+ * For example if a client disconnects, we want to go to PRESENT
+ * only if we weren't already in NOT_PRESENT or ENUMERATING.
+ */
+ for (; it != rejectList.end(); ++it) {
+ if (oldStatus == *it) {
+ ALOGV("%s: Rejecting status transition for Camera ID %d, "
+                         " since the source state was in one of the bad "
+ " states.", __FUNCTION__, cameraId);
+ mStatusList[cameraId] = oldStatus;
+ return;
+ }
+ }
+ }
+
+ /**
+ * ProClients lose their exclusive lock.
+ * - Done before the CameraClient can initialize the HAL device,
+ * since we want to be able to close it before they get to initialize
+ */
+ if (status == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+ Vector<wp<ProClient> > proClients(mProClientList[cameraId]);
+ Vector<wp<ProClient> >::const_iterator it;
+
+ for (it = proClients.begin(); it != proClients.end(); ++it) {
+ sp<ProClient> proCl = it->promote();
+ if (proCl.get() != NULL) {
+ proCl->onExclusiveLockStolen();
+ }
+ }
+ }
+
+ Vector<sp<ICameraServiceListener> >::const_iterator it;
+ for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
+ (*it)->onStatusChanged(status, cameraId);
+ }
+ }
+}
+
+ICameraServiceListener::Status CameraService::getStatus(int cameraId) const {
+ if (cameraId < 0 || cameraId >= MAX_CAMERAS) {
+ ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+ return ICameraServiceListener::STATUS_UNKNOWN;
+ }
+
+ Mutex::Autolock al(mStatusMutex);
+ return mStatusList[cameraId];
+}
+
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 4dab340..ad6a582 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -18,10 +18,22 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
#define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
+#include <utils/Vector.h>
+#include <binder/AppOpsManager.h>
#include <binder/BinderService.h>
+#include <binder/IAppOpsCallback.h>
#include <camera/ICameraService.h>
#include <hardware/camera.h>
+#include <camera/ICamera.h>
+#include <camera/ICameraClient.h>
+#include <camera/IProCameraUser.h>
+#include <camera/IProCameraCallbacks.h>
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/ICameraDeviceCallbacks.h>
+
+#include <camera/ICameraServiceListener.h>
+
/* This needs to be increased if we can have more cameras */
#define MAX_CAMERAS 2
@@ -35,32 +47,64 @@ class MediaPlayer;
class CameraService :
public BinderService<CameraService>,
public BnCameraService,
- public IBinder::DeathRecipient
+ public IBinder::DeathRecipient,
+ public camera_module_callbacks_t
{
friend class BinderService<CameraService>;
public:
class Client;
+ class BasicClient;
+
+ // Implementation of BinderService<T>
static char const* getServiceName() { return "media.camera"; }
CameraService();
virtual ~CameraService();
+ /////////////////////////////////////////////////////////////////////
+ // HAL Callbacks
+ virtual void onDeviceStatusChanged(int cameraId,
+ int newStatus);
+
+ /////////////////////////////////////////////////////////////////////
+ // ICameraService
virtual int32_t getNumberOfCameras();
virtual status_t getCameraInfo(int cameraId,
struct CameraInfo* cameraInfo);
- virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId);
- virtual void removeClient(const sp<ICameraClient>& cameraClient);
- // returns plain pointer of client. Note that mClientLock should be acquired to
- // prevent the client from destruction. The result can be NULL.
- virtual Client* getClientByIdUnsafe(int cameraId);
- virtual Mutex* getClientLockById(int cameraId);
+ virtual status_t getCameraCharacteristics(int cameraId,
+ CameraMetadata* cameraInfo);
- virtual sp<Client> getClientByRemote(const wp<IBinder>& cameraClient);
+ virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
+ const String16& clientPackageName, int clientUid,
+ /*out*/
+ sp<ICamera>& device);
- virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
+ int cameraId, const String16& clientPackageName, int clientUid,
+ /*out*/
+ sp<IProCameraUser>& device);
+
+ virtual status_t connectDevice(
+ const sp<ICameraDeviceCallbacks>& cameraCb,
+ int cameraId,
+ const String16& clientPackageName,
+ int clientUid,
+ /*out*/
+ sp<ICameraDeviceUser>& device);
+
+ virtual status_t addListener(const sp<ICameraServiceListener>& listener);
+ virtual status_t removeListener(
+ const sp<ICameraServiceListener>& listener);
+
+ // Extra permissions checks
virtual status_t onTransact(uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags);
- virtual void onFirstRef();
+
+ virtual status_t dump(int fd, const Vector<String16>& args);
+
+ /////////////////////////////////////////////////////////////////////
+ // Client functionality
+ virtual void removeClientByRemote(const wp<IBinder>& remoteBinder);
enum sound_kind {
SOUND_SHUTTER = 0,
@@ -72,17 +116,108 @@ public:
void playSound(sound_kind kind);
void releaseSound();
- class Client : public BnCamera
+ /////////////////////////////////////////////////////////////////////
+ // CameraDeviceFactory functionality
+ int getDeviceVersion(int cameraId, int* facing = NULL);
+
+
+ /////////////////////////////////////////////////////////////////////
+ // CameraClient functionality
+
+ // returns plain pointer of client. Note that mClientLock should be acquired to
+ // prevent the client from destruction. The result can be NULL.
+ virtual BasicClient* getClientByIdUnsafe(int cameraId);
+ virtual Mutex* getClientLockById(int cameraId);
+
+ class BasicClient : public virtual RefBase {
+ public:
+ virtual status_t initialize(camera_module_t *module) = 0;
+
+ virtual void disconnect() = 0;
+
+ // because we can't virtually inherit IInterface, which breaks
+ // virtual inheritance
+ virtual sp<IBinder> asBinderWrapper() = 0;
+
+ // Return the remote callback binder object (e.g. IProCameraCallbacks)
+ sp<IBinder> getRemote() {
+ return mRemoteBinder;
+ }
+
+ virtual status_t dump(int fd, const Vector<String16>& args) = 0;
+
+ protected:
+ BasicClient(const sp<CameraService>& cameraService,
+ const sp<IBinder>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid);
+
+ virtual ~BasicClient();
+
+ // the instance is in the middle of destruction. When this is set,
+ // the instance should not be accessed from callback.
+ // CameraService's mClientLock should be acquired to access this.
+ // - subclasses should set this to true in their destructors.
+ bool mDestructionStarted;
+
+ // these are initialized in the constructor.
+ sp<CameraService> mCameraService; // immutable after constructor
+ int mCameraId; // immutable after constructor
+ int mCameraFacing; // immutable after constructor
+ const String16 mClientPackageName;
+ pid_t mClientPid;
+ uid_t mClientUid; // immutable after constructor
+ pid_t mServicePid; // immutable after constructor
+
+ // - The app-side Binder interface to receive callbacks from us
+ sp<IBinder> mRemoteBinder; // immutable after constructor
+
+ // permissions management
+ status_t startCameraOps();
+ status_t finishCameraOps();
+
+ // Notify client about a fatal error
+ virtual void notifyError() = 0;
+ private:
+ AppOpsManager mAppOpsManager;
+
+ class OpsCallback : public BnAppOpsCallback {
+ public:
+ OpsCallback(wp<BasicClient> client);
+ virtual void opChanged(int32_t op, const String16& packageName);
+
+ private:
+ wp<BasicClient> mClient;
+
+ }; // class OpsCallback
+
+ sp<OpsCallback> mOpsCallback;
+ // Track whether startCameraOps was called successfully, to avoid
+ // finishing what we didn't start.
+ bool mOpsActive;
+
+ // IAppOpsCallback interface, indirected through opListener
+ virtual void opChanged(int32_t op, const String16& packageName);
+ }; // class BasicClient
+
+ class Client : public BnCamera, public BasicClient
{
public:
+ typedef ICameraClient TCamCallbacks;
+
// ICamera interface (see ICamera for details)
virtual void disconnect();
virtual status_t connect(const sp<ICameraClient>& client) = 0;
virtual status_t lock() = 0;
virtual status_t unlock() = 0;
- virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0;
- virtual status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture) = 0;
+ virtual status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)=0;
virtual void setPreviewCallbackFlag(int flag) = 0;
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) = 0;
virtual status_t startPreview() = 0;
virtual void stopPreview() = 0;
virtual bool previewEnabled() = 0;
@@ -101,49 +236,117 @@ public:
// Interface used by CameraService
Client(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
+ const String16& clientPackageName,
int cameraId,
int cameraFacing,
int clientPid,
+ uid_t clientUid,
int servicePid);
~Client();
// return our camera client
- const sp<ICameraClient>& getCameraClient() {
- return mCameraClient;
+ const sp<ICameraClient>& getRemoteCallback() {
+ return mRemoteCallback;
}
- virtual status_t initialize(camera_module_t *module) = 0;
-
- virtual status_t dump(int fd, const Vector<String16>& args) = 0;
+ virtual sp<IBinder> asBinderWrapper() {
+ return asBinder();
+ }
protected:
static Mutex* getClientLockFromCookie(void* user);
// convert client from cookie. Client lock should be acquired before getting Client.
static Client* getClientFromCookie(void* user);
- // the instance is in the middle of destruction. When this is set,
- // the instance should not be accessed from callback.
- // CameraService's mClientLock should be acquired to access this.
- bool mDestructionStarted;
+ virtual void notifyError();
- // these are initialized in the constructor.
- sp<CameraService> mCameraService; // immutable after constructor
- sp<ICameraClient> mCameraClient;
- int mCameraId; // immutable after constructor
- int mCameraFacing; // immutable after constructor
- pid_t mClientPid;
- pid_t mServicePid; // immutable after constructor
+ // Initialized in constructor
- };
+ // - The app-side Binder interface to receive callbacks from us
+ sp<ICameraClient> mRemoteCallback;
+
+ }; // class Client
+
+ class ProClient : public BnProCameraUser, public BasicClient {
+ public:
+ typedef IProCameraCallbacks TCamCallbacks;
+
+ ProClient(const sp<CameraService>& cameraService,
+ const sp<IProCameraCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid);
+
+ virtual ~ProClient();
+
+ const sp<IProCameraCallbacks>& getRemoteCallback() {
+ return mRemoteCallback;
+ }
+
+ /***
+ IProCamera implementation
+ ***/
+ virtual status_t connect(const sp<IProCameraCallbacks>& callbacks)
+ = 0;
+ virtual status_t exclusiveTryLock() = 0;
+ virtual status_t exclusiveLock() = 0;
+ virtual status_t exclusiveUnlock() = 0;
+
+ virtual bool hasExclusiveLock() = 0;
+
+ // Note that the callee gets a copy of the metadata.
+ virtual int submitRequest(camera_metadata_t* metadata,
+ bool streaming = false) = 0;
+ virtual status_t cancelRequest(int requestId) = 0;
+
+ // Callbacks from camera service
+ virtual void onExclusiveLockStolen() = 0;
+
+ protected:
+ virtual void notifyError();
+
+ sp<IProCameraCallbacks> mRemoteCallback;
+ }; // class ProClient
private:
+
+ // Delay-load the Camera HAL module
+ virtual void onFirstRef();
+
+ // Step 1. Check if we can connect, before we acquire the service lock.
+ status_t validateConnect(int cameraId,
+ /*inout*/
+ int& clientUid) const;
+
+ // Step 2. Check if we can connect, after we acquire the service lock.
+ bool canConnectUnsafe(int cameraId,
+ const String16& clientPackageName,
+ const sp<IBinder>& remoteCallback,
+ /*out*/
+ sp<BasicClient> &client);
+
+ // When connection is successful, initialize client and track its death
+ status_t connectFinishUnsafe(const sp<BasicClient>& client,
+ const sp<IBinder>& remoteCallback);
+
+ virtual sp<BasicClient> getClientByRemote(const wp<IBinder>& cameraClient);
+
Mutex mServiceLock;
- wp<Client> mClient[MAX_CAMERAS]; // protected by mServiceLock
+ // either a Client or CameraDeviceClient
+ wp<BasicClient> mClient[MAX_CAMERAS]; // protected by mServiceLock
Mutex mClientLock[MAX_CAMERAS]; // prevent Client destruction inside callbacks
int mNumberOfCameras;
+ typedef wp<ProClient> weak_pro_client_ptr;
+ Vector<weak_pro_client_ptr> mProClientList[MAX_CAMERAS];
+
// needs to be called with mServiceLock held
- sp<Client> findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex);
+ sp<BasicClient> findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex);
+ sp<ProClient> findProClientUnsafe(
+ const wp<IBinder>& cameraCallbacksRemote);
// atomics to record whether the hardware is allocated to some client.
volatile int32_t mBusy[MAX_CAMERAS];
@@ -159,8 +362,31 @@ private:
camera_module_t *mModule;
+ Vector<sp<ICameraServiceListener> >
+ mListenerList;
+
+ // guard only mStatusList and the broadcasting of ICameraServiceListener
+ mutable Mutex mStatusMutex;
+ ICameraServiceListener::Status
+ mStatusList[MAX_CAMERAS];
+
+ // Read the current status (locks mStatusMutex)
+ ICameraServiceListener::Status
+ getStatus(int cameraId) const;
+
+ typedef Vector<ICameraServiceListener::Status> StatusVector;
+ // Broadcast the new status if it changed (locks the service mutex)
+ void updateStatus(
+ ICameraServiceListener::Status status,
+ int32_t cameraId,
+ const StatusVector *rejectSourceStates = NULL);
+
// IBinder::DeathRecipient implementation
- virtual void binderDied(const wp<IBinder> &who);
+ virtual void binderDied(const wp<IBinder> &who);
+
+ // Helpers
+
+ bool isValidCameraId(int cameraId);
};
} // namespace android
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index e59a240..ba1e772 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -14,18 +14,25 @@
* limitations under the License.
*/
-#define LOG_TAG "Camera2"
+#define LOG_TAG "Camera2Client"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
+#include <inttypes.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
#include <gui/Surface.h>
-#include "camera2/Parameters.h"
-#include "Camera2Client.h"
+
+#include "api1/Camera2Client.h"
+
+#include "api1/client2/StreamingProcessor.h"
+#include "api1/client2/JpegProcessor.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/CallbackProcessor.h"
+#include "api1/client2/ZslProcessor.h"
+#include "api1/client2/ZslProcessor3.h"
#define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
#define ALOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
@@ -37,70 +44,58 @@ static int getCallingPid() {
return IPCThreadState::self()->getCallingPid();
}
-static int getCallingUid() {
- return IPCThreadState::self()->getCallingUid();
-}
-
// Interface used by CameraService
Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
+ const String16& clientPackageName,
int cameraId,
int cameraFacing,
int clientPid,
- int servicePid):
- Client(cameraService, cameraClient,
- cameraId, cameraFacing, clientPid, servicePid),
- mSharedCameraClient(cameraClient),
- mParameters(cameraId, cameraFacing)
+ uid_t clientUid,
+ int servicePid,
+ int deviceVersion):
+ Camera2ClientBase(cameraService, cameraClient, clientPackageName,
+ cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ mParameters(cameraId, cameraFacing),
+ mDeviceVersion(deviceVersion)
{
ATRACE_CALL();
- ALOGI("Camera %d: Opened", cameraId);
-
- mDevice = new Camera2Device(cameraId);
SharedParameters::Lock l(mParameters);
l.mParameters.state = Parameters::DISCONNECTED;
}
-status_t Camera2Client::checkPid(const char* checkLocation) const {
- int callingPid = getCallingPid();
- if (callingPid == mClientPid) return NO_ERROR;
-
- ALOGE("%s: attempt to use a locked camera from a different process"
- " (old pid %d, new pid %d)", checkLocation, mClientPid, callingPid);
- return PERMISSION_DENIED;
-}
-
status_t Camera2Client::initialize(camera_module_t *module)
{
ATRACE_CALL();
ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
status_t res;
- res = mDevice->initialize(module);
+ res = Camera2ClientBase::initialize(module);
if (res != OK) {
- ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return NO_INIT;
+ return res;
}
- res = mDevice->setNotifyCallback(this);
-
- SharedParameters::Lock l(mParameters);
+ {
+ SharedParameters::Lock l(mParameters);
- res = l.mParameters.initialize(&(mDevice->info()));
- if (res != OK) {
- ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return NO_INIT;
+ res = l.mParameters.initialize(&(mDevice->info()));
+ if (res != OK) {
+ ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return NO_INIT;
+ }
}
String8 threadName;
mStreamingProcessor = new StreamingProcessor(this);
+ threadName = String8::format("C2-%d-StreamProc",
+ mCameraId);
+ mStreamingProcessor->run(threadName.string());
- mFrameProcessor = new FrameProcessor(this);
+ mFrameProcessor = new FrameProcessor(mDevice, this);
threadName = String8::format("C2-%d-FrameProc",
mCameraId);
mFrameProcessor->run(threadName.string());
@@ -115,10 +110,27 @@ status_t Camera2Client::initialize(camera_module_t *module)
mCameraId);
mJpegProcessor->run(threadName.string());
- mZslProcessor = new ZslProcessor(this, mCaptureSequencer);
+ switch (mDeviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_2_0: {
+ sp<ZslProcessor> zslProc =
+ new ZslProcessor(this, mCaptureSequencer);
+ mZslProcessor = zslProc;
+ mZslProcessorThread = zslProc;
+ break;
+ }
+ case CAMERA_DEVICE_API_VERSION_3_0:{
+ sp<ZslProcessor3> zslProc =
+ new ZslProcessor3(this, mCaptureSequencer);
+ mZslProcessor = zslProc;
+ mZslProcessorThread = zslProc;
+ break;
+ }
+ default:
+ break;
+ }
threadName = String8::format("C2-%d-ZslProc",
mCameraId);
- mZslProcessor->run(threadName.string());
+ mZslProcessorThread->run(threadName.string());
mCallbackProcessor = new CallbackProcessor(this);
threadName = String8::format("C2-%d-CallbkProc",
@@ -126,6 +138,7 @@ status_t Camera2Client::initialize(camera_module_t *module)
mCallbackProcessor->run(threadName.string());
if (gLogLevel >= 1) {
+ SharedParameters::Lock l(mParameters);
ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
mCameraId);
ALOGD("%s", l.mParameters.paramsFlattened.string());
@@ -136,6 +149,7 @@ status_t Camera2Client::initialize(camera_module_t *module)
Camera2Client::~Camera2Client() {
ATRACE_CALL();
+ ALOGV("~Camera2Client");
mDestructionStarted = true;
@@ -146,9 +160,10 @@ Camera2Client::~Camera2Client() {
status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
String8 result;
- result.appendFormat("Client2[%d] (%p) PID: %d, dump:\n",
+ result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n",
mCameraId,
- getCameraClient()->asBinder().get(),
+ getRemoteCallback()->asBinder().get(),
+ String8(mClientPackageName).string(),
mClientPid);
result.append(" State: ");
#define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break;
@@ -179,7 +194,7 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
result.appendFormat(" GPS lat x long x alt: %f x %f x %f\n",
p.gpsCoordinates[0], p.gpsCoordinates[1],
p.gpsCoordinates[2]);
- result.appendFormat(" GPS timestamp: %lld\n",
+ result.appendFormat(" GPS timestamp: %" PRId64 "\n",
p.gpsTimestamp);
result.appendFormat(" GPS processing method: %s\n",
p.gpsProcessingMethod.string());
@@ -187,37 +202,37 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
result.append(" White balance mode: ");
switch (p.wbMode) {
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_AUTO)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_INCANDESCENT)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_FLUORESCENT)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_WARM_FLUORESCENT)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_DAYLIGHT)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_CLOUDY_DAYLIGHT)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_TWILIGHT)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_SHADE)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_AUTO)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_INCANDESCENT)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_FLUORESCENT)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_DAYLIGHT)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_TWILIGHT)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_SHADE)
default: result.append("UNKNOWN\n");
}
result.append(" Effect mode: ");
switch (p.effectMode) {
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_OFF)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MONO)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_NEGATIVE)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_SOLARIZE)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_SEPIA)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_POSTERIZE)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_WHITEBOARD)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_BLACKBOARD)
- CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_AQUA)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_OFF)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_MONO)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_NEGATIVE)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SOLARIZE)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SEPIA)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_POSTERIZE)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_AQUA)
default: result.append("UNKNOWN\n");
}
result.append(" Antibanding mode: ");
switch (p.antibandingMode) {
- CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_AUTO)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_OFF)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_50HZ)
- CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_60HZ)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ)
default: result.append("UNKNOWN\n");
}
@@ -272,6 +287,7 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_INACTIVE)
CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN)
CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED)
+ CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED)
CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN)
CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED)
CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED)
@@ -315,6 +331,10 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
result.appendFormat(" Video stabilization is %s\n",
p.videoStabilization ? "enabled" : "disabled");
+ result.appendFormat(" Selected still capture FPS range: %d - %d\n",
+ p.fastInfo.bestStillCaptureFpsRange[0],
+ p.fastInfo.bestStillCaptureFpsRange[1]);
+
result.append(" Current streams:\n");
result.appendFormat(" Preview stream ID: %d\n",
getPreviewStreamId());
@@ -337,6 +357,10 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
result.appendFormat(" meteringCropRegion\n");
haveQuirk = true;
}
+ if (p.quirks.partialResults) {
+ result.appendFormat(" usePartialResult\n");
+ haveQuirk = true;
+ }
if (!haveQuirk) {
result.appendFormat(" none\n");
}
@@ -351,26 +375,15 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
mZslProcessor->dump(fd, args);
- result = " Device dump:\n";
- write(fd, result.string(), result.size());
-
- status_t res = mDevice->dump(fd, args);
- if (res != OK) {
- result = String8::format(" Error dumping device: %s (%d)",
- strerror(-res), res);
- write(fd, result.string(), result.size());
- }
-
+ return dumpDevice(fd, args);
#undef CASE_APPEND_ENUM
- return NO_ERROR;
}
// ICamera interface
void Camera2Client::disconnect() {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
- status_t res;
+ Mutex::Autolock icl(mBinderSerializationLock);
// Allow both client and the media server to disconnect at all times
int callingPid = getCallingPid();
@@ -380,6 +393,12 @@ void Camera2Client::disconnect() {
ALOGV("Camera %d: Shutting down", mCameraId);
+ /**
+ * disconnect() cannot call any methods that might need to promote a
+ * wp<Camera2Client>, since disconnect can be called from the destructor, at
+ * which point all such promotions will fail.
+ */
+
stopPreviewL();
{
@@ -394,18 +413,20 @@ void Camera2Client::disconnect() {
mCallbackProcessor->deleteStream();
mZslProcessor->deleteStream();
+ mStreamingProcessor->requestExit();
mFrameProcessor->requestExit();
mCaptureSequencer->requestExit();
mJpegProcessor->requestExit();
- mZslProcessor->requestExit();
+ mZslProcessorThread->requestExit();
mCallbackProcessor->requestExit();
ALOGV("Camera %d: Waiting for threads", mCameraId);
+ mStreamingProcessor->join();
mFrameProcessor->join();
mCaptureSequencer->join();
mJpegProcessor->join();
- mZslProcessor->join();
+ mZslProcessorThread->join();
mCallbackProcessor->join();
ALOGV("Camera %d: Disconnecting device", mCameraId);
@@ -420,7 +441,7 @@ void Camera2Client::disconnect() {
status_t Camera2Client::connect(const sp<ICameraClient>& client) {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
if (mClientPid != 0 && getCallingPid() != mClientPid) {
ALOGE("%s: Camera %d: Connection attempt from pid %d; "
@@ -431,8 +452,8 @@ status_t Camera2Client::connect(const sp<ICameraClient>& client) {
mClientPid = getCallingPid();
- mCameraClient = client;
- mSharedCameraClient = client;
+ mRemoteCallback = client;
+ mSharedCameraCallbacks = client;
return OK;
}
@@ -440,7 +461,7 @@ status_t Camera2Client::connect(const sp<ICameraClient>& client) {
status_t Camera2Client::lock() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
ALOGV("%s: Camera %d: Lock call from pid %d; current client pid %d",
__FUNCTION__, mCameraId, getCallingPid(), mClientPid);
@@ -461,7 +482,7 @@ status_t Camera2Client::lock() {
status_t Camera2Client::unlock() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
ALOGV("%s: Camera %d: Unlock call from pid %d; current client pid %d",
__FUNCTION__, mCameraId, getCallingPid(), mClientPid);
@@ -473,8 +494,8 @@ status_t Camera2Client::unlock() {
return INVALID_OPERATION;
}
mClientPid = 0;
- mCameraClient.clear();
- mSharedCameraClient.clear();
+ mRemoteCallback.clear();
+ mSharedCameraCallbacks.clear();
return OK;
}
@@ -483,37 +504,22 @@ status_t Camera2Client::unlock() {
return EBUSY;
}
-status_t Camera2Client::setPreviewDisplay(
- const sp<Surface>& surface) {
+status_t Camera2Client::setPreviewTarget(
+ const sp<IGraphicBufferProducer>& bufferProducer) {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
sp<IBinder> binder;
sp<ANativeWindow> window;
- if (surface != 0) {
- binder = surface->asBinder();
- window = surface;
- }
-
- return setPreviewWindowL(binder,window);
-}
-
-status_t Camera2Client::setPreviewTexture(
- const sp<ISurfaceTexture>& surfaceTexture) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
- sp<IBinder> binder;
- sp<ANativeWindow> window;
- if (surfaceTexture != 0) {
- binder = surfaceTexture->asBinder();
- window = new SurfaceTextureClient(surfaceTexture);
+ if (bufferProducer != 0) {
+ binder = bufferProducer->asBinder();
+ // Using controlledByApp flag to ensure that the buffer queue remains in
+ // async mode for the old camera API, where many applications depend
+ // on that behavior.
+ window = new Surface(bufferProducer, /*controlledByApp*/ true);
}
return setPreviewWindowL(binder, window);
}
@@ -549,7 +555,12 @@ status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
break;
case Parameters::PREVIEW:
// Already running preview - need to stop and create a new stream
- mStreamingProcessor->stopStream();
+ res = stopStream();
+ if (res != OK) {
+ ALOGE("%s: Unable to stop preview to swap windows: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
state = Parameters::WAITING_FOR_PREVIEW_WINDOW;
break;
}
@@ -574,8 +585,8 @@ status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
void Camera2Client::setPreviewCallbackFlag(int flag) {
ATRACE_CALL();
ALOGV("%s: Camera %d: Flag 0x%x", __FUNCTION__, mCameraId, flag);
- Mutex::Autolock icl(mICameraLock);
- status_t res;
+ Mutex::Autolock icl(mBinderSerializationLock);
+
if ( checkPid(__FUNCTION__) != OK) return;
SharedParameters::Lock l(mParameters);
@@ -584,36 +595,110 @@ void Camera2Client::setPreviewCallbackFlag(int flag) {
void Camera2Client::setPreviewCallbackFlagL(Parameters &params, int flag) {
status_t res = OK;
+
+ switch(params.state) {
+ case Parameters::STOPPED:
+ case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+ case Parameters::PREVIEW:
+ case Parameters::STILL_CAPTURE:
+ // OK
+ break;
+ default:
+ if (flag & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+ ALOGE("%s: Camera %d: Can't use preview callbacks "
+ "in state %d", __FUNCTION__, mCameraId, params.state);
+ return;
+ }
+ }
+
if (flag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) {
ALOGV("%s: setting oneshot", __FUNCTION__);
params.previewCallbackOneShot = true;
}
if (params.previewCallbackFlags != (uint32_t)flag) {
+
+ if (params.previewCallbackSurface && flag != CAMERA_FRAME_CALLBACK_FLAG_NOOP) {
+ // Disable any existing preview callback window when enabling
+ // preview callback flags
+ res = mCallbackProcessor->setCallbackWindow(NULL);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to clear preview callback surface:"
+ " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
+ return;
+ }
+ params.previewCallbackSurface = false;
+ }
+
params.previewCallbackFlags = flag;
- switch(params.state) {
- case Parameters::PREVIEW:
+
+ if (params.state == Parameters::PREVIEW) {
res = startPreviewL(params, true);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+ __FUNCTION__, mCameraId,
+ Parameters::getStateName(params.state));
+ }
+ }
+ }
+}
+
+status_t Camera2Client::setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) {
+ ATRACE_CALL();
+ ALOGV("%s: E", __FUNCTION__);
+ Mutex::Autolock icl(mBinderSerializationLock);
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ sp<ANativeWindow> window;
+ if (callbackProducer != 0) {
+ window = new Surface(callbackProducer);
+ }
+
+ res = mCallbackProcessor->setCallbackWindow(window);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set preview callback surface: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+
+ SharedParameters::Lock l(mParameters);
+
+ if (window != NULL) {
+ // Disable traditional callbacks when a valid callback target is given
+ l.mParameters.previewCallbackFlags = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+ l.mParameters.previewCallbackOneShot = false;
+ l.mParameters.previewCallbackSurface = true;
+ } else {
+ // Disable callback target if given a NULL interface.
+ l.mParameters.previewCallbackSurface = false;
+ }
+
+ switch(l.mParameters.state) {
+ case Parameters::PREVIEW:
+ res = startPreviewL(l.mParameters, true);
break;
case Parameters::RECORD:
case Parameters::VIDEO_SNAPSHOT:
- res = startRecordingL(params, true);
+ res = startRecordingL(l.mParameters, true);
break;
default:
break;
- }
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to refresh request in state %s",
- __FUNCTION__, mCameraId,
- Parameters::getStateName(params.state));
- }
+ }
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+ __FUNCTION__, mCameraId,
+ Parameters::getStateName(l.mParameters.state));
}
+ return OK;
}
+
status_t Camera2Client::startPreview() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
SharedParameters::Lock l(mParameters);
@@ -655,10 +740,48 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
return res;
}
- Vector<uint8_t> outputStreams;
- bool callbacksEnabled = params.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
+ // We could wait to create the JPEG output stream until first actual use
+ // (first takePicture call). However, this would substantially increase the
+ // first capture latency on HAL3 devices, and potentially on some HAL2
+ // devices. So create it unconditionally at preview start. As a drawback,
+ // this increases gralloc memory consumption for applications that don't
+ // ever take a picture.
+ // TODO: Find a better compromise, though this likely would involve HAL
+ // changes.
+ res = updateProcessorStream(mJpegProcessor, params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't pre-configure still image "
+ "stream: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+
+ Vector<int32_t> outputStreams;
+ bool callbacksEnabled = (params.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+ params.previewCallbackSurface;
+
if (callbacksEnabled) {
+ // Can't have recording stream hanging around when enabling callbacks,
+ // since it exceeds the max stream count on some devices.
+ if (mStreamingProcessor->getRecordingStreamId() != NO_STREAM) {
+ ALOGV("%s: Camera %d: Clearing out recording stream before "
+ "creating callback stream", __FUNCTION__, mCameraId);
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to delete "
+ "recording stream", __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mStreamingProcessor->deleteRecordingStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete recording stream before "
+ "enabling callbacks: %s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
res = mCallbackProcessor->updateStream(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update callback stream: %s (%d)",
@@ -668,7 +791,7 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
outputStreams.push(getCallbackStreamId());
}
if (params.zslMode && !params.recordingHint) {
- res = mZslProcessor->updateStream(params);
+ res = updateProcessorStream(mZslProcessor, params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -692,18 +815,6 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
res = mStreamingProcessor->startStream(StreamingProcessor::PREVIEW,
outputStreams);
} else {
- // With recording hint set, we're going to be operating under the
- // assumption that the user will record video. To optimize recording
- // startup time, create the necessary output streams for recording and
- // video snapshot now if they don't already exist.
- res = mJpegProcessor->updateStream(params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't pre-configure still image "
- "stream: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
-
if (!restart) {
res = mStreamingProcessor->updateRecordingRequest(params);
if (res != OK) {
@@ -729,7 +840,7 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
void Camera2Client::stopPreview() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return;
stopPreviewL();
@@ -747,8 +858,7 @@ void Camera2Client::stopPreviewL() {
switch (state) {
case Parameters::DISCONNECTED:
- ALOGE("%s: Camera %d: Call before initialized",
- __FUNCTION__, mCameraId);
+ // Nothing to do.
break;
case Parameters::STOPPED:
case Parameters::VIDEO_SNAPSHOT:
@@ -757,7 +867,12 @@ void Camera2Client::stopPreviewL() {
// no break
case Parameters::RECORD:
case Parameters::PREVIEW:
- mStreamingProcessor->stopStream();
+ syncWithDevice();
+ res = stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
res = mDevice->waitUntilDrained();
if (res != OK) {
ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
@@ -778,7 +893,7 @@ void Camera2Client::stopPreviewL() {
bool Camera2Client::previewEnabled() {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return false;
@@ -788,7 +903,7 @@ bool Camera2Client::previewEnabled() {
status_t Camera2Client::storeMetaDataInBuffers(bool enabled) {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -813,7 +928,7 @@ status_t Camera2Client::storeMetaDataInBuffers(bool enabled) {
status_t Camera2Client::startRecording() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
SharedParameters::Lock l(mParameters);
@@ -864,25 +979,40 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
}
}
- res = mStreamingProcessor->updateRecordingStream(params);
+ // Not all devices can support a preview callback stream and a recording
+ // stream at the same time, so assume none of them can.
+ if (mCallbackProcessor->getStreamId() != NO_STREAM) {
+ ALOGV("%s: Camera %d: Clearing out callback stream before "
+ "creating recording stream", __FUNCTION__, mCameraId);
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mCallbackProcessor->deleteStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete callback stream before "
+ "record: %s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
+ }
+ // Disable callbacks if they're enabled; can't record and use callbacks,
+ // and we can't fail record start without stagefright asserting.
+ params.previewCallbackFlags = 0;
+
+ res = updateProcessorStream<
+ StreamingProcessor,
+ &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
+ params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- Vector<uint8_t> outputStreams;
- bool callbacksEnabled = params.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
- if (callbacksEnabled) {
- res = mCallbackProcessor->updateStream(params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update callback stream: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- outputStreams.push(getCallbackStreamId());
- }
+ Vector<int32_t> outputStreams;
outputStreams.push(getPreviewStreamId());
outputStreams.push(getRecordingStreamId());
@@ -904,7 +1034,7 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
void Camera2Client::stopRecording() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
SharedParameters::Lock l(mParameters);
status_t res;
@@ -936,7 +1066,7 @@ void Camera2Client::stopRecording() {
bool Camera2Client::recordingEnabled() {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
if ( checkPid(__FUNCTION__) != OK) return false;
@@ -953,7 +1083,7 @@ bool Camera2Client::recordingEnabledL() {
void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
if ( checkPid(__FUNCTION__) != OK) return;
mStreamingProcessor->releaseRecordingFrame(mem);
@@ -961,7 +1091,7 @@ void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) {
status_t Camera2Client::autoFocus() {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -979,8 +1109,12 @@ status_t Camera2Client::autoFocus() {
* If the camera does not support auto-focus, it is a no-op and
* onAutoFocus(boolean, Camera) callback will be called immediately
* with a fake value of success set to true.
+ *
+ * Similarly, if focus mode is set to INFINITY, there's no reason to
+ * bother the HAL.
*/
- if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
+ if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED ||
+ l.mParameters.focusMode == Parameters::FOCUS_MODE_INFINITY) {
notifyImmediately = true;
notifySuccess = true;
}
@@ -999,9 +1133,9 @@ status_t Camera2Client::autoFocus() {
* Send immediate notification back to client
*/
if (notifyImmediately) {
- SharedCameraClient::Lock l(mSharedCameraClient);
- if (l.mCameraClient != 0) {
- l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS,
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+ if (l.mRemoteCallback != 0) {
+ l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS,
notifySuccess ? 1 : 0, 0);
}
return OK;
@@ -1023,6 +1157,8 @@ status_t Camera2Client::autoFocus() {
l.mParameters.currentAfTriggerId = ++l.mParameters.afTriggerCounter;
triggerId = l.mParameters.currentAfTriggerId;
}
+ ATRACE_ASYNC_BEGIN(kAutofocusLabel, triggerId);
+
syncWithDevice();
mDevice->triggerAutofocus(triggerId);
@@ -1032,7 +1168,7 @@ status_t Camera2Client::autoFocus() {
status_t Camera2Client::cancelAutoFocus() {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -1040,6 +1176,17 @@ status_t Camera2Client::cancelAutoFocus() {
int triggerId;
{
SharedParameters::Lock l(mParameters);
+ // Canceling does nothing in FIXED or INFINITY modes
+ if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED ||
+ l.mParameters.focusMode == Parameters::FOCUS_MODE_INFINITY) {
+ return OK;
+ }
+
+ // An active AF trigger is canceled
+ if (l.mParameters.afTriggerCounter == l.mParameters.currentAfTriggerId) {
+ ATRACE_ASYNC_END(kAutofocusLabel, l.mParameters.currentAfTriggerId);
+ }
+
triggerId = ++l.mParameters.afTriggerCounter;
// When using triggerAfWithAuto quirk, may need to reset focus mode to
@@ -1064,10 +1211,11 @@ status_t Camera2Client::cancelAutoFocus() {
status_t Camera2Client::takePicture(int msgType) {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ int takePictureCounter;
{
SharedParameters::Lock l(mParameters);
switch (l.mParameters.state) {
@@ -1100,18 +1248,21 @@ status_t Camera2Client::takePicture(int msgType) {
ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
- res = mJpegProcessor->updateStream(l.mParameters);
+ res = updateProcessorStream(mJpegProcessor, l.mParameters);
if (res != OK) {
ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+ takePictureCounter = ++l.mParameters.takePictureCounter;
}
+ ATRACE_ASYNC_BEGIN(kTakepictureLabel, takePictureCounter);
+
// Need HAL to have correct settings before (possibly) triggering precapture
syncWithDevice();
- res = mCaptureSequencer->startCapture();
+ res = mCaptureSequencer->startCapture(msgType);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1123,7 +1274,7 @@ status_t Camera2Client::takePicture(int msgType) {
status_t Camera2Client::setParameters(const String8& params) {
ATRACE_CALL();
ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -1140,7 +1291,7 @@ status_t Camera2Client::setParameters(const String8& params) {
String8 Camera2Client::getParameters() const {
ATRACE_CALL();
ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
if ( checkPid(__FUNCTION__) != OK) return String8();
SharedParameters::ReadLock l(mParameters);
@@ -1150,7 +1301,7 @@ String8 Camera2Client::getParameters() const {
status_t Camera2Client::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
ATRACE_CALL();
- Mutex::Autolock icl(mICameraLock);
+ Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -1244,7 +1395,7 @@ status_t Camera2Client::commandPlayRecordingSoundL() {
return OK;
}
-status_t Camera2Client::commandStartFaceDetectionL(int type) {
+status_t Camera2Client::commandStartFaceDetectionL(int /*type*/) {
ALOGV("%s: Camera %d: Starting face detection",
__FUNCTION__, mCameraId);
status_t res;
@@ -1265,10 +1416,10 @@ status_t Camera2Client::commandStartFaceDetectionL(int type) {
}
// Ignoring type
if (l.mParameters.fastInfo.bestFaceDetectMode ==
- ANDROID_STATS_FACE_DETECTION_OFF) {
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) {
ALOGE("%s: Camera %d: Face detection not supported",
__FUNCTION__, mCameraId);
- return INVALID_OPERATION;
+ return BAD_VALUE;
}
if (l.mParameters.enableFaceDetect) return OK;
@@ -1325,16 +1476,6 @@ status_t Camera2Client::commandSetVideoBufferCountL(size_t count) {
}
/** Device-related methods */
-
-void Camera2Client::notifyError(int errorCode, int arg1, int arg2) {
- ALOGE("Error condition %d reported by HAL, arguments %d, %d", errorCode, arg1, arg2);
-}
-
-void Camera2Client::notifyShutter(int frameNumber, nsecs_t timestamp) {
- ALOGV("%s: Shutter notification for frame %d at time %lld", __FUNCTION__,
- frameNumber, timestamp);
-}
-
void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
ALOGV("%s: Autofocus state now %d, last trigger %d",
__FUNCTION__, newState, triggerId);
@@ -1345,7 +1486,24 @@ void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
bool afInMotion = false;
{
SharedParameters::Lock l(mParameters);
+ // Trace end of AF state
+ char tmp[32];
+ if (l.mParameters.afStateCounter > 0) {
+ camera_metadata_enum_snprint(
+ ANDROID_CONTROL_AF_STATE, l.mParameters.focusState, tmp, sizeof(tmp));
+ ATRACE_ASYNC_END(tmp, l.mParameters.afStateCounter);
+ }
+
+ // Update state
l.mParameters.focusState = newState;
+ l.mParameters.afStateCounter++;
+
+ // Trace start of AF state
+
+ camera_metadata_enum_snprint(
+ ANDROID_CONTROL_AF_STATE, l.mParameters.focusState, tmp, sizeof(tmp));
+ ATRACE_ASYNC_BEGIN(tmp, l.mParameters.afStateCounter);
+
switch (l.mParameters.focusMode) {
case Parameters::FOCUS_MODE_AUTO:
case Parameters::FOCUS_MODE_MACRO:
@@ -1367,6 +1525,7 @@ void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
case ANDROID_CONTROL_AF_STATE_INACTIVE:
case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED:
default:
// Unexpected in AUTO/MACRO mode
ALOGE("%s: Unexpected AF state transition in AUTO/MACRO mode: %d",
@@ -1409,6 +1568,7 @@ void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
afInMotion = true;
// no break
case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED:
// Stop passive scan, inform upstream
if (l.mParameters.enableFocusMoveMessages) {
sendMovingMessage = true;
@@ -1430,16 +1590,17 @@ void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
}
}
if (sendMovingMessage) {
- SharedCameraClient::Lock l(mSharedCameraClient);
- if (l.mCameraClient != 0) {
- l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS_MOVE,
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+ if (l.mRemoteCallback != 0) {
+ l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS_MOVE,
afInMotion ? 1 : 0, 0);
}
}
if (sendCompletedMessage) {
- SharedCameraClient::Lock l(mSharedCameraClient);
- if (l.mCameraClient != 0) {
- l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS,
+ ATRACE_ASYNC_END(kAutofocusLabel, triggerId);
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+ if (l.mRemoteCallback != 0) {
+ l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS,
success ? 1 : 0, 0);
}
}
@@ -1451,23 +1612,6 @@ void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
mCaptureSequencer->notifyAutoExposure(newState, triggerId);
}
-void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) {
- ALOGV("%s: Auto-whitebalance state now %d, last trigger %d",
- __FUNCTION__, newState, triggerId);
-}
-
-int Camera2Client::getCameraId() const {
- return mCameraId;
-}
-
-const sp<Camera2Device>& Camera2Client::getCameraDevice() {
- return mDevice;
-}
-
-const sp<CameraService>& Camera2Client::getCameraService() {
- return mCameraService;
-}
-
camera2::SharedParameters& Camera2Client::getParameters() {
return mParameters;
}
@@ -1506,32 +1650,6 @@ status_t Camera2Client::stopStream() {
return mStreamingProcessor->stopStream();
}
-Camera2Client::SharedCameraClient::Lock::Lock(SharedCameraClient &client):
- mCameraClient(client.mCameraClient),
- mSharedClient(client) {
- mSharedClient.mCameraClientLock.lock();
-}
-
-Camera2Client::SharedCameraClient::Lock::~Lock() {
- mSharedClient.mCameraClientLock.unlock();
-}
-
-Camera2Client::SharedCameraClient::SharedCameraClient(const sp<ICameraClient>&client):
- mCameraClient(client) {
-}
-
-Camera2Client::SharedCameraClient& Camera2Client::SharedCameraClient::operator=(
- const sp<ICameraClient>&client) {
- Mutex::Autolock l(mCameraClientLock);
- mCameraClient = client;
- return *this;
-}
-
-void Camera2Client::SharedCameraClient::clear() {
- Mutex::Autolock l(mCameraClientLock);
- mCameraClient.clear();
-}
-
const int32_t Camera2Client::kPreviewRequestIdStart;
const int32_t Camera2Client::kPreviewRequestIdEnd;
const int32_t Camera2Client::kRecordingRequestIdStart;
@@ -1633,4 +1751,63 @@ status_t Camera2Client::syncWithDevice() {
return res;
}
+template <typename ProcessorT>
+status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor,
+ camera2::Parameters params) {
+ // No default template arguments until C++11, so we need this overload
+ return updateProcessorStream<ProcessorT, &ProcessorT::updateStream>(
+ processor, params);
+}
+
+template <typename ProcessorT,
+ status_t (ProcessorT::*updateStreamF)(const Parameters &)>
+status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor,
+ Parameters params) {
+ status_t res;
+
+ // Get raw pointer since sp<T> doesn't have operator->*
+ ProcessorT *processorPtr = processor.get();
+ res = (processorPtr->*updateStreamF)(params);
+
+ /**
+ * Can't update the stream if it's busy?
+ *
+ * Then we need to stop the device (by temporarily clearing the request
+ * queue) and then try again. Resume streaming once we're done.
+ */
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Pausing to update stream", __FUNCTION__,
+ mCameraId);
+ res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+
+ res = (processorPtr->*updateStreamF)(params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Failed to update processing stream "
+ " despite having halted streaming first: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+
+ res = mStreamingProcessor->togglePauseStream(/*pause*/false);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ }
+
+ return res;
+}
+
+const char* Camera2Client::kAutofocusLabel = "autofocus";
+const char* Camera2Client::kTakepictureLabel = "take_picture";
+
} // namespace android
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 55ead02..fe0bf74 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -17,26 +17,36 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H
#define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H
-#include "Camera2Device.h"
#include "CameraService.h"
-#include "camera2/Parameters.h"
-#include "camera2/FrameProcessor.h"
-#include "camera2/StreamingProcessor.h"
-#include "camera2/JpegProcessor.h"
-#include "camera2/ZslProcessor.h"
-#include "camera2/CaptureSequencer.h"
-#include "camera2/CallbackProcessor.h"
+#include "common/CameraDeviceBase.h"
+#include "common/Camera2ClientBase.h"
+#include "api1/client2/Parameters.h"
+#include "api1/client2/FrameProcessor.h"
+//#include "api1/client2/StreamingProcessor.h"
+//#include "api1/client2/JpegProcessor.h"
+//#include "api1/client2/ZslProcessorInterface.h"
+//#include "api1/client2/CaptureSequencer.h"
+//#include "api1/client2/CallbackProcessor.h"
namespace android {
+namespace camera2 {
+
+class StreamingProcessor;
+class JpegProcessor;
+class ZslProcessorInterface;
+class CaptureSequencer;
+class CallbackProcessor;
+
+}
+
class IMemory;
/**
- * Implements the android.hardware.camera API on top of
- * camera device HAL version 2.
+ * Interface between android.hardware.Camera API and Camera HAL device for versions
+ * CAMERA_DEVICE_API_VERSION_2_0 and 3_0.
*/
class Camera2Client :
- public CameraService::Client,
- public Camera2Device::NotificationListener
+ public Camera2ClientBase<CameraService::Client>
{
public:
/**
@@ -47,10 +57,12 @@ public:
virtual status_t connect(const sp<ICameraClient>& client);
virtual status_t lock();
virtual status_t unlock();
- virtual status_t setPreviewDisplay(const sp<Surface>& surface);
- virtual status_t setPreviewTexture(
- const sp<ISurfaceTexture>& surfaceTexture);
+ virtual status_t setPreviewTarget(
+ const sp<IGraphicBufferProducer>& bufferProducer);
virtual void setPreviewCallbackFlag(int flag);
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer);
+
virtual status_t startPreview();
virtual void stopPreview();
virtual bool previewEnabled();
@@ -72,10 +84,14 @@ public:
Camera2Client(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
+ const String16& clientPackageName,
int cameraId,
int cameraFacing,
int clientPid,
- int servicePid);
+ uid_t clientUid,
+ int servicePid,
+ int deviceVersion);
+
virtual ~Camera2Client();
status_t initialize(camera_module_t *module);
@@ -83,22 +99,16 @@ public:
virtual status_t dump(int fd, const Vector<String16>& args);
/**
- * Interface used by Camera2Device
+ * Interface used by CameraDeviceBase
*/
- virtual void notifyError(int errorCode, int arg1, int arg2);
- virtual void notifyShutter(int frameNumber, nsecs_t timestamp);
virtual void notifyAutoFocus(uint8_t newState, int triggerId);
virtual void notifyAutoExposure(uint8_t newState, int triggerId);
- virtual void notifyAutoWhitebalance(uint8_t newState, int triggerId);
/**
* Interface used by independent components of Camera2Client.
*/
- int getCameraId() const;
- const sp<Camera2Device>& getCameraDevice();
- const sp<CameraService>& getCameraService();
camera2::SharedParameters& getParameters();
int getPreviewStreamId() const;
@@ -114,27 +124,6 @@ public:
status_t stopStream();
- // Simple class to ensure that access to ICameraClient is serialized by
- // requiring mCameraClientLock to be locked before access to mCameraClient
- // is possible.
- class SharedCameraClient {
- public:
- class Lock {
- public:
- Lock(SharedCameraClient &client);
- ~Lock();
- sp<ICameraClient> &mCameraClient;
- private:
- SharedCameraClient &mSharedClient;
- };
- SharedCameraClient(const sp<ICameraClient>& client);
- SharedCameraClient& operator=(const sp<ICameraClient>& client);
- void clear();
- private:
- sp<ICameraClient> mCameraClient;
- mutable Mutex mCameraClientLock;
- } mSharedCameraClient;
-
static size_t calculateBufferSize(int width, int height,
int format, int stride);
@@ -147,17 +136,13 @@ public:
static const int32_t kCaptureRequestIdStart = 30000000;
static const int32_t kCaptureRequestIdEnd = 40000000;
+ // Constant strings for ATRACE logging
+ static const char* kAutofocusLabel;
+ static const char* kTakepictureLabel;
+
private:
/** ICamera interface-related private members */
-
- // Mutex that must be locked by methods implementing the ICamera interface.
- // Ensures serialization between incoming ICamera calls. All methods below
- // that append 'L' to the name assume that mICameraLock is locked when
- // they're called
- mutable Mutex mICameraLock;
-
typedef camera2::Parameters Parameters;
- typedef camera2::CameraMetadata CameraMetadata;
status_t setPreviewWindowL(const sp<IBinder>& binder,
sp<ANativeWindow> window);
@@ -185,10 +170,17 @@ private:
void setPreviewCallbackFlagL(Parameters &params, int flag);
status_t updateRequests(Parameters &params);
+ int mDeviceVersion;
// Used with stream IDs
static const int NO_STREAM = -1;
+ template <typename ProcessorT>
+ status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params);
+ template <typename ProcessorT,
+ status_t (ProcessorT::*updateStreamF)(const Parameters &)>
+ status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params);
+
sp<camera2::FrameProcessor> mFrameProcessor;
/* Preview/Recording related members */
@@ -204,23 +196,17 @@ private:
sp<camera2::CaptureSequencer> mCaptureSequencer;
sp<camera2::JpegProcessor> mJpegProcessor;
- sp<camera2::ZslProcessor> mZslProcessor;
+ sp<camera2::ZslProcessorInterface> mZslProcessor;
+ sp<Thread> mZslProcessorThread;
/** Notification-related members */
bool mAfInMotion;
- /** Camera2Device instance wrapping HAL2 entry */
-
- sp<Camera2Device> mDevice;
-
/** Utility members */
// Wait until the camera device has received the latest control settings
status_t syncWithDevice();
-
- // Verify that caller is the owner of the camera
- status_t checkPid(const char *checkLocation) const;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index b930c02..30b7bb8 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -18,11 +18,10 @@
//#define LOG_NDEBUG 0
#include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
#include <gui/Surface.h>
-#include "CameraClient.h"
-#include "CameraHardwareInterface.h"
+#include "api1/CameraClient.h"
+#include "device1/CameraHardwareInterface.h"
#include "CameraService.h"
namespace android {
@@ -34,15 +33,14 @@ static int getCallingPid() {
return IPCThreadState::self()->getCallingPid();
}
-static int getCallingUid() {
- return IPCThreadState::self()->getCallingUid();
-}
-
CameraClient::CameraClient(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
- int cameraId, int cameraFacing, int clientPid, int servicePid):
- Client(cameraService, cameraClient,
- cameraId, cameraFacing, clientPid, servicePid)
+ const String16& clientPackageName,
+ int cameraId, int cameraFacing,
+ int clientPid, int clientUid,
+ int servicePid):
+ Client(cameraService, cameraClient, clientPackageName,
+ cameraId, cameraFacing, clientPid, clientUid, servicePid)
{
int callingPid = getCallingPid();
LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);
@@ -62,10 +60,17 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService,
status_t CameraClient::initialize(camera_module_t *module) {
int callingPid = getCallingPid();
+ status_t res;
+
LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);
+ // Verify ops permissions
+ res = startCameraOps();
+ if (res != OK) {
+ return res;
+ }
+
char camera_device_name[10];
- status_t res;
snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
mHardware = new CameraHardwareInterface(camera_device_name);
@@ -80,7 +85,7 @@ status_t CameraClient::initialize(camera_module_t *module) {
mHardware->setCallbacks(notifyCallback,
dataCallback,
dataCallbackTimestamp,
- (void *)mCameraId);
+ (void *)(uintptr_t)mCameraId);
// Enable zoom, error, focus, and metadata messages by default
enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
@@ -112,7 +117,7 @@ status_t CameraClient::dump(int fd, const Vector<String16>& args) {
size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) PID: %d\n",
mCameraId,
- getCameraClient()->asBinder().get(),
+ getRemoteCallback()->asBinder().get(),
mClientPid);
len = (len > SIZE - 1) ? SIZE - 1 : len;
write(fd, buffer, len);
@@ -168,10 +173,10 @@ status_t CameraClient::unlock() {
return INVALID_OPERATION;
}
mClientPid = 0;
- LOG1("clear mCameraClient (pid %d)", callingPid);
+ LOG1("clear mRemoteCallback (pid %d)", callingPid);
// we need to remove the reference to ICameraClient so that when the app
// goes away, the reference count goes to 0.
- mCameraClient.clear();
+ mRemoteCallback.clear();
}
return result;
}
@@ -188,14 +193,15 @@ status_t CameraClient::connect(const sp<ICameraClient>& client) {
return EBUSY;
}
- if (mCameraClient != 0 && (client->asBinder() == mCameraClient->asBinder())) {
+ if (mRemoteCallback != 0 &&
+ (client->asBinder() == mRemoteCallback->asBinder())) {
LOG1("Connect to the same client");
return NO_ERROR;
}
mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
mClientPid = callingPid;
- mCameraClient = client;
+ mRemoteCallback = client;
LOG1("connect X (pid %d)", callingPid);
return NO_ERROR;
@@ -302,26 +308,20 @@ status_t CameraClient::setPreviewWindow(const sp<IBinder>& binder,
return result;
}
-// set the Surface that the preview will use
-status_t CameraClient::setPreviewDisplay(const sp<Surface>& surface) {
- LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid());
-
- sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0);
- sp<ANativeWindow> window(surface);
- return setPreviewWindow(binder, window);
-}
-
-// set the SurfaceTexture that the preview will use
-status_t CameraClient::setPreviewTexture(
- const sp<ISurfaceTexture>& surfaceTexture) {
- LOG1("setPreviewTexture(%p) (pid %d)", surfaceTexture.get(),
+// set the buffer consumer that the preview will use
+status_t CameraClient::setPreviewTarget(
+ const sp<IGraphicBufferProducer>& bufferProducer) {
+ LOG1("setPreviewTarget(%p) (pid %d)", bufferProducer.get(),
getCallingPid());
sp<IBinder> binder;
sp<ANativeWindow> window;
- if (surfaceTexture != 0) {
- binder = surfaceTexture->asBinder();
- window = new SurfaceTextureClient(surfaceTexture);
+ if (bufferProducer != 0) {
+ binder = bufferProducer->asBinder();
+ // Using controlledByApp flag to ensure that the buffer queue remains in
+ // async mode for the old camera API, where many applications depend
+ // on that behavior.
+ window = new Surface(bufferProducer, /*controlledByApp*/ true);
}
return setPreviewWindow(binder, window);
}
@@ -341,6 +341,13 @@ void CameraClient::setPreviewCallbackFlag(int callback_flag) {
}
}
+status_t CameraClient::setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) {
+ (void)callbackProducer;
+ ALOGE("%s: Unimplemented!", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
// start preview mode
status_t CameraClient::startPreview() {
LOG1("startPreview (pid %d)", getCallingPid());
@@ -775,7 +782,7 @@ void CameraClient::handleShutter(void) {
mCameraService->playSound(CameraService::SOUND_SHUTTER);
}
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
if (c != 0) {
mLock.unlock();
c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0);
@@ -806,7 +813,7 @@ void CameraClient::handlePreviewData(int32_t msgType,
}
// hold a strong pointer to the client
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
// clear callback flags if no client or one-shot mode
if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) {
@@ -836,7 +843,7 @@ void CameraClient::handlePreviewData(int32_t msgType,
void CameraClient::handlePostview(const sp<IMemory>& mem) {
disableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(CAMERA_MSG_POSTVIEW_FRAME, mem, NULL);
@@ -851,7 +858,7 @@ void CameraClient::handleRawPicture(const sp<IMemory>& mem) {
size_t size;
sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(CAMERA_MSG_RAW_IMAGE, mem, NULL);
@@ -862,7 +869,7 @@ void CameraClient::handleRawPicture(const sp<IMemory>& mem) {
void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
@@ -872,7 +879,7 @@ void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
void CameraClient::handleGenericNotify(int32_t msgType,
int32_t ext1, int32_t ext2) {
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->notifyCallback(msgType, ext1, ext2);
@@ -881,7 +888,7 @@ void CameraClient::handleGenericNotify(int32_t msgType,
void CameraClient::handleGenericData(int32_t msgType,
const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) {
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(msgType, dataPtr, metadata);
@@ -890,7 +897,7 @@ void CameraClient::handleGenericData(int32_t msgType,
void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp,
int32_t msgType, const sp<IMemory>& dataPtr) {
- sp<ICameraClient> c = mCameraClient;
+ sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 2f31c4e..4b89564 100644
--- a/services/camera/libcameraservice/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -24,6 +24,11 @@ namespace android {
class MemoryHeapBase;
class CameraHardwareInterface;
+/**
+ * Interface between android.hardware.Camera API and Camera HAL device for version
+ * CAMERA_DEVICE_API_VERSION_1_0.
+ */
+
class CameraClient : public CameraService::Client
{
public:
@@ -32,9 +37,10 @@ public:
virtual status_t connect(const sp<ICameraClient>& client);
virtual status_t lock();
virtual status_t unlock();
- virtual status_t setPreviewDisplay(const sp<Surface>& surface);
- virtual status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture);
+ virtual status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer);
virtual void setPreviewCallbackFlag(int flag);
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer);
virtual status_t startPreview();
virtual void stopPreview();
virtual bool previewEnabled();
@@ -53,9 +59,11 @@ public:
// Interface used by CameraService
CameraClient(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
+ const String16& clientPackageName,
int cameraId,
int cameraFacing,
int clientPid,
+ int clientUid,
int servicePid);
~CameraClient();
@@ -124,7 +132,7 @@ private:
// Ensures atomicity among the public methods
mutable Mutex mLock;
- // This is a binder of Surface or SurfaceTexture.
+ // This is a binder of Surface or Surface.
sp<IBinder> mSurface;
sp<ANativeWindow> mPreviewWindow;
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
index f56c50c..0bfdfd4 100644
--- a/services/camera/libcameraservice/camera2/BurstCapture.cpp
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
@@ -22,8 +22,8 @@
#include "BurstCapture.h"
-#include "../Camera2Client.h"
-#include "JpegCompressor.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/JpegCompressor.h"
namespace android {
namespace camera2 {
@@ -38,7 +38,8 @@ BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequen
BurstCapture::~BurstCapture() {
}
-status_t BurstCapture::start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId) {
+status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/,
+ int32_t /*firstCaptureId*/) {
ALOGE("Not completely implemented");
return INVALID_OPERATION;
}
@@ -75,7 +76,7 @@ bool BurstCapture::threadLoop() {
CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
CpuConsumer::LockedBuffer *imgBuffer,
- int quality)
+ int /*quality*/)
{
ALOGV("%s", __FUNCTION__);
@@ -91,7 +92,7 @@ CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
buffers.push_back(imgEncoded);
sp<JpegCompressor> jpeg = new JpegCompressor();
- status_t res = jpeg->start(buffers, 1);
+ jpeg->start(buffers, 1);
bool success = jpeg->waitForDone(10 * 1e9);
if(success) {
@@ -103,7 +104,7 @@ CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
}
}
-status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &client) {
+status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) {
ALOGE("Not implemented");
return INVALID_OPERATION;
}
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h
index dfc45eb..ea321fd 100644
--- a/services/camera/libcameraservice/camera2/BurstCapture.h
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.h
@@ -17,11 +17,12 @@
#ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
#define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
-#include "camera2/CameraMetadata.h"
+#include <camera/CameraMetadata.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <gui/CpuConsumer.h>
-#include "Camera2Device.h"
+
+#include "device2/Camera2Device.h"
namespace android {
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
new file mode 100644
index 0000000..d2ac79c
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-CallbackProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CallbackProcessor.h"
+
+#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
+
+namespace android {
+namespace camera2 {
+
+CallbackProcessor::CallbackProcessor(sp<Camera2Client> client):
+ Thread(false),
+ mClient(client),
+ mDevice(client->getCameraDevice()),
+ mId(client->getCameraId()),
+ mCallbackAvailable(false),
+ mCallbackToApp(false),
+ mCallbackStreamId(NO_STREAM) {
+}
+
+CallbackProcessor::~CallbackProcessor() {
+ ALOGV("%s: Exit", __FUNCTION__);
+ deleteStream();
+}
+
+void CallbackProcessor::onFrameAvailable() {
+ Mutex::Autolock l(mInputMutex);
+ if (!mCallbackAvailable) {
+ mCallbackAvailable = true;
+ mCallbackAvailableSignal.signal();
+ }
+}
+
+status_t CallbackProcessor::setCallbackWindow(
+ sp<ANativeWindow> callbackWindow) {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return OK;
+ sp<CameraDeviceBase> device = client->getCameraDevice();
+
+ // If the window is changing, clear out stream if it already exists
+ if (mCallbackWindow != callbackWindow && mCallbackStreamId != NO_STREAM) {
+ res = device->deleteStream(mCallbackStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old stream "
+ "for callbacks: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mCallbackStreamId = NO_STREAM;
+ mCallbackConsumer.clear();
+ }
+ mCallbackWindow = callbackWindow;
+ mCallbackToApp = (mCallbackWindow != NULL);
+
+ return OK;
+}
+
+status_t CallbackProcessor::updateStream(const Parameters &params) {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ // If possible, use the flexible YUV format
+ int32_t callbackFormat = params.previewFormat;
+ if (mCallbackToApp) {
+ // TODO: etalvala: This should use the flexible YUV format as well, but
+ // need to reconcile HAL2/HAL3 requirements.
+ callbackFormat = HAL_PIXEL_FORMAT_YV12;
+ } else if(params.fastInfo.useFlexibleYuv &&
+ (params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+ params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) {
+ callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
+ }
+
+ if (!mCallbackToApp && mCallbackConsumer == 0) {
+ // Create CPU buffer queue endpoint, since app hasn't given us one
+ // Make it async to avoid disconnect deadlocks
+ sp<BufferQueue> bq = new BufferQueue();
+ mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
+ mCallbackConsumer->setFrameAvailableListener(this);
+ mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
+ mCallbackWindow = new Surface(bq);
+ }
+
+ if (mCallbackStreamId != NO_STREAM) {
+ // Check if stream parameters have to change
+ uint32_t currentWidth, currentHeight, currentFormat;
+ res = device->getStreamInfo(mCallbackStreamId,
+ &currentWidth, &currentHeight, &currentFormat);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error querying callback output stream info: "
+ "%s (%d)", __FUNCTION__, mId,
+ strerror(-res), res);
+ return res;
+ }
+ if (currentWidth != (uint32_t)params.previewWidth ||
+ currentHeight != (uint32_t)params.previewHeight ||
+ currentFormat != (uint32_t)callbackFormat) {
+ // Since size should only change while preview is not running,
+ // assuming that all existing use of old callback stream is
+ // completed.
+ ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+ "parameters changed", __FUNCTION__, mId, mCallbackStreamId);
+ res = device->deleteStream(mCallbackStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for callbacks: %s (%d)", __FUNCTION__,
+ mId, strerror(-res), res);
+ return res;
+ }
+ mCallbackStreamId = NO_STREAM;
+ }
+ }
+
+ if (mCallbackStreamId == NO_STREAM) {
+ ALOGV("Creating callback stream: %d x %d, format 0x%x, API format 0x%x",
+ params.previewWidth, params.previewHeight,
+ callbackFormat, params.previewFormat);
+ res = device->createStream(mCallbackWindow,
+ params.previewWidth, params.previewHeight,
+ callbackFormat, 0, &mCallbackStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
+ "%s (%d)", __FUNCTION__, mId,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ return OK;
+}
+
+status_t CallbackProcessor::deleteStream() {
+ ATRACE_CALL();
+ sp<CameraDeviceBase> device;
+ status_t res;
+ {
+ Mutex::Autolock l(mInputMutex);
+
+ if (mCallbackStreamId == NO_STREAM) {
+ return OK;
+ }
+ device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ }
+ res = device->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Error waiting for HAL to drain: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ res = device->deleteStream(mCallbackStreamId);
+ if (res != OK) {
+ ALOGE("%s: Unable to delete callback stream: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ {
+ Mutex::Autolock l(mInputMutex);
+
+ mCallbackHeap.clear();
+ mCallbackWindow.clear();
+ mCallbackConsumer.clear();
+
+ mCallbackStreamId = NO_STREAM;
+ }
+ return OK;
+}
+
+int CallbackProcessor::getStreamId() const {
+ Mutex::Autolock l(mInputMutex);
+ return mCallbackStreamId;
+}
+
+void CallbackProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
+}
+
+bool CallbackProcessor::threadLoop() {
+ status_t res;
+
+ {
+ Mutex::Autolock l(mInputMutex);
+ while (!mCallbackAvailable) {
+ res = mCallbackAvailableSignal.waitRelative(mInputMutex,
+ kWaitDuration);
+ if (res == TIMED_OUT) return true;
+ }
+ mCallbackAvailable = false;
+ }
+
+ do {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ res = discardNewCallback();
+ } else {
+ res = processNewCallback(client);
+ }
+ } while (res == OK);
+
+ return true;
+}
+
+status_t CallbackProcessor::discardNewCallback() {
+ ATRACE_CALL();
+ status_t res;
+ CpuConsumer::LockedBuffer imgBuffer;
+ res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
+ if (res != OK) {
+ if (res != BAD_VALUE) {
+ ALOGE("%s: Camera %d: Error receiving next callback buffer: "
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ }
+ return res;
+ }
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return OK;
+}
+
+status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) {
+ ATRACE_CALL();
+ status_t res;
+
+ sp<Camera2Heap> callbackHeap;
+ bool useFlexibleYuv = false;
+ int32_t previewFormat = 0;
+ size_t heapIdx;
+
+ {
+ /* acquire SharedParameters before mMutex so we don't dead lock
+ with Camera2Client code calling into StreamingProcessor */
+ SharedParameters::Lock l(client->getParameters());
+ Mutex::Autolock m(mInputMutex);
+ CpuConsumer::LockedBuffer imgBuffer;
+ if (mCallbackStreamId == NO_STREAM) {
+ ALOGV("%s: Camera %d:No stream is available"
+ , __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ ALOGV("%s: Getting buffer", __FUNCTION__);
+ res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
+ if (res != OK) {
+ if (res != BAD_VALUE) {
+ ALOGE("%s: Camera %d: Error receiving next callback buffer: "
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ }
+ return res;
+ }
+ ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__,
+ mId);
+
+ if ( l.mParameters.state != Parameters::PREVIEW
+ && l.mParameters.state != Parameters::RECORD
+ && l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
+ ALOGV("%s: Camera %d: No longer streaming",
+ __FUNCTION__, mId);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return OK;
+ }
+
+ if (! (l.mParameters.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ) {
+ ALOGV("%s: No longer enabled, dropping", __FUNCTION__);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return OK;
+ }
+ if ((l.mParameters.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) &&
+ !l.mParameters.previewCallbackOneShot) {
+ ALOGV("%s: One shot mode, already sent, dropping", __FUNCTION__);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return OK;
+ }
+
+ if (imgBuffer.width != static_cast<uint32_t>(l.mParameters.previewWidth) ||
+ imgBuffer.height != static_cast<uint32_t>(l.mParameters.previewHeight)) {
+ ALOGW("%s: The preview size has changed to %d x %d from %d x %d, this buffer is"
+ " no longer valid, dropping",__FUNCTION__,
+ l.mParameters.previewWidth, l.mParameters.previewHeight,
+ imgBuffer.width, imgBuffer.height);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return OK;
+ }
+
+ previewFormat = l.mParameters.previewFormat;
+ useFlexibleYuv = l.mParameters.fastInfo.useFlexibleYuv &&
+ (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+ previewFormat == HAL_PIXEL_FORMAT_YV12);
+
+ int32_t expectedFormat = useFlexibleYuv ?
+ HAL_PIXEL_FORMAT_YCbCr_420_888 : previewFormat;
+
+ if (imgBuffer.format != expectedFormat) {
+ ALOGE("%s: Camera %d: Unexpected format for callback: "
+ "0x%x, expected 0x%x", __FUNCTION__, mId,
+ imgBuffer.format, expectedFormat);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return INVALID_OPERATION;
+ }
+
+ // In one-shot mode, stop sending callbacks after the first one
+ if (l.mParameters.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) {
+ ALOGV("%s: clearing oneshot", __FUNCTION__);
+ l.mParameters.previewCallbackOneShot = false;
+ }
+
+ uint32_t destYStride = 0;
+ uint32_t destCStride = 0;
+ if (useFlexibleYuv) {
+ if (previewFormat == HAL_PIXEL_FORMAT_YV12) {
+ // Strides must align to 16 for YV12
+ destYStride = ALIGN(imgBuffer.width, 16);
+ destCStride = ALIGN(destYStride / 2, 16);
+ } else {
+ // No padding for NV21
+ ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP,
+ "Unexpected preview format 0x%x", previewFormat);
+ destYStride = imgBuffer.width;
+ destCStride = destYStride / 2;
+ }
+ } else {
+ destYStride = imgBuffer.stride;
+ // don't care about cStride
+ }
+
+ size_t bufferSize = Camera2Client::calculateBufferSize(
+ imgBuffer.width, imgBuffer.height,
+ previewFormat, destYStride);
+ size_t currentBufferSize = (mCallbackHeap == 0) ?
+ 0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount);
+ if (bufferSize != currentBufferSize) {
+ mCallbackHeap.clear();
+ mCallbackHeap = new Camera2Heap(bufferSize, kCallbackHeapCount,
+ "Camera2Client::CallbackHeap");
+ if (mCallbackHeap->mHeap->getSize() == 0) {
+ ALOGE("%s: Camera %d: Unable to allocate memory for callbacks",
+ __FUNCTION__, mId);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return INVALID_OPERATION;
+ }
+
+ mCallbackHeapHead = 0;
+ mCallbackHeapFree = kCallbackHeapCount;
+ }
+
+ if (mCallbackHeapFree == 0) {
+ ALOGE("%s: Camera %d: No free callback buffers, dropping frame",
+ __FUNCTION__, mId);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return OK;
+ }
+
+ heapIdx = mCallbackHeapHead;
+
+ mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
+ mCallbackHeapFree--;
+
+ // TODO: Get rid of this copy by passing the gralloc queue all the way
+ // to app
+
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap =
+ mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset,
+ &size);
+ uint8_t *data = (uint8_t*)heap->getBase() + offset;
+
+ if (!useFlexibleYuv) {
+ // Can just memcpy when HAL format matches API format
+ memcpy(data, imgBuffer.data, bufferSize);
+ } else {
+ res = convertFromFlexibleYuv(previewFormat, data, imgBuffer,
+ destYStride, destCStride);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't convert between 0x%x and 0x%x formats!",
+ __FUNCTION__, mId, imgBuffer.format, previewFormat);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+ return BAD_VALUE;
+ }
+ }
+
+ ALOGV("%s: Freeing buffer", __FUNCTION__);
+ mCallbackConsumer->unlockBuffer(imgBuffer);
+
+ // mCallbackHeap may get freed up once input mutex is released
+ callbackHeap = mCallbackHeap;
+ }
+
+ // Call outside parameter lock to allow re-entrancy from notification
+ {
+ Camera2Client::SharedCameraCallbacks::Lock
+ l(client->mSharedCameraCallbacks);
+ if (l.mRemoteCallback != 0) {
+ ALOGV("%s: Camera %d: Invoking client data callback",
+ __FUNCTION__, mId);
+ l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_FRAME,
+ callbackHeap->mBuffers[heapIdx], NULL);
+ }
+ }
+
+ // Only increment free if we're still using the same heap
+ mCallbackHeapFree++;
+
+ ALOGV("%s: exit", __FUNCTION__);
+
+ return OK;
+}
+
+/*
+ * Convert a flexible-YUV (HAL_PIXEL_FORMAT_YCbCr_420_888) locked buffer into
+ * the app-visible preview callback format, writing into dst.
+ *
+ * previewFormat: destination layout; only NV21 (YCrCb_420_SP) and YV12 are
+ *                supported.
+ * dst:           destination buffer, sized for dstYStride/dstCStride at
+ *                src.width x src.height with 4:2:0-subsampled chroma.
+ * src:           source buffer with per-plane pointers (data, dataCb, dataCr)
+ *                plus stride, chromaStride and chromaStep describing layout.
+ * dstYStride:    destination luma row stride, in bytes.
+ * dstCStride:    destination chroma row stride, in bytes (per plane for YV12).
+ *
+ * Returns OK on success, INVALID_OPERATION for an unsupported previewFormat.
+ */
+status_t CallbackProcessor::convertFromFlexibleYuv(int32_t previewFormat,
+        uint8_t *dst,
+        const CpuConsumer::LockedBuffer &src,
+        uint32_t dstYStride,
+        uint32_t dstCStride) const {
+
+    if (previewFormat != HAL_PIXEL_FORMAT_YCrCb_420_SP &&
+            previewFormat != HAL_PIXEL_FORMAT_YV12) {
+        ALOGE("%s: Camera %d: Unexpected preview format when using "
+                "flexible YUV: 0x%x", __FUNCTION__, mId, previewFormat);
+        return INVALID_OPERATION;
+    }
+
+    // Copy Y plane, adjusting for stride
+    const uint8_t *ySrc = src.data;
+    uint8_t *yDst = dst;
+    for (size_t row = 0; row < src.height; row++) {
+        memcpy(yDst, ySrc, src.width);
+        ySrc += src.stride;
+        yDst += dstYStride;
+    }
+    // After the loop, yDst points one past the destination Y plane, i.e. at
+    // the start of the destination chroma data.
+
+    // Copy/swizzle chroma planes, 4:2:0 subsampling
+    const uint8_t *cbSrc = src.dataCb;
+    const uint8_t *crSrc = src.dataCr;
+    size_t chromaHeight = src.height / 2;
+    size_t chromaWidth = src.width / 2;
+    // Bytes of source row padding left over after walking one chroma row
+    // sample-by-sample with chromaStep
+    ssize_t chromaGap = src.chromaStride -
+            (chromaWidth * src.chromaStep);
+    // Destination chroma row padding (only nonzero for padded YV12 strides)
+    size_t dstChromaGap = dstCStride - chromaWidth;
+
+    if (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
+        // Flexible YUV chroma to NV21 chroma
+        uint8_t *crcbDst = yDst;
+        // Check for shortcuts
+        if (cbSrc == crSrc + 1 && src.chromaStep == 2) {
+            // Source chroma is already interleaved Cr,Cb (VU) pairs, so each
+            // chroma row is src.width contiguous bytes.
+            ALOGV("%s: Fast NV21->NV21", __FUNCTION__);
+            // Source has semiplanar CrCb chroma layout, can copy by rows
+            for (size_t row = 0; row < chromaHeight; row++) {
+                memcpy(crcbDst, crSrc, src.width);
+                // NOTE(review): advances by src.width rather than a dest
+                // stride; assumes destination NV21 chroma rows are unpadded
+                // -- confirm against the caller's dstCStride setup.
+                crcbDst += src.width;
+                crSrc += src.chromaStride;
+            }
+        } else {
+            ALOGV("%s: Generic->NV21", __FUNCTION__);
+            // Generic copy, always works but not very efficient
+            for (size_t row = 0; row < chromaHeight; row++) {
+                for (size_t col = 0; col < chromaWidth; col++) {
+                    // NV21 interleaves V (Cr) then U (Cb) per chroma sample
+                    *(crcbDst++) = *crSrc;
+                    *(crcbDst++) = *cbSrc;
+                    crSrc += src.chromaStep;
+                    cbSrc += src.chromaStep;
+                }
+                // Skip any source row padding before the next chroma row
+                crSrc += chromaGap;
+                cbSrc += chromaGap;
+            }
+        }
+    } else {
+        // flexible YUV chroma to YV12 chroma
+        ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YV12,
+                "Unexpected preview format 0x%x", previewFormat);
+        // YV12 lays out the full Cr plane first, followed by the full Cb
+        // plane, each with dstCStride-byte rows
+        uint8_t *crDst = yDst;
+        uint8_t *cbDst = yDst + chromaHeight * dstCStride;
+        if (src.chromaStep == 1) {
+            ALOGV("%s: Fast YV12->YV12", __FUNCTION__);
+            // Source has planar chroma layout, can copy by row
+            for (size_t row = 0; row < chromaHeight; row++) {
+                memcpy(crDst, crSrc, chromaWidth);
+                crDst += dstCStride;
+                crSrc += src.chromaStride;
+            }
+            for (size_t row = 0; row < chromaHeight; row++) {
+                memcpy(cbDst, cbSrc, chromaWidth);
+                cbDst += dstCStride;
+                cbSrc += src.chromaStride;
+            }
+        } else {
+            ALOGV("%s: Generic->YV12", __FUNCTION__);
+            // Generic copy, always works but not very efficient
+            for (size_t row = 0; row < chromaHeight; row++) {
+                for (size_t col = 0; col < chromaWidth; col++) {
+                    *(crDst++) = *crSrc;
+                    *(cbDst++) = *cbSrc;
+                    crSrc += src.chromaStep;
+                    cbSrc += src.chromaStep;
+                }
+                // Skip source padding, and destination padding if any
+                crSrc += chromaGap;
+                cbSrc += chromaGap;
+                crDst += dstChromaGap;
+                cbDst += dstChromaGap;
+            }
+        }
+    }
+
+    return OK;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index c2a1372..613f5be 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -23,27 +23,31 @@
#include <utils/Mutex.h>
#include <utils/Condition.h>
#include <gui/CpuConsumer.h>
-#include "Parameters.h"
-#include "CameraMetadata.h"
-#include "Camera2Heap.h"
+
+#include "api1/client2/Camera2Heap.h"
namespace android {
class Camera2Client;
+class CameraDeviceBase;
namespace camera2 {
+class Parameters;
+
/***
* Still image capture output image processing
*/
class CallbackProcessor:
public Thread, public CpuConsumer::FrameAvailableListener {
public:
- CallbackProcessor(wp<Camera2Client> client);
+ CallbackProcessor(sp<Camera2Client> client);
~CallbackProcessor();
void onFrameAvailable();
+ // Set to NULL to disable the direct-to-app callback window
+ status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
status_t updateStream(const Parameters &params);
status_t deleteStream();
int getStreamId() const;
@@ -52,6 +56,8 @@ class CallbackProcessor:
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
wp<Camera2Client> mClient;
+ wp<CameraDeviceBase> mDevice;
+ int mId;
mutable Mutex mInputMutex;
bool mCallbackAvailable;
@@ -61,6 +67,9 @@ class CallbackProcessor:
NO_STREAM = -1
};
+ // True if mCallbackWindow is a remote consumer, false if just the local
+ // mCallbackConsumer
+ bool mCallbackToApp;
int mCallbackStreamId;
static const size_t kCallbackHeapCount = 6;
sp<CpuConsumer> mCallbackConsumer;
@@ -72,7 +81,15 @@ class CallbackProcessor:
virtual bool threadLoop();
status_t processNewCallback(sp<Camera2Client> &client);
-
+ // Used when shutting down
+ status_t discardNewCallback();
+
+ // Convert from flexible YUV to NV21 or YV12
+ status_t convertFromFlexibleYuv(int32_t previewFormat,
+ uint8_t *dst,
+ const CpuConsumer::LockedBuffer &src,
+ uint32_t dstYStride,
+ uint32_t dstCStride) const;
};
diff --git a/services/camera/libcameraservice/camera2/Camera2Heap.h b/services/camera/libcameraservice/api1/client2/Camera2Heap.h
index 9c72d76..9c72d76 100644
--- a/services/camera/libcameraservice/camera2/Camera2Heap.h
+++ b/services/camera/libcameraservice/api1/client2/Camera2Heap.h
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index fe4abc0..8a4ce4e 100644
--- a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -22,11 +22,11 @@
#include <utils/Trace.h>
#include <utils/Vector.h>
-#include "CaptureSequencer.h"
-#include "BurstCapture.h"
-#include "../Camera2Device.h"
-#include "../Camera2Client.h"
-#include "Parameters.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/BurstCapture.h"
+#include "api1/client2/Parameters.h"
+#include "api1/client2/ZslProcessorInterface.h"
namespace android {
namespace camera2 {
@@ -43,9 +43,11 @@ CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
mShutterNotified(false),
mClient(client),
mCaptureState(IDLE),
+ mStateTransitionCount(0),
mTriggerId(0),
mTimeoutCount(0),
- mCaptureId(Camera2Client::kCaptureRequestIdStart) {
+ mCaptureId(Camera2Client::kCaptureRequestIdStart),
+ mMsgType(0) {
ALOGV("%s", __FUNCTION__);
}
@@ -53,12 +55,12 @@ CaptureSequencer::~CaptureSequencer() {
ALOGV("%s: Exit", __FUNCTION__);
}
-void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) {
+void CaptureSequencer::setZslProcessor(wp<ZslProcessorInterface> processor) {
Mutex::Autolock l(mInputMutex);
mZslProcessor = processor;
}
-status_t CaptureSequencer::startCapture() {
+status_t CaptureSequencer::startCapture(int msgType) {
ALOGV("%s", __FUNCTION__);
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
@@ -67,6 +69,7 @@ status_t CaptureSequencer::startCapture() {
return INVALID_OPERATION;
}
if (!mStartCapture) {
+ mMsgType = msgType;
mStartCapture = true;
mStartCaptureSignal.signal();
}
@@ -101,12 +104,12 @@ void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
}
}
-void CaptureSequencer::onFrameAvailable(int32_t frameId,
+void CaptureSequencer::onFrameAvailable(int32_t requestId,
const CameraMetadata &frame) {
ALOGV("%s: Listener found new frame", __FUNCTION__);
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
- mNewFrameId = frameId;
+ mNewFrameId = requestId;
mNewFrame = frame;
if (!mNewFrameReceived) {
mNewFrameReceived = true;
@@ -128,7 +131,7 @@ void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp,
}
-void CaptureSequencer::dump(int fd, const Vector<String16>& args) {
+void CaptureSequencer::dump(int fd, const Vector<String16>& /*args*/) {
String8 result;
if (mCaptureRequest.entryCount() != 0) {
result = " Capture request:\n";
@@ -182,7 +185,6 @@ const CaptureSequencer::StateManager
};
bool CaptureSequencer::threadLoop() {
- status_t res;
sp<Camera2Client> client = mClient.promote();
if (client == 0) return false;
@@ -197,8 +199,14 @@ bool CaptureSequencer::threadLoop() {
Mutex::Autolock l(mStateMutex);
if (currentState != mCaptureState) {
+ if (mCaptureState != IDLE) {
+ ATRACE_ASYNC_END(kStateNames[mCaptureState], mStateTransitionCount);
+ }
mCaptureState = currentState;
- ATRACE_INT("cam2_capt_state", mCaptureState);
+ mStateTransitionCount++;
+ if (mCaptureState != IDLE) {
+ ATRACE_ASYNC_BEGIN(kStateNames[mCaptureState], mStateTransitionCount);
+ }
ALOGV("Camera %d: New capture state %s",
client->getCameraId(), kStateNames[mCaptureState]);
mStateChanged.signal();
@@ -213,7 +221,8 @@ bool CaptureSequencer::threadLoop() {
return true;
}
-CaptureSequencer::CaptureState CaptureSequencer::manageIdle(sp<Camera2Client> &client) {
+CaptureSequencer::CaptureState CaptureSequencer::manageIdle(
+ sp<Camera2Client> &/*client*/) {
status_t res;
Mutex::Autolock l(mInputMutex);
while (!mStartCapture) {
@@ -241,6 +250,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &c
mBusy = false;
}
+ int takePictureCounter = 0;
{
SharedParameters::Lock l(client->getParameters());
switch (l.mParameters.state) {
@@ -250,6 +260,12 @@ CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &c
res = INVALID_OPERATION;
break;
case Parameters::STILL_CAPTURE:
+ res = client->getCameraDevice()->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't idle after still capture: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ }
l.mParameters.state = Parameters::STOPPED;
break;
case Parameters::VIDEO_SNAPSHOT:
@@ -262,17 +278,26 @@ CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &c
Parameters::getStateName(l.mParameters.state));
res = INVALID_OPERATION;
}
+ takePictureCounter = l.mParameters.takePictureCounter;
}
- sp<ZslProcessor> processor = mZslProcessor.promote();
+ sp<ZslProcessorInterface> processor = mZslProcessor.promote();
if (processor != 0) {
+ ALOGV("%s: Memory optimization, clearing ZSL queue",
+ __FUNCTION__);
processor->clearZslQueue();
}
+ /**
+ * Fire the jpegCallback in Camera#takePicture(..., jpegCallback)
+ */
if (mCaptureBuffer != 0 && res == OK) {
- Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
+ ATRACE_ASYNC_END(Camera2Client::kTakepictureLabel, takePictureCounter);
+
+ Camera2Client::SharedCameraCallbacks::Lock
+ l(client->mSharedCameraCallbacks);
ALOGV("%s: Sending still image to client", __FUNCTION__);
- if (l.mCameraClient != 0) {
- l.mCameraClient->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE,
+ if (l.mRemoteCallback != 0) {
+ l.mRemoteCallback->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE,
mCaptureBuffer, NULL);
} else {
ALOGV("%s: No client!", __FUNCTION__);
@@ -318,7 +343,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
sp<Camera2Client> &client) {
ALOGV("%s", __FUNCTION__);
status_t res;
- sp<ZslProcessor> processor = mZslProcessor.promote();
+ sp<ZslProcessorInterface> processor = mZslProcessor.promote();
if (processor == 0) {
ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
return DONE;
@@ -342,21 +367,21 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
}
SharedParameters::Lock l(client->getParameters());
- /* warning: this also locks a SharedCameraClient */
- shutterNotifyLocked(l.mParameters, client);
+ /* warning: this also locks a SharedCameraCallbacks */
+ shutterNotifyLocked(l.mParameters, client, mMsgType);
mShutterNotified = true;
mTimeoutCount = kMaxTimeoutsForCaptureEnd;
return STANDARD_CAPTURE_WAIT;
}
CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
ALOGV("%s", __FUNCTION__);
return DONE;
}
CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
ALOGV("%s", __FUNCTION__);
return START;
}
@@ -364,10 +389,24 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
sp<Camera2Client> &client) {
ATRACE_CALL();
+
+ bool isAeConverged = false;
+ // Get the onFrameAvailable callback when the requestID == mCaptureId
client->registerFrameListener(mCaptureId, mCaptureId + 1,
this);
+
+ {
+ Mutex::Autolock l(mInputMutex);
+ isAeConverged = (mAEState == ANDROID_CONTROL_AE_STATE_CONVERGED);
+ }
+
{
SharedParameters::Lock l(client->getParameters());
+ // Skip AE precapture when it is already converged and not in force flash mode.
+ if (l.mParameters.flashMode != Parameters::FLASH_MODE_ON && isAeConverged) {
+ return STANDARD_CAPTURE;
+ }
+
mTriggerId = l.mParameters.precaptureTriggerCounter++;
}
client->getCameraDevice()->triggerPrecaptureMetering(mTriggerId);
@@ -378,7 +417,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
}
CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
status_t res;
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
@@ -421,8 +460,16 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
status_t res;
ATRACE_CALL();
SharedParameters::Lock l(client->getParameters());
- Vector<uint8_t> outputStreams;
-
+ Vector<int32_t> outputStreams;
+ uint8_t captureIntent = static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
+
+ /**
+ * Set up output streams in the request
+ * - preview
+ * - capture/jpeg
+ * - callback (if preview callbacks enabled)
+ * - recording (if recording enabled)
+ */
outputStreams.push(client->getPreviewStreamId());
outputStreams.push(client->getCaptureStreamId());
@@ -433,6 +480,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
if (l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
outputStreams.push(client->getRecordingStreamId());
+ captureIntent = static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT);
}
res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
@@ -442,6 +490,10 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
&mCaptureId, 1);
}
if (res == OK) {
+ res = mCaptureRequest.update(ANDROID_CONTROL_CAPTURE_INTENT,
+ &captureIntent, 1);
+ }
+ if (res == OK) {
res = mCaptureRequest.sort();
}
@@ -451,6 +503,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
return DONE;
}
+ // Create a capture copy since CameraDeviceBase#capture takes ownership
CameraMetadata captureCopy = mCaptureRequest;
if (captureCopy.entryCount() == 0) {
ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
@@ -458,7 +511,12 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
return DONE;
}
+ /**
+ * Clear the streaming request for still-capture pictures
+ * (as opposed to i.e. video snapshots)
+ */
if (l.mParameters.state == Parameters::STILL_CAPTURE) {
+ // API definition of takePicture() - stop preview before taking pic
res = client->stopStream();
if (res != OK) {
ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
@@ -485,6 +543,8 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
status_t res;
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
+
+ // Wait for new metadata result (mNewFrame)
while (!mNewFrameReceived) {
res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
if (res == TIMED_OUT) {
@@ -492,12 +552,17 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
break;
}
}
+
+ // Approximation of the shutter being closed
+ // - TODO: use the hal3 exposure callback in Camera3Device instead
if (mNewFrameReceived && !mShutterNotified) {
SharedParameters::Lock l(client->getParameters());
- /* warning: this also locks a SharedCameraClient */
- shutterNotifyLocked(l.mParameters, client);
+ /* warning: this also locks a SharedCameraCallbacks */
+ shutterNotifyLocked(l.mParameters, client, mMsgType);
mShutterNotified = true;
}
+
+ // Wait until jpeg was captured by JpegProcessor
while (mNewFrameReceived && !mNewCaptureReceived) {
res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
if (res == TIMED_OUT) {
@@ -521,7 +586,9 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
}
if (entry.data.i64[0] != mCaptureTimestamp) {
ALOGW("Mismatched capture timestamps: Metadata frame %lld,"
- " captured buffer %lld", entry.data.i64[0], mCaptureTimestamp);
+ " captured buffer %lld",
+ entry.data.i64[0],
+ mCaptureTimestamp);
}
client->removeFrameListener(mCaptureId, mCaptureId + 1, this);
@@ -578,7 +645,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart(
}
CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
status_t res;
ATRACE_CALL();
@@ -639,24 +706,27 @@ status_t CaptureSequencer::updateCaptureRequest(const Parameters &params,
}
/*static*/ void CaptureSequencer::shutterNotifyLocked(const Parameters &params,
- sp<Camera2Client> client) {
+ sp<Camera2Client> client, int msgType) {
ATRACE_CALL();
- if (params.state == Parameters::STILL_CAPTURE && params.playShutterSound) {
+ if (params.state == Parameters::STILL_CAPTURE
+ && params.playShutterSound
+ && (msgType & CAMERA_MSG_SHUTTER)) {
client->getCameraService()->playSound(CameraService::SOUND_SHUTTER);
}
{
- Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
+ Camera2Client::SharedCameraCallbacks::Lock
+ l(client->mSharedCameraCallbacks);
ALOGV("%s: Notifying of shutter close to client", __FUNCTION__);
- if (l.mCameraClient != 0) {
+ if (l.mRemoteCallback != 0) {
// ShutterCallback
- l.mCameraClient->notifyCallback(CAMERA_MSG_SHUTTER,
+ l.mRemoteCallback->notifyCallback(CAMERA_MSG_SHUTTER,
/*ext1*/0, /*ext2*/0);
// RawCallback with null buffer
- l.mCameraClient->notifyCallback(CAMERA_MSG_RAW_IMAGE_NOTIFY,
+ l.mRemoteCallback->notifyCallback(CAMERA_MSG_RAW_IMAGE_NOTIFY,
/*ext1*/0, /*ext2*/0);
} else {
ALOGV("%s: No client!", __FUNCTION__);
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index 4cde9c8..9fb4ee7 100644
--- a/services/camera/libcameraservice/camera2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -23,7 +23,7 @@
#include <utils/Vector.h>
#include <utils/Mutex.h>
#include <utils/Condition.h>
-#include "CameraMetadata.h"
+#include "camera/CameraMetadata.h"
#include "Parameters.h"
#include "FrameProcessor.h"
@@ -33,7 +33,7 @@ class Camera2Client;
namespace camera2 {
-class ZslProcessor;
+class ZslProcessorInterface;
class BurstCapture;
/**
@@ -48,10 +48,10 @@ class CaptureSequencer:
~CaptureSequencer();
// Get reference to the ZslProcessor, which holds the ZSL buffers and frames
- void setZslProcessor(wp<ZslProcessor> processor);
+ void setZslProcessor(wp<ZslProcessorInterface> processor);
// Begin still image capture
- status_t startCapture();
+ status_t startCapture(int msgType);
// Wait until current image capture completes; returns immediately if no
// capture is active. Returns TIMED_OUT if capture does not complete during
@@ -62,7 +62,7 @@ class CaptureSequencer:
void notifyAutoExposure(uint8_t newState, int triggerId);
// Notifications from the frame processor
- virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame);
+ virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
// Notifications from the JPEG processor
void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
@@ -100,12 +100,12 @@ class CaptureSequencer:
* Internal to CaptureSequencer
*/
static const nsecs_t kWaitDuration = 100000000; // 100 ms
- static const int kMaxTimeoutsForPrecaptureStart = 2; // 200 ms
+ static const int kMaxTimeoutsForPrecaptureStart = 10; // 1 sec
static const int kMaxTimeoutsForPrecaptureEnd = 20; // 2 sec
static const int kMaxTimeoutsForCaptureEnd = 40; // 4 sec
wp<Camera2Client> mClient;
- wp<ZslProcessor> mZslProcessor;
+ wp<ZslProcessorInterface> mZslProcessor;
sp<BurstCapture> mBurstCapture;
enum CaptureState {
@@ -125,6 +125,7 @@ class CaptureSequencer:
NUM_CAPTURE_STATES
} mCaptureState;
static const char* kStateNames[];
+ int mStateTransitionCount;
Mutex mStateMutex; // Guards mCaptureState
Condition mStateChanged;
@@ -138,6 +139,7 @@ class CaptureSequencer:
bool mAeInPrecapture;
int32_t mCaptureId;
+ int mMsgType;
// Main internal methods
@@ -167,7 +169,7 @@ class CaptureSequencer:
// Emit Shutter/Raw callback to java, and maybe play a shutter sound
static void shutterNotifyLocked(const Parameters &params,
- sp<Camera2Client> client);
+ sp<Camera2Client> client, int msgType);
};
}; // namespace camera2
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
new file mode 100644
index 0000000..19acae4
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-FrameProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/FrameProcessor.h"
+
+namespace android {
+namespace camera2 {
+
+/*
+ * Construct the API1 frame-metadata processor for one client.
+ *
+ * Captures whether 3A notify callbacks must be synthesized from result
+ * metadata (when the device won't send them itself), whether the
+ * partial-result quirk is in use, and the client's starting 3A trigger IDs.
+ */
+FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
+        sp<Camera2Client> client) :
+    FrameProcessorBase(device),
+    mClient(client),
+    mLastFrameNumberOfFaces(0),
+    mLast3AFrameNumber(-1) {
+
+    sp<CameraDeviceBase> d = device.promote();
+    // NOTE(review): promote() may return NULL if the device was already
+    // released; d is dereferenced without a check here -- confirm callers
+    // guarantee the device outlives construction.
+    mSynthesize3ANotify = !(d->willNotify3A());
+
+    {
+        SharedParameters::Lock l(client->getParameters());
+        mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+        // Initialize starting 3A state
+        m3aState.afTriggerId = l.mParameters.afTriggerCounter;
+        m3aState.aeTriggerId = l.mParameters.precaptureTriggerCounter;
+        // Check if lens is fixed-focus
+        if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
+            m3aState.afMode = ANDROID_CONTROL_AF_MODE_OFF;
+        }
+    }
+}
+
+// No cleanup needed; members are smart pointers / POD state.
+FrameProcessor::~FrameProcessor() {
+}
+
+/*
+ * Per-frame hook invoked by FrameProcessorBase for each new result metadata
+ * buffer.
+ *
+ * Returns false (dropping further processing of this frame) when the owning
+ * client is gone or face-detect processing fails; otherwise optionally
+ * synthesizes 3A notifications and defers to the base implementation.
+ */
+bool FrameProcessor::processSingleFrame(CameraMetadata &frame,
+        const sp<CameraDeviceBase> &device) {
+
+    // Bail out quietly if the client has already been destroyed
+    sp<Camera2Client> client = mClient.promote();
+    if (!client.get()) {
+        return false;
+    }
+
+    // With the partial-result quirk, the HAL may deliver incomplete metadata
+    // buffers tagged ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL
+    bool partialResult = false;
+    if (mUsePartialQuirk) {
+        camera_metadata_entry_t entry;
+        entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+        if (entry.count > 0 &&
+                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+            partialResult = true;
+        }
+    }
+
+    // Face-detect data is only processed from complete results
+    if (!partialResult && processFaceDetect(frame, client) != OK) {
+        return false;
+    }
+
+    // Generate notify-style 3A callbacks from metadata when the device
+    // won't emit them itself (per mSynthesize3ANotify)
+    if (mSynthesize3ANotify) {
+        process3aState(frame, client);
+    }
+
+    return FrameProcessorBase::processSingleFrame(frame, device);
+}
+
+/*
+ * Extract face-detection results from result metadata and forward them to
+ * the app via callbackFaceDetection().
+ *
+ * Face rectangles are packed 4 int32s per face, landmarks 6 int32s per face
+ * (left eye, right eye, mouth as x,y pairs); all are converted from active
+ * array coordinates to the API1 normalized [-1000, 1000] range. A face with
+ * score 0 is treated as invalid and dropped.
+ *
+ * Returns OK on success (including "no face data in this frame"), BAD_VALUE
+ * when expected companion tags are missing or inconsistent.
+ */
+status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
+        const sp<Camera2Client> &client) {
+    status_t res = BAD_VALUE;
+    ATRACE_CALL();
+    camera_metadata_ro_entry_t entry;
+    bool enableFaceDetect;
+
+    {
+        SharedParameters::Lock l(client->getParameters());
+        enableFaceDetect = l.mParameters.enableFaceDetect;
+    }
+    entry = frame.find(ANDROID_STATISTICS_FACE_DETECT_MODE);
+
+    // TODO: This should be an error once implementations are compliant
+    if (entry.count == 0) {
+        return OK;
+    }
+
+    uint8_t faceDetectMode = entry.data.u8[0];
+
+    camera_frame_metadata metadata;
+    Vector<camera_face_t> faces;
+    metadata.number_of_faces = 0;
+
+    if (enableFaceDetect &&
+            faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) {
+
+        SharedParameters::Lock l(client->getParameters());
+        entry = frame.find(ANDROID_STATISTICS_FACE_RECTANGLES);
+        if (entry.count == 0) {
+            // No faces this frame
+            /* warning: locks SharedCameraCallbacks */
+            callbackFaceDetection(client, metadata);
+            return OK;
+        }
+        // Four int32 coordinates per face rectangle
+        metadata.number_of_faces = entry.count / 4;
+        if (metadata.number_of_faces >
+                l.mParameters.fastInfo.maxFaces) {
+            ALOGE("%s: Camera %d: More faces than expected! (Got %d, max %d)",
+                    __FUNCTION__, client->getCameraId(),
+                    metadata.number_of_faces, l.mParameters.fastInfo.maxFaces);
+            return res;
+        }
+        const int32_t *faceRects = entry.data.i32;
+
+        entry = frame.find(ANDROID_STATISTICS_FACE_SCORES);
+        if (entry.count == 0) {
+            ALOGE("%s: Camera %d: Unable to read face scores",
+                    __FUNCTION__, client->getCameraId());
+            return res;
+        }
+        const uint8_t *faceScores = entry.data.u8;
+
+        const int32_t *faceLandmarks = NULL;
+        const int32_t *faceIds = NULL;
+
+        // IDs and landmarks are only reported in FULL face-detect mode
+        if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
+            entry = frame.find(ANDROID_STATISTICS_FACE_LANDMARKS);
+            if (entry.count == 0) {
+                ALOGE("%s: Camera %d: Unable to read face landmarks",
+                        __FUNCTION__, client->getCameraId());
+                return res;
+            }
+            faceLandmarks = entry.data.i32;
+
+            entry = frame.find(ANDROID_STATISTICS_FACE_IDS);
+
+            if (entry.count == 0) {
+                ALOGE("%s: Camera %d: Unable to read face IDs",
+                        __FUNCTION__, client->getCameraId());
+                return res;
+            }
+            faceIds = entry.data.i32;
+        }
+
+        faces.setCapacity(metadata.number_of_faces);
+
+        size_t maxFaces = metadata.number_of_faces;
+        for (size_t i = 0; i < maxFaces; i++) {
+            // Score 0 marks an invalid/empty slot; drop it from the output
+            if (faceScores[i] == 0) {
+                metadata.number_of_faces--;
+                continue;
+            }
+            if (faceScores[i] > 100) {
+                ALOGW("%s: Face index %d with out of range score %d",
+                        __FUNCTION__, i, faceScores[i]);
+            }
+
+            camera_face_t face;
+
+            // Convert array coordinates to the normalized API1 range
+            face.rect[0] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 0]);
+            face.rect[1] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 1]);
+            face.rect[2] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 2]);
+            face.rect[3] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 3]);
+
+            face.score = faceScores[i];
+            if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
+                // Six int32s per face: left eye, right eye, mouth (x,y each)
+                face.id = faceIds[i];
+                face.left_eye[0] =
+                    l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]);
+                face.left_eye[1] =
+                    l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]);
+                face.right_eye[0] =
+                    l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]);
+                face.right_eye[1] =
+                    l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]);
+                face.mouth[0] =
+                    l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]);
+                face.mouth[1] =
+                    l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]);
+            } else {
+                // SIMPLE mode: -2000 is the sentinel for "not supported",
+                // since it lies outside the normalized coordinate range
+                face.id = 0;
+                face.left_eye[0] = face.left_eye[1] = -2000;
+                face.right_eye[0] = face.right_eye[1] = -2000;
+                face.mouth[0] = face.mouth[1] = -2000;
+            }
+            faces.push_back(face);
+        }
+
+        metadata.faces = faces.editArray();
+    }
+
+    /* warning: locks SharedCameraCallbacks */
+    callbackFaceDetection(client, metadata);
+
+    return OK;
+}
+
+/*
+ * Synthesize API1 3A notify callbacks (AE/AF/AWB) from result metadata, for
+ * devices that don't emit them natively (see mSynthesize3ANotify).
+ *
+ * Deduplicates by frame number, requires all tracked 3A tags to be present
+ * (returns BAD_VALUE otherwise), and only notifies the client when a state,
+ * mode, or trigger ID actually changed since the last processed frame.
+ */
+status_t FrameProcessor::process3aState(const CameraMetadata &frame,
+        const sp<Camera2Client> &client) {
+
+    ATRACE_CALL();
+    camera_metadata_ro_entry_t entry;
+    int cameraId = client->getCameraId();
+
+    entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+    // NOTE(review): entry.count is not checked before reading i32[0]; a
+    // result missing FRAME_COUNT would read an empty entry -- confirm the
+    // tag is guaranteed present in every result.
+    int32_t frameNumber = entry.data.i32[0];
+
+    // Don't send 3A notifications for the same frame number twice
+    if (frameNumber <= mLast3AFrameNumber) {
+        ALOGV("%s: Already sent 3A for frame number %d, skipping",
+                __FUNCTION__, frameNumber);
+        return OK;
+    }
+
+    mLast3AFrameNumber = frameNumber;
+
+    // Get 3A states from result metadata
+    bool gotAllStates = true;
+
+    AlgState new3aState;
+
+    // TODO: Also use AE mode, AE trigger ID
+
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_MODE,
+            &new3aState.afMode, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_MODE,
+            &new3aState.awbMode, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE,
+            &new3aState.aeState, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_STATE,
+            &new3aState.afState, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_STATE,
+            &new3aState.awbState, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID,
+            &new3aState.afTriggerId, frameNumber, cameraId);
+
+    gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+            &new3aState.aeTriggerId, frameNumber, cameraId);
+
+    // All tracked tags must be present to notify consistently
+    if (!gotAllStates) return BAD_VALUE;
+
+    if (new3aState.aeState != m3aState.aeState) {
+        ALOGV("%s: Camera %d: AE state %d->%d",
+                __FUNCTION__, cameraId,
+                m3aState.aeState, new3aState.aeState);
+        client->notifyAutoExposure(new3aState.aeState, new3aState.aeTriggerId);
+    }
+
+    if (new3aState.afState != m3aState.afState ||
+            new3aState.afMode != m3aState.afMode ||
+            new3aState.afTriggerId != m3aState.afTriggerId) {
+        ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
+                __FUNCTION__, cameraId,
+                m3aState.afState, new3aState.afState,
+                m3aState.afMode, new3aState.afMode,
+                m3aState.afTriggerId, new3aState.afTriggerId);
+        client->notifyAutoFocus(new3aState.afState, new3aState.afTriggerId);
+    }
+    if (new3aState.awbState != m3aState.awbState ||
+            new3aState.awbMode != m3aState.awbMode) {
+        ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
+                __FUNCTION__, cameraId,
+                m3aState.awbState, new3aState.awbState,
+                m3aState.awbMode, new3aState.awbMode);
+        // NOTE(review): the AWB callback is handed the AE precapture trigger
+        // ID (aeTriggerId), not an AWB-specific one -- confirm intended.
+        client->notifyAutoWhitebalance(new3aState.awbState,
+                new3aState.aeTriggerId);
+    }
+
+    m3aState = new3aState;
+
+    return OK;
+}
+
+/*
+ * Read one 3A value from result metadata into *value.
+ *
+ * Src selects which metadata payload union member to read, dispatched by
+ * sizeof (uint8_t -> data.u8, int32_t -> data.i32); T is the destination
+ * type for the static_cast. Only the first element of the entry is used.
+ *
+ * Returns false (with an error log) if value is NULL, the tag is absent from
+ * the result, or sizeof(Src) matches no supported payload type.
+ */
+template<typename Src, typename T>
+bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
+        T* value, int32_t frameNumber, int cameraId) {
+    camera_metadata_ro_entry_t entry;
+    if (value == NULL) {
+        ALOGE("%s: Camera %d: Value to write to is NULL",
+                __FUNCTION__, cameraId);
+        return false;
+    }
+
+    entry = result.find(tag);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
+                __FUNCTION__, cameraId,
+                get_camera_metadata_tag_name(tag), frameNumber);
+        return false;
+    } else {
+        switch(sizeof(Src)){
+            case sizeof(uint8_t):
+                *value = static_cast<T>(entry.data.u8[0]);
+                break;
+            case sizeof(int32_t):
+                *value = static_cast<T>(entry.data.i32[0]);
+                break;
+            default:
+                ALOGE("%s: Camera %d: Unsupported source",
+                        __FUNCTION__, cameraId);
+                return false;
+        }
+    }
+    return true;
+}
+
+
+/*
+ * Deliver face-detect results to the app as a CAMERA_MSG_PREVIEW_METADATA
+ * data callback.
+ *
+ * Repeated zero-face frames are suppressed: a callback is sent only when
+ * there are faces, or on the first zero-face frame after a frame that had
+ * faces. Locks SharedCameraCallbacks while invoking the remote callback.
+ */
+void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
+        const camera_frame_metadata &metadata) {
+
+    // The callback interface takes a non-const pointer; metadata is not
+    // modified by the callee, so the const_cast is safe here
+    camera_frame_metadata *metadata_ptr =
+        const_cast<camera_frame_metadata*>(&metadata);
+
+    /**
+     * Filter out repeated 0-face callbacks,
+     * but not when the last frame was >0
+     */
+    if (metadata.number_of_faces != 0 ||
+            mLastFrameNumberOfFaces != metadata.number_of_faces) {
+
+        Camera2Client::SharedCameraCallbacks::Lock
+            l(client->mSharedCameraCallbacks);
+        if (l.mRemoteCallback != NULL) {
+            l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
+                    NULL,
+                    metadata_ptr);
+        }
+    }
+
+    // Remember for next frame's suppression decision
+    mLastFrameNumberOfFaces = metadata.number_of_faces;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
new file mode 100644
index 0000000..856ad32
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/KeyedVector.h>
+#include <utils/List.h>
+#include <camera/CameraMetadata.h>
+
+#include "common/FrameProcessorBase.h"
+
+struct camera_frame_metadata;
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+/* Output frame metadata processing thread. This thread waits for new
+ * frames from the device, and analyzes them as necessary.
+ */
+class FrameProcessor : public FrameProcessorBase {
+ public:
+ FrameProcessor(wp<CameraDeviceBase> device, sp<Camera2Client> client);
+ ~FrameProcessor();
+
+ private:
+ wp<Camera2Client> mClient;
+
+ bool mSynthesize3ANotify;
+
+ int mLastFrameNumberOfFaces;
+
+ void processNewFrames(const sp<Camera2Client> &client);
+
+ virtual bool processSingleFrame(CameraMetadata &frame,
+ const sp<CameraDeviceBase> &device);
+
+ status_t processFaceDetect(const CameraMetadata &frame,
+ const sp<Camera2Client> &client);
+
+ // Send 3A state change notifications to client based on frame metadata
+ status_t process3aState(const CameraMetadata &frame,
+ const sp<Camera2Client> &client);
+
+ // Helper for process3aState
+ template<typename Src, typename T>
+ bool get3aResult(const CameraMetadata& result, int32_t tag, T* value,
+ int32_t frameNumber, int cameraId);
+
+
+ struct AlgState {
+ // TODO: also track AE mode
+ camera_metadata_enum_android_control_af_mode afMode;
+ camera_metadata_enum_android_control_awb_mode awbMode;
+
+ camera_metadata_enum_android_control_ae_state aeState;
+ camera_metadata_enum_android_control_af_state afState;
+ camera_metadata_enum_android_control_awb_state awbState;
+
+ int32_t afTriggerId;
+ int32_t aeTriggerId;
+
+ // These defaults need to match those in Parameters.cpp
+ AlgState() :
+ afMode(ANDROID_CONTROL_AF_MODE_AUTO),
+ awbMode(ANDROID_CONTROL_AWB_MODE_AUTO),
+ aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
+ afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
+ awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE),
+ afTriggerId(0),
+ aeTriggerId(0) {
+ }
+ } m3aState;
+
+ // Whether the partial result quirk is enabled for this device
+ bool mUsePartialQuirk;
+
+ // Track most recent frame number for which 3A notifications were sent for.
+ // Used to filter against sending 3A notifications for the same frame
+ // several times.
+ int32_t mLast3AFrameNumber;
+
+ // Emit FaceDetection event to java if faces changed
+ void callbackFaceDetection(sp<Camera2Client> client,
+ const camera_frame_metadata &metadata);
+};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
index 702ef58..2f0c67d 100644
--- a/services/camera/libcameraservice/camera2/JpegCompressor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
@@ -144,7 +144,7 @@ bool JpegCompressor::isBusy() {
}
// old function -- TODO: update for new buffer type
-bool JpegCompressor::isStreamInUse(uint32_t id) {
+bool JpegCompressor::isStreamInUse(uint32_t /*id*/) {
ALOGV("%s", __FUNCTION__);
Mutex::Autolock lock(mBusyMutex);
@@ -203,7 +203,7 @@ void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
dest->free_in_buffer = kMaxJpegSize;
}
-boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) {
+boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr /*cinfo*/) {
ALOGV("%s", __FUNCTION__);
ALOGE("%s: JPEG destination buffer overflow!",
__FUNCTION__);
@@ -211,6 +211,7 @@ boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) {
}
void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+ (void) cinfo; // TODO: clean up
ALOGV("%s", __FUNCTION__);
ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
__FUNCTION__, cinfo->dest->free_in_buffer);
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.h b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
index 945b1de..945b1de 100644
--- a/services/camera/libcameraservice/camera2/JpegCompressor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index ffc072b..77d5c8a 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -24,22 +24,24 @@
#include <binder/MemoryHeapBase.h>
#include <utils/Log.h>
#include <utils/Trace.h>
+#include <gui/Surface.h>
-#include "JpegProcessor.h"
-#include <gui/SurfaceTextureClient.h>
-#include "../Camera2Device.h"
-#include "../Camera2Client.h"
-
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/Camera2Heap.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/JpegProcessor.h"
namespace android {
namespace camera2 {
JpegProcessor::JpegProcessor(
- wp<Camera2Client> client,
+ sp<Camera2Client> client,
wp<CaptureSequencer> sequencer):
Thread(false),
- mClient(client),
+ mDevice(client->getCameraDevice()),
mSequencer(sequencer),
+ mId(client->getCameraId()),
mCaptureAvailable(false),
mCaptureStreamId(NO_STREAM) {
}
@@ -64,32 +66,34 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
Mutex::Autolock l(mInputMutex);
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return OK;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
// Find out buffer size for JPEG
camera_metadata_ro_entry_t maxJpegSize =
params.staticInfo(ANDROID_JPEG_MAX_SIZE);
if (maxJpegSize.count == 0) {
ALOGE("%s: Camera %d: Can't find ANDROID_JPEG_MAX_SIZE!",
- __FUNCTION__, client->getCameraId());
+ __FUNCTION__, mId);
return INVALID_OPERATION;
}
if (mCaptureConsumer == 0) {
// Create CPU buffer queue endpoint
- mCaptureConsumer = new CpuConsumer(1);
+ sp<BufferQueue> bq = new BufferQueue();
+ mCaptureConsumer = new CpuConsumer(bq, 1);
mCaptureConsumer->setFrameAvailableListener(this);
mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
- mCaptureWindow = new SurfaceTextureClient(
- mCaptureConsumer->getProducerInterface());
+ mCaptureWindow = new Surface(bq);
// Create memory for API consumption
mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
"Camera2Client::CaptureHeap");
if (mCaptureHeap->getSize() == 0) {
ALOGE("%s: Camera %d: Unable to allocate memory for capture",
- __FUNCTION__, client->getCameraId());
+ __FUNCTION__, mId);
return NO_MEMORY;
}
}
@@ -102,18 +106,22 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
if (res != OK) {
ALOGE("%s: Camera %d: Error querying capture output stream info: "
"%s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
return res;
}
if (currentWidth != (uint32_t)params.pictureWidth ||
currentHeight != (uint32_t)params.pictureHeight) {
ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
- __FUNCTION__, client->getCameraId(), mCaptureStreamId);
+ __FUNCTION__, mId, mCaptureStreamId);
res = device->deleteStream(mCaptureStreamId);
- if (res != OK) {
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+ " after it becomes idle", __FUNCTION__, mId);
+ return res;
+ } else if (res != OK) {
ALOGE("%s: Camera %d: Unable to delete old output stream "
"for capture: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
return res;
}
mCaptureStreamId = NO_STREAM;
@@ -128,7 +136,7 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
&mCaptureStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for capture: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -139,14 +147,15 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
status_t JpegProcessor::deleteStream() {
ATRACE_CALL();
- status_t res;
Mutex::Autolock l(mInputMutex);
if (mCaptureStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return OK;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
device->deleteStream(mCaptureStreamId);
@@ -164,7 +173,7 @@ int JpegProcessor::getStreamId() const {
return mCaptureStreamId;
}
-void JpegProcessor::dump(int fd, const Vector<String16>& args) const {
+void JpegProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
}
bool JpegProcessor::threadLoop() {
@@ -181,15 +190,13 @@ bool JpegProcessor::threadLoop() {
}
do {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return false;
- res = processNewCapture(client);
+ res = processNewCapture();
} while (res == OK);
return true;
}
-status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
+status_t JpegProcessor::processNewCapture() {
ATRACE_CALL();
status_t res;
sp<Camera2Heap> captureHeap;
@@ -201,17 +208,17 @@ status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
if (res != BAD_VALUE) {
ALOGE("%s: Camera %d: Error receiving still image buffer: "
"%s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
}
return res;
}
ALOGV("%s: Camera %d: Still capture available", __FUNCTION__,
- client->getCameraId());
+ mId);
if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) {
ALOGE("%s: Camera %d: Unexpected format for still image: "
- "%x, expected %x", __FUNCTION__, client->getCameraId(),
+ "%x, expected %x", __FUNCTION__, mId,
imgBuffer.format,
HAL_PIXEL_FORMAT_BLOB);
mCaptureConsumer->unlockBuffer(imgBuffer);
@@ -356,7 +363,7 @@ size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) {
// Find End of Image
// Scan JPEG buffer until End of Image (EOI)
bool foundEnd = false;
- for (size; size <= maxSize - MARKER_LENGTH; size++) {
+ for ( ; size <= maxSize - MARKER_LENGTH; size++) {
if ( checkJpegEnd(jpegBuffer + size) ) {
foundEnd = true;
size += MARKER_LENGTH;
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index 836bd02..b2c05df 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -23,17 +23,19 @@
#include <utils/Mutex.h>
#include <utils/Condition.h>
#include <gui/CpuConsumer.h>
-#include "Parameters.h"
-#include "CameraMetadata.h"
+
+#include "camera/CameraMetadata.h"
namespace android {
class Camera2Client;
+class CameraDeviceBase;
class MemoryHeapBase;
namespace camera2 {
class CaptureSequencer;
+class Parameters;
/***
* Still image capture output image processing
@@ -41,9 +43,10 @@ class CaptureSequencer;
class JpegProcessor:
public Thread, public CpuConsumer::FrameAvailableListener {
public:
- JpegProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+ JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~JpegProcessor();
+ // CpuConsumer listener implementation
void onFrameAvailable();
status_t updateStream(const Parameters &params);
@@ -53,8 +56,9 @@ class JpegProcessor:
void dump(int fd, const Vector<String16>& args) const;
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
- wp<Camera2Client> mClient;
+ wp<CameraDeviceBase> mDevice;
wp<CaptureSequencer> mSequencer;
+ int mId;
mutable Mutex mInputMutex;
bool mCaptureAvailable;
@@ -71,7 +75,7 @@ class JpegProcessor:
virtual bool threadLoop();
- status_t processNewCapture(sp<Camera2Client> &client);
+ status_t processNewCapture();
size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
};
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 9a0083a..08af566 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -58,13 +58,13 @@ status_t Parameters::initialize(const CameraMetadata *info) {
res = buildQuirks();
if (res != OK) return res;
- camera_metadata_ro_entry_t availableProcessedSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, 2);
- if (!availableProcessedSizes.count) return NO_INIT;
+ const Size MAX_PREVIEW_SIZE = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
+ res = getFilteredPreviewSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes);
+ if (res != OK) return res;
// TODO: Pick more intelligently
- previewWidth = availableProcessedSizes.data.i32[0];
- previewHeight = availableProcessedSizes.data.i32[1];
+ previewWidth = availablePreviewSizes[0].width;
+ previewHeight = availablePreviewSizes[0].height;
videoWidth = previewWidth;
videoHeight = previewHeight;
@@ -75,12 +75,13 @@ status_t Parameters::initialize(const CameraMetadata *info) {
previewWidth, previewHeight));
{
String8 supportedPreviewSizes;
- for (size_t i=0; i < availableProcessedSizes.count; i += 2) {
+ for (size_t i = 0; i < availablePreviewSizes.size(); i++) {
if (i != 0) supportedPreviewSizes += ",";
supportedPreviewSizes += String8::format("%dx%d",
- availableProcessedSizes.data.i32[i],
- availableProcessedSizes.data.i32[i+1]);
+ availablePreviewSizes[i].width,
+ availablePreviewSizes[i].height);
}
+ ALOGV("Supported preview sizes are: %s", supportedPreviewSizes.string());
params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
supportedPreviewSizes);
params.set(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES,
@@ -152,7 +153,16 @@ status_t Parameters::initialize(const CameraMetadata *info) {
supportedPreviewFormats +=
CameraParameters::PIXEL_FORMAT_RGBA8888;
break;
+ case HAL_PIXEL_FORMAT_YCbCr_420_888:
+ // Flexible YUV allows both YV12 and NV21
+ supportedPreviewFormats +=
+ CameraParameters::PIXEL_FORMAT_YUV420P;
+ supportedPreviewFormats += ",";
+ supportedPreviewFormats +=
+ CameraParameters::PIXEL_FORMAT_YUV420SP;
+ break;
// Not advertizing JPEG, RAW_SENSOR, etc, for preview formats
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
case HAL_PIXEL_FORMAT_RAW_SENSOR:
case HAL_PIXEL_FORMAT_BLOB:
addComma = false;
@@ -173,7 +183,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
// still have to do something sane for them
// NOTE: Not scaled like FPS range values are.
- previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
+ int previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE,
previewFps);
@@ -239,9 +249,17 @@ status_t Parameters::initialize(const CameraMetadata *info) {
staticInfo(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, 4);
if (!availableJpegThumbnailSizes.count) return NO_INIT;
- // TODO: Pick default thumbnail size sensibly
- jpegThumbSize[0] = availableJpegThumbnailSizes.data.i32[0];
- jpegThumbSize[1] = availableJpegThumbnailSizes.data.i32[1];
+ // Pick the largest thumbnail size that matches still image aspect ratio.
+ ALOG_ASSERT(pictureWidth > 0 && pictureHeight > 0,
+ "Invalid picture size, %d x %d", pictureWidth, pictureHeight);
+ float picAspectRatio = static_cast<float>(pictureWidth) / pictureHeight;
+ Size thumbnailSize =
+ getMaxSizeForRatio(
+ picAspectRatio,
+ &availableJpegThumbnailSizes.data.i32[0],
+ availableJpegThumbnailSizes.count);
+ jpegThumbSize[0] = thumbnailSize.width;
+ jpegThumbSize[1] = thumbnailSize.height;
params.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH,
jpegThumbSize[0]);
@@ -278,53 +296,56 @@ status_t Parameters::initialize(const CameraMetadata *info) {
gpsProcessingMethod = "unknown";
// GPS fields in CameraParameters are not set by implementation
- wbMode = ANDROID_CONTROL_AWB_AUTO;
+ wbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
params.set(CameraParameters::KEY_WHITE_BALANCE,
CameraParameters::WHITE_BALANCE_AUTO);
camera_metadata_ro_entry_t availableWhiteBalanceModes =
- staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
- {
+ staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
+ if (!availableWhiteBalanceModes.count) {
+ params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+ CameraParameters::WHITE_BALANCE_AUTO);
+ } else {
String8 supportedWhiteBalance;
bool addComma = false;
for (size_t i=0; i < availableWhiteBalanceModes.count; i++) {
if (addComma) supportedWhiteBalance += ",";
addComma = true;
switch (availableWhiteBalanceModes.data.u8[i]) {
- case ANDROID_CONTROL_AWB_AUTO:
+ case ANDROID_CONTROL_AWB_MODE_AUTO:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_AUTO;
break;
- case ANDROID_CONTROL_AWB_INCANDESCENT:
+ case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_INCANDESCENT;
break;
- case ANDROID_CONTROL_AWB_FLUORESCENT:
+ case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_FLUORESCENT;
break;
- case ANDROID_CONTROL_AWB_WARM_FLUORESCENT:
+ case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT;
break;
- case ANDROID_CONTROL_AWB_DAYLIGHT:
+ case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_DAYLIGHT;
break;
- case ANDROID_CONTROL_AWB_CLOUDY_DAYLIGHT:
+ case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT;
break;
- case ANDROID_CONTROL_AWB_TWILIGHT:
+ case ANDROID_CONTROL_AWB_MODE_TWILIGHT:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_TWILIGHT;
break;
- case ANDROID_CONTROL_AWB_SHADE:
+ case ANDROID_CONTROL_AWB_MODE_SHADE:
supportedWhiteBalance +=
CameraParameters::WHITE_BALANCE_SHADE;
break;
// Skipping values not mappable to v1 API
- case ANDROID_CONTROL_AWB_OFF:
+ case ANDROID_CONTROL_AWB_MODE_OFF:
addComma = false;
break;
default:
@@ -339,53 +360,55 @@ status_t Parameters::initialize(const CameraMetadata *info) {
supportedWhiteBalance);
}
- effectMode = ANDROID_CONTROL_EFFECT_OFF;
+ effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
params.set(CameraParameters::KEY_EFFECT,
CameraParameters::EFFECT_NONE);
camera_metadata_ro_entry_t availableEffects =
- staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS);
- if (!availableEffects.count) return NO_INIT;
- {
+ staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS, 0, 0, false);
+ if (!availableEffects.count) {
+ params.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+ CameraParameters::EFFECT_NONE);
+ } else {
String8 supportedEffects;
bool addComma = false;
for (size_t i=0; i < availableEffects.count; i++) {
if (addComma) supportedEffects += ",";
addComma = true;
switch (availableEffects.data.u8[i]) {
- case ANDROID_CONTROL_EFFECT_OFF:
+ case ANDROID_CONTROL_EFFECT_MODE_OFF:
supportedEffects +=
CameraParameters::EFFECT_NONE;
break;
- case ANDROID_CONTROL_EFFECT_MONO:
+ case ANDROID_CONTROL_EFFECT_MODE_MONO:
supportedEffects +=
CameraParameters::EFFECT_MONO;
break;
- case ANDROID_CONTROL_EFFECT_NEGATIVE:
+ case ANDROID_CONTROL_EFFECT_MODE_NEGATIVE:
supportedEffects +=
CameraParameters::EFFECT_NEGATIVE;
break;
- case ANDROID_CONTROL_EFFECT_SOLARIZE:
+ case ANDROID_CONTROL_EFFECT_MODE_SOLARIZE:
supportedEffects +=
CameraParameters::EFFECT_SOLARIZE;
break;
- case ANDROID_CONTROL_EFFECT_SEPIA:
+ case ANDROID_CONTROL_EFFECT_MODE_SEPIA:
supportedEffects +=
CameraParameters::EFFECT_SEPIA;
break;
- case ANDROID_CONTROL_EFFECT_POSTERIZE:
+ case ANDROID_CONTROL_EFFECT_MODE_POSTERIZE:
supportedEffects +=
CameraParameters::EFFECT_POSTERIZE;
break;
- case ANDROID_CONTROL_EFFECT_WHITEBOARD:
+ case ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD:
supportedEffects +=
CameraParameters::EFFECT_WHITEBOARD;
break;
- case ANDROID_CONTROL_EFFECT_BLACKBOARD:
+ case ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD:
supportedEffects +=
CameraParameters::EFFECT_BLACKBOARD;
break;
- case ANDROID_CONTROL_EFFECT_AQUA:
+ case ANDROID_CONTROL_EFFECT_MODE_AQUA:
supportedEffects +=
CameraParameters::EFFECT_AQUA;
break;
@@ -399,33 +422,35 @@ status_t Parameters::initialize(const CameraMetadata *info) {
params.set(CameraParameters::KEY_SUPPORTED_EFFECTS, supportedEffects);
}
- antibandingMode = ANDROID_CONTROL_AE_ANTIBANDING_AUTO;
+ antibandingMode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
params.set(CameraParameters::KEY_ANTIBANDING,
CameraParameters::ANTIBANDING_AUTO);
camera_metadata_ro_entry_t availableAntibandingModes =
- staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES);
- if (!availableAntibandingModes.count) return NO_INIT;
- {
+ staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, 0, 0, false);
+ if (!availableAntibandingModes.count) {
+ params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+ CameraParameters::ANTIBANDING_OFF);
+ } else {
String8 supportedAntibanding;
bool addComma = false;
for (size_t i=0; i < availableAntibandingModes.count; i++) {
if (addComma) supportedAntibanding += ",";
addComma = true;
switch (availableAntibandingModes.data.u8[i]) {
- case ANDROID_CONTROL_AE_ANTIBANDING_OFF:
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF:
supportedAntibanding +=
CameraParameters::ANTIBANDING_OFF;
break;
- case ANDROID_CONTROL_AE_ANTIBANDING_50HZ:
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ:
supportedAntibanding +=
CameraParameters::ANTIBANDING_50HZ;
break;
- case ANDROID_CONTROL_AE_ANTIBANDING_60HZ:
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ:
supportedAntibanding +=
CameraParameters::ANTIBANDING_60HZ;
break;
- case ANDROID_CONTROL_AE_ANTIBANDING_AUTO:
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO:
supportedAntibanding +=
CameraParameters::ANTIBANDING_AUTO;
break;
@@ -446,9 +471,10 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::SCENE_MODE_AUTO);
camera_metadata_ro_entry_t availableSceneModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
- if (!availableSceneModes.count) return NO_INIT;
- {
+ staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
+ if (!availableSceneModes.count) {
+ params.remove(CameraParameters::KEY_SCENE_MODE);
+ } else {
String8 supportedSceneModes(CameraParameters::SCENE_MODE_AUTO);
bool addComma = true;
bool noSceneModes = false;
@@ -534,18 +560,22 @@ status_t Parameters::initialize(const CameraMetadata *info) {
if (!noSceneModes) {
params.set(CameraParameters::KEY_SUPPORTED_SCENE_MODES,
supportedSceneModes);
+ } else {
+ params.remove(CameraParameters::KEY_SCENE_MODE);
}
}
+ bool isFlashAvailable = false;
camera_metadata_ro_entry_t flashAvailable =
- staticInfo(ANDROID_FLASH_AVAILABLE, 1, 1);
- if (!flashAvailable.count) return NO_INIT;
+ staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 0, 1, false);
+ if (flashAvailable.count) {
+ isFlashAvailable = flashAvailable.data.u8[0];
+ }
camera_metadata_ro_entry_t availableAeModes =
- staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES);
- if (!availableAeModes.count) return NO_INIT;
+ staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
- if (flashAvailable.data.u8[0]) {
+ if (isFlashAvailable) {
flashMode = Parameters::FLASH_MODE_OFF;
params.set(CameraParameters::KEY_FLASH_MODE,
CameraParameters::FLASH_MODE_OFF);
@@ -557,7 +587,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
"," + CameraParameters::FLASH_MODE_TORCH;
for (size_t i=0; i < availableAeModes.count; i++) {
if (availableAeModes.data.u8[i] ==
- ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE) {
+ ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) {
supportedFlashModes = supportedFlashModes + "," +
CameraParameters::FLASH_MODE_RED_EYE;
break;
@@ -574,14 +604,12 @@ status_t Parameters::initialize(const CameraMetadata *info) {
}
camera_metadata_ro_entry_t minFocusDistance =
- staticInfo(ANDROID_LENS_MINIMUM_FOCUS_DISTANCE, 1, 1);
- if (!minFocusDistance.count) return NO_INIT;
+ staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 1, false);
camera_metadata_ro_entry_t availableAfModes =
- staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES);
- if (!availableAfModes.count) return NO_INIT;
+ staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES, 0, 0, false);
- if (minFocusDistance.data.f[0] == 0) {
+ if (!minFocusDistance.count || minFocusDistance.data.f[0] == 0) {
// Fixed-focus lens
focusMode = Parameters::FOCUS_MODE_FIXED;
params.set(CameraParameters::KEY_FOCUS_MODE,
@@ -599,28 +627,28 @@ status_t Parameters::initialize(const CameraMetadata *info) {
if (addComma) supportedFocusModes += ",";
addComma = true;
switch (availableAfModes.data.u8[i]) {
- case ANDROID_CONTROL_AF_AUTO:
+ case ANDROID_CONTROL_AF_MODE_AUTO:
supportedFocusModes +=
CameraParameters::FOCUS_MODE_AUTO;
break;
- case ANDROID_CONTROL_AF_MACRO:
+ case ANDROID_CONTROL_AF_MODE_MACRO:
supportedFocusModes +=
CameraParameters::FOCUS_MODE_MACRO;
break;
- case ANDROID_CONTROL_AF_CONTINUOUS_VIDEO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
supportedFocusModes +=
CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
break;
- case ANDROID_CONTROL_AF_CONTINUOUS_PICTURE:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
supportedFocusModes +=
CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
break;
- case ANDROID_CONTROL_AF_EDOF:
+ case ANDROID_CONTROL_AF_MODE_EDOF:
supportedFocusModes +=
CameraParameters::FOCUS_MODE_EDOF;
break;
// Not supported in old API
- case ANDROID_CONTROL_AF_OFF:
+ case ANDROID_CONTROL_AF_MODE_OFF:
addComma = false;
break;
default:
@@ -651,21 +679,19 @@ status_t Parameters::initialize(const CameraMetadata *info) {
focusingAreas.add(Parameters::Area(0,0,0,0,0));
camera_metadata_ro_entry_t availableFocalLengths =
- staticInfo(ANDROID_LENS_AVAILABLE_FOCAL_LENGTHS);
+ staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
if (!availableFocalLengths.count) return NO_INIT;
float minFocalLength = availableFocalLengths.data.f[0];
params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength);
- camera_metadata_ro_entry_t sensorSize =
- staticInfo(ANDROID_SENSOR_PHYSICAL_SIZE, 2, 2);
- if (!sensorSize.count) return NO_INIT;
+ float horizFov, vertFov;
+ res = calculatePictureFovs(&horizFov, &vertFov);
+ if (res != OK) {
+ ALOGE("%s: Can't calculate field of views!", __FUNCTION__);
+ return res;
+ }
- // The fields of view here assume infinity focus, maximum wide angle
- float horizFov = 180 / M_PI *
- 2 * atanf(sensorSize.data.f[0] / (2 * minFocalLength));
- float vertFov = 180 / M_PI *
- 2 * atanf(sensorSize.data.f[1] / (2 * minFocalLength));
params.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, horizFov);
params.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, vertFov);
@@ -674,7 +700,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
exposureCompensation);
camera_metadata_ro_entry_t exposureCompensationRange =
- staticInfo(ANDROID_CONTROL_AE_EXP_COMPENSATION_RANGE, 2, 2);
+ staticInfo(ANDROID_CONTROL_AE_COMPENSATION_RANGE, 2, 2);
if (!exposureCompensationRange.count) return NO_INIT;
params.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION,
@@ -683,7 +709,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
exposureCompensationRange.data.i32[0]);
camera_metadata_ro_entry_t exposureCompensationStep =
- staticInfo(ANDROID_CONTROL_AE_EXP_COMPENSATION_STEP, 1, 1);
+ staticInfo(ANDROID_CONTROL_AE_COMPENSATION_STEP, 1, 1);
if (!exposureCompensationStep.count) return NO_INIT;
params.setFloat(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP,
@@ -713,7 +739,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
camera_metadata_ro_entry_t maxDigitalZoom =
- staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM, /*minCount*/1, /*maxCount*/1);
+ staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, /*minCount*/1, /*maxCount*/1);
if (!maxDigitalZoom.count) return NO_INIT;
{
@@ -759,8 +785,8 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::FALSE);
camera_metadata_ro_entry_t availableVideoStabilizationModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
- if (!availableVideoStabilizationModes.count) return NO_INIT;
+ staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+ false);
if (availableVideoStabilizationModes.count > 1) {
params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
@@ -778,13 +804,17 @@ status_t Parameters::initialize(const CameraMetadata *info) {
enableFocusMoveMessages = false;
afTriggerCounter = 1;
+ afStateCounter = 0;
currentAfTriggerId = -1;
afInMotion = false;
precaptureTriggerCounter = 1;
+ takePictureCounter = 0;
+
previewCallbackFlags = 0;
previewCallbackOneShot = false;
+ previewCallbackSurface = false;
char value[PROPERTY_VALUE_MAX];
property_get("camera.disable_zsl_mode", value, "0");
@@ -811,31 +841,67 @@ String8 Parameters::get() const {
status_t Parameters::buildFastInfo() {
camera_metadata_ro_entry_t activeArraySize =
- staticInfo(ANDROID_SENSOR_ACTIVE_ARRAY_SIZE, 2, 2);
+ staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
if (!activeArraySize.count) return NO_INIT;
- int32_t arrayWidth = activeArraySize.data.i32[0];
- int32_t arrayHeight = activeArraySize.data.i32[1];
+ int32_t arrayWidth;
+ int32_t arrayHeight;
+ if (activeArraySize.count == 2) {
+ ALOGW("%s: Camera %d: activeArraySize is missing xmin/ymin!",
+ __FUNCTION__, cameraId);
+ arrayWidth = activeArraySize.data.i32[0];
+ arrayHeight = activeArraySize.data.i32[1];
+ } else if (activeArraySize.count == 4) {
+ arrayWidth = activeArraySize.data.i32[2];
+ arrayHeight = activeArraySize.data.i32[3];
+ } else return NO_INIT;
+
+ // We'll set the target FPS range for still captures to be as wide
+ // as possible to give the HAL maximum latitude for exposure selection
+ camera_metadata_ro_entry_t availableFpsRanges =
+ staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
+ if (availableFpsRanges.count < 2 || availableFpsRanges.count % 2 != 0) {
+ return NO_INIT;
+ }
+
+ int32_t bestStillCaptureFpsRange[2] = {
+ availableFpsRanges.data.i32[0], availableFpsRanges.data.i32[1]
+ };
+ int32_t curRange =
+ bestStillCaptureFpsRange[1] - bestStillCaptureFpsRange[0];
+ for (size_t i = 2; i < availableFpsRanges.count; i += 2) {
+ int32_t nextRange =
+ availableFpsRanges.data.i32[i + 1] -
+ availableFpsRanges.data.i32[i];
+ if ( (nextRange > curRange) || // Maximize size of FPS range first
+ (nextRange == curRange && // Then minimize low-end FPS
+ bestStillCaptureFpsRange[0] > availableFpsRanges.data.i32[i])) {
+
+ bestStillCaptureFpsRange[0] = availableFpsRanges.data.i32[i];
+ bestStillCaptureFpsRange[1] = availableFpsRanges.data.i32[i + 1];
+ curRange = nextRange;
+ }
+ }
camera_metadata_ro_entry_t availableFaceDetectModes =
- staticInfo(ANDROID_STATS_AVAILABLE_FACE_DETECT_MODES);
- if (!availableFaceDetectModes.count) return NO_INIT;
+ staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0,
+ false);
uint8_t bestFaceDetectMode =
- ANDROID_STATS_FACE_DETECTION_OFF;
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
for (size_t i = 0 ; i < availableFaceDetectModes.count; i++) {
switch (availableFaceDetectModes.data.u8[i]) {
- case ANDROID_STATS_FACE_DETECTION_OFF:
+ case ANDROID_STATISTICS_FACE_DETECT_MODE_OFF:
break;
- case ANDROID_STATS_FACE_DETECTION_SIMPLE:
+ case ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE:
if (bestFaceDetectMode !=
- ANDROID_STATS_FACE_DETECTION_FULL) {
+ ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
bestFaceDetectMode =
- ANDROID_STATS_FACE_DETECTION_SIMPLE;
+ ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
}
break;
- case ANDROID_STATS_FACE_DETECTION_FULL:
+ case ANDROID_STATISTICS_FACE_DETECT_MODE_FULL:
bestFaceDetectMode =
- ANDROID_STATS_FACE_DETECTION_FULL;
+ ANDROID_STATISTICS_FACE_DETECT_MODE_FULL;
break;
default:
ALOGE("%s: Camera %d: Unknown face detect mode %d:",
@@ -845,19 +911,30 @@ status_t Parameters::buildFastInfo() {
}
}
+ int32_t maxFaces = 0;
camera_metadata_ro_entry_t maxFacesDetected =
- staticInfo(ANDROID_STATS_MAX_FACE_COUNT, 1, 1);
- if (!maxFacesDetected.count) return NO_INIT;
-
- int32_t maxFaces = maxFacesDetected.data.i32[0];
+ staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 0, 1, false);
+ if (maxFacesDetected.count) {
+ maxFaces = maxFacesDetected.data.i32[0];
+ }
camera_metadata_ro_entry_t availableSceneModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+ staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
camera_metadata_ro_entry_t sceneModeOverrides =
- staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES);
+ staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, 0, 0, false);
camera_metadata_ro_entry_t minFocusDistance =
- staticInfo(ANDROID_LENS_MINIMUM_FOCUS_DISTANCE);
- bool fixedLens = (minFocusDistance.data.f[0] == 0);
+ staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, false);
+ bool fixedLens = minFocusDistance.count == 0 ||
+ minFocusDistance.data.f[0] == 0;
+
+ camera_metadata_ro_entry_t availableFocalLengths =
+ staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
+ if (!availableFocalLengths.count) return NO_INIT;
+
+ camera_metadata_ro_entry_t availableFormats =
+ staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+ if (!availableFormats.count) return NO_INIT;
+
if (sceneModeOverrides.count > 0) {
// sceneModeOverrides is defined to have 3 entries for each scene mode,
@@ -877,16 +954,16 @@ status_t Parameters::buildFastInfo() {
uint8_t aeMode =
sceneModeOverrides.data.u8[i * kModesPerSceneMode + 0];
switch(aeMode) {
- case ANDROID_CONTROL_AE_ON:
+ case ANDROID_CONTROL_AE_MODE_ON:
modes.flashMode = FLASH_MODE_OFF;
break;
- case ANDROID_CONTROL_AE_ON_AUTO_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
modes.flashMode = FLASH_MODE_AUTO;
break;
- case ANDROID_CONTROL_AE_ON_ALWAYS_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
modes.flashMode = FLASH_MODE_ON;
break;
- case ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
modes.flashMode = FLASH_MODE_RED_EYE;
break;
default:
@@ -900,15 +977,15 @@ status_t Parameters::buildFastInfo() {
uint8_t afMode =
sceneModeOverrides.data.u8[i * kModesPerSceneMode + 2];
switch(afMode) {
- case ANDROID_CONTROL_AF_OFF:
+ case ANDROID_CONTROL_AF_MODE_OFF:
modes.focusMode = fixedLens ?
FOCUS_MODE_FIXED : FOCUS_MODE_INFINITY;
break;
- case ANDROID_CONTROL_AF_AUTO:
- case ANDROID_CONTROL_AF_MACRO:
- case ANDROID_CONTROL_AF_CONTINUOUS_VIDEO:
- case ANDROID_CONTROL_AF_CONTINUOUS_PICTURE:
- case ANDROID_CONTROL_AF_EDOF:
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ case ANDROID_CONTROL_AF_MODE_EDOF:
modes.focusMode = static_cast<focusMode_t>(afMode);
break;
default:
@@ -924,8 +1001,31 @@ status_t Parameters::buildFastInfo() {
fastInfo.arrayWidth = arrayWidth;
fastInfo.arrayHeight = arrayHeight;
+ fastInfo.bestStillCaptureFpsRange[0] = bestStillCaptureFpsRange[0];
+ fastInfo.bestStillCaptureFpsRange[1] = bestStillCaptureFpsRange[1];
fastInfo.bestFaceDetectMode = bestFaceDetectMode;
fastInfo.maxFaces = maxFaces;
+
+ // Find smallest (widest-angle) focal length to use as basis of still
+ // picture FOV reporting.
+ fastInfo.minFocalLength = availableFocalLengths.data.f[0];
+ for (size_t i = 1; i < availableFocalLengths.count; i++) {
+ if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) {
+ fastInfo.minFocalLength = availableFocalLengths.data.f[i];
+ }
+ }
+
+ // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888
+ fastInfo.useFlexibleYuv = false;
+ for (size_t i = 0; i < availableFormats.count; i++) {
+ if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ fastInfo.useFlexibleYuv = true;
+ break;
+ }
+ }
+ ALOGV("Camera %d: Flexible YUV %s supported",
+ cameraId, fastInfo.useFlexibleYuv ? "is" : "is not");
+
return OK;
}
@@ -946,15 +1046,19 @@ status_t Parameters::buildQuirks() {
ALOGV_IF(quirks.meteringCropRegion, "Camera %d: Quirk meteringCropRegion"
" enabled", cameraId);
+ entry = info->find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+ quirks.partialResults = (entry.count != 0 && entry.data.u8[0] == 1);
+ ALOGV_IF(quirks.partialResults, "Camera %d: Quirk usePartialResult"
+ " enabled", cameraId);
+
return OK;
}
camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag,
- size_t minCount, size_t maxCount) const {
- status_t res;
+ size_t minCount, size_t maxCount, bool required) const {
camera_metadata_ro_entry_t entry = info->find(tag);
- if (CC_UNLIKELY( entry.count == 0 )) {
+ if (CC_UNLIKELY( entry.count == 0 ) && required) {
const char* tagSection = get_camera_metadata_section_name(tag);
if (tagSection == NULL) tagSection = "<unknown>";
const char* tagName = get_camera_metadata_tag_name(tag);
@@ -1006,15 +1110,13 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.previewWidth, validatedParams.previewHeight);
return BAD_VALUE;
}
- camera_metadata_ro_entry_t availablePreviewSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
- for (i = 0; i < availablePreviewSizes.count; i += 2 ) {
- if ((availablePreviewSizes.data.i32[i] ==
+ for (i = 0; i < availablePreviewSizes.size(); i++) {
+ if ((availablePreviewSizes[i].width ==
validatedParams.previewWidth) &&
- (availablePreviewSizes.data.i32[i+1] ==
+ (availablePreviewSizes[i].height ==
validatedParams.previewHeight)) break;
}
- if (i == availablePreviewSizes.count) {
+ if (i == availablePreviewSizes.size()) {
ALOGE("%s: Requested preview size %d x %d is not supported",
__FUNCTION__, validatedParams.previewWidth,
validatedParams.previewHeight);
@@ -1031,13 +1133,22 @@ status_t Parameters::set(const String8& paramString) {
// PREVIEW_FPS_RANGE
bool fpsRangeChanged = false;
+ int32_t lastSetFpsRange[2];
+
+ params.getPreviewFpsRange(&lastSetFpsRange[0], &lastSetFpsRange[1]);
+ lastSetFpsRange[0] /= kFpsToApiScale;
+ lastSetFpsRange[1] /= kFpsToApiScale;
+
newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0],
&validatedParams.previewFpsRange[1]);
validatedParams.previewFpsRange[0] /= kFpsToApiScale;
validatedParams.previewFpsRange[1] /= kFpsToApiScale;
- if (validatedParams.previewFpsRange[0] != previewFpsRange[0] ||
- validatedParams.previewFpsRange[1] != previewFpsRange[1]) {
+ // Compare the FPS range value from the last set() to the current set()
+ // to determine if the client has changed it
+ if (validatedParams.previewFpsRange[0] != lastSetFpsRange[0] ||
+ validatedParams.previewFpsRange[1] != lastSetFpsRange[1]) {
+
fpsRangeChanged = true;
camera_metadata_ro_entry_t availablePreviewFpsRanges =
staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
@@ -1055,10 +1166,6 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.previewFpsRange[1]);
return BAD_VALUE;
}
- validatedParams.previewFps =
- fpsFromRange(validatedParams.previewFpsRange[0],
- validatedParams.previewFpsRange[1]);
- newParams.setPreviewFrameRate(validatedParams.previewFps);
}
// PREVIEW_FORMAT
@@ -1072,24 +1179,35 @@ status_t Parameters::set(const String8& paramString) {
}
camera_metadata_ro_entry_t availableFormats =
staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
- for (i = 0; i < availableFormats.count; i++) {
- if (availableFormats.data.i32[i] == validatedParams.previewFormat)
- break;
- }
- if (i == availableFormats.count) {
- ALOGE("%s: Requested preview format %s (0x%x) is not supported",
- __FUNCTION__, newParams.getPreviewFormat(),
- validatedParams.previewFormat);
- return BAD_VALUE;
+ // If using flexible YUV, always support NV21/YV12. Otherwise, check
+ // HAL's list.
+ if (! (fastInfo.useFlexibleYuv &&
+ (validatedParams.previewFormat ==
+ HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+ validatedParams.previewFormat ==
+ HAL_PIXEL_FORMAT_YV12) ) ) {
+ // Not using flexible YUV format, so check explicitly
+ for (i = 0; i < availableFormats.count; i++) {
+ if (availableFormats.data.i32[i] ==
+ validatedParams.previewFormat) break;
+ }
+ if (i == availableFormats.count) {
+ ALOGE("%s: Requested preview format %s (0x%x) is not supported",
+ __FUNCTION__, newParams.getPreviewFormat(),
+ validatedParams.previewFormat);
+ return BAD_VALUE;
+ }
}
}
- // PREVIEW_FRAME_RATE
- // Deprecated, only use if the preview fps range is unchanged this time.
- // The single-value FPS is the same as the minimum of the range.
+ // PREVIEW_FRAME_RATE Deprecated, only use if the preview fps range is
+ // unchanged this time. The single-value FPS is the same as the minimum of
+ // the range. To detect whether the application has changed the value of
+ // previewFps, compare against their last-set preview FPS.
if (!fpsRangeChanged) {
- validatedParams.previewFps = newParams.getPreviewFrameRate();
- if (validatedParams.previewFps != previewFps || recordingHintChanged) {
+ int previewFps = newParams.getPreviewFrameRate();
+ int lastSetPreviewFps = params.getPreviewFrameRate();
+ if (previewFps != lastSetPreviewFps || recordingHintChanged) {
camera_metadata_ro_entry_t availableFrameRates =
staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES);
/**
@@ -1102,8 +1220,8 @@ status_t Parameters::set(const String8& paramString) {
* Either way, in case of multiple ranges, break the tie by
* selecting the smaller range.
*/
- int targetFps = validatedParams.previewFps;
- // all ranges which have targetFps
+
+ // all ranges which have previewFps
Vector<Range> candidateRanges;
for (i = 0; i < availableFrameRates.count; i+=2) {
Range r = {
@@ -1111,13 +1229,13 @@ status_t Parameters::set(const String8& paramString) {
availableFrameRates.data.i32[i+1]
};
- if (r.min <= targetFps && targetFps <= r.max) {
+ if (r.min <= previewFps && previewFps <= r.max) {
candidateRanges.push(r);
}
}
if (candidateRanges.isEmpty()) {
ALOGE("%s: Requested preview frame rate %d is not supported",
- __FUNCTION__, validatedParams.previewFps);
+ __FUNCTION__, previewFps);
return BAD_VALUE;
}
// most applicable range with targetFps
@@ -1156,11 +1274,6 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.previewFpsRange[1],
validatedParams.recordingHint);
}
- newParams.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
- String8::format("%d,%d",
- validatedParams.previewFpsRange[0] * kFpsToApiScale,
- validatedParams.previewFpsRange[1] * kFpsToApiScale));
-
}
// PICTURE_SIZE
@@ -1208,23 +1321,24 @@ status_t Parameters::set(const String8& paramString) {
}
// JPEG_THUMBNAIL_QUALITY
- validatedParams.jpegThumbQuality =
- newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY);
- if (validatedParams.jpegThumbQuality < 0 ||
- validatedParams.jpegThumbQuality > 100) {
+ int quality = newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY);
+ // also makes sure quality fits in uint8_t
+ if (quality < 0 || quality > 100) {
ALOGE("%s: Requested JPEG thumbnail quality %d is not supported",
- __FUNCTION__, validatedParams.jpegThumbQuality);
+ __FUNCTION__, quality);
return BAD_VALUE;
}
+ validatedParams.jpegThumbQuality = quality;
// JPEG_QUALITY
- validatedParams.jpegQuality =
- newParams.getInt(CameraParameters::KEY_JPEG_QUALITY);
- if (validatedParams.jpegQuality < 0 || validatedParams.jpegQuality > 100) {
+ quality = newParams.getInt(CameraParameters::KEY_JPEG_QUALITY);
+ // also makes sure quality fits in uint8_t
+ if (quality < 0 || quality > 100) {
ALOGE("%s: Requested JPEG quality %d is not supported",
- __FUNCTION__, validatedParams.jpegQuality);
+ __FUNCTION__, quality);
return BAD_VALUE;
}
+ validatedParams.jpegQuality = quality;
// ROTATION
validatedParams.jpegRotation =
@@ -1364,7 +1478,7 @@ status_t Parameters::set(const String8& paramString) {
if (validatedParams.flashMode != flashMode) {
camera_metadata_ro_entry_t flashAvailable =
- staticInfo(ANDROID_FLASH_AVAILABLE, 1, 1);
+ staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1);
if (!flashAvailable.data.u8[0] &&
validatedParams.flashMode != Parameters::FLASH_MODE_OFF) {
ALOGE("%s: Requested flash mode \"%s\" is not supported: "
@@ -1401,15 +1515,15 @@ status_t Parameters::set(const String8& paramString) {
fastInfo.sceneModeOverrides.
valueFor(validatedParams.sceneMode).wbMode;
} else {
- validatedParams.wbMode = ANDROID_CONTROL_AWB_OFF;
+ validatedParams.wbMode = ANDROID_CONTROL_AWB_MODE_OFF;
}
- if (validatedParams.wbMode == ANDROID_CONTROL_AWB_OFF) {
+ if (validatedParams.wbMode == ANDROID_CONTROL_AWB_MODE_OFF) {
validatedParams.wbMode = wbModeStringToEnum(
newParams.get(CameraParameters::KEY_WHITE_BALANCE) );
}
if (validatedParams.wbMode != wbMode) {
camera_metadata_ro_entry_t availableWbModes =
- staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+ staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
for (i = 0; i < availableWbModes.count; i++) {
if (validatedParams.wbMode == availableWbModes.data.u8[i]) break;
}
@@ -1440,8 +1554,9 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.currentAfTriggerId = -1;
if (validatedParams.focusMode != Parameters::FOCUS_MODE_FIXED) {
camera_metadata_ro_entry_t minFocusDistance =
- staticInfo(ANDROID_LENS_MINIMUM_FOCUS_DISTANCE);
- if (minFocusDistance.data.f[0] == 0) {
+ staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0,
+ false);
+ if (minFocusDistance.count && minFocusDistance.data.f[0] == 0) {
ALOGE("%s: Requested focus mode \"%s\" is not available: "
"fixed focus lens",
__FUNCTION__,
@@ -1490,7 +1605,7 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.exposureCompensation =
newParams.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
camera_metadata_ro_entry_t exposureCompensationRange =
- staticInfo(ANDROID_CONTROL_AE_EXP_COMPENSATION_RANGE);
+ staticInfo(ANDROID_CONTROL_AE_COMPENSATION_RANGE);
if ((validatedParams.exposureCompensation <
exposureCompensationRange.data.i32[0]) ||
(validatedParams.exposureCompensation >
@@ -1541,15 +1656,13 @@ status_t Parameters::set(const String8& paramString) {
__FUNCTION__);
return BAD_VALUE;
}
- camera_metadata_ro_entry_t availableVideoSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
- for (i = 0; i < availableVideoSizes.count; i += 2 ) {
- if ((availableVideoSizes.data.i32[i] ==
+ for (i = 0; i < availablePreviewSizes.size(); i++) {
+ if ((availablePreviewSizes[i].width ==
validatedParams.videoWidth) &&
- (availableVideoSizes.data.i32[i+1] ==
+ (availablePreviewSizes[i].height ==
validatedParams.videoHeight)) break;
}
- if (i == availableVideoSizes.count) {
+ if (i == availablePreviewSizes.size()) {
ALOGE("%s: Requested video size %d x %d is not supported",
__FUNCTION__, validatedParams.videoWidth,
validatedParams.videoHeight);
@@ -1561,16 +1674,36 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.videoStabilization = boolFromString(
newParams.get(CameraParameters::KEY_VIDEO_STABILIZATION) );
camera_metadata_ro_entry_t availableVideoStabilizationModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
+ staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+ false);
if (validatedParams.videoStabilization &&
availableVideoStabilizationModes.count == 1) {
ALOGE("%s: Video stabilization not supported", __FUNCTION__);
}
+ // LIGHTFX
+ validatedParams.lightFx = lightFxStringToEnum(
+ newParams.get(CameraParameters::KEY_LIGHTFX));
+
/** Update internal parameters */
*this = validatedParams;
+ /** Update external parameters calculated from the internal ones */
+
+ // HORIZONTAL/VERTICAL FIELD OF VIEW
+ float horizFov, vertFov;
+ res = calculatePictureFovs(&horizFov, &vertFov);
+ if (res != OK) {
+ ALOGE("%s: Can't calculate FOVs", __FUNCTION__);
+ // continue so parameters are at least consistent
+ }
+ newParams.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE,
+ horizFov);
+ newParams.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE,
+ vertFov);
+ ALOGV("Current still picture FOV: %f x %f deg", horizFov, vertFov);
+
// Need to flatten again in case of overrides
paramsFlattened = newParams.flatten();
params = newParams;
@@ -1582,13 +1715,48 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
ATRACE_CALL();
status_t res;
- uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+ /**
+ * Mixin default important security values
+ * - android.led.transmit = defaulted ON
+ */
+ camera_metadata_ro_entry_t entry = staticInfo(ANDROID_LED_AVAILABLE_LEDS,
+ /*minimumCount*/0,
+ /*maximumCount*/0,
+ /*required*/false);
+ for(size_t i = 0; i < entry.count; ++i) {
+ uint8_t led = entry.data.u8[i];
+
+ switch(led) {
+ // Transmit LED is unconditionally on when using
+ // the android.hardware.Camera API
+ case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
+ uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
+ res = request->update(ANDROID_LED_TRANSMIT,
+ &transmitDefault, 1);
+ if (res != OK) return res;
+ break;
+ }
+ }
+ }
+
+ /**
+ * Construct metadata from parameters
+ */
+
+ uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
res = request->update(ANDROID_REQUEST_METADATA_MODE,
&metadataMode, 1);
if (res != OK) return res;
- res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
- previewFpsRange, 2);
+ camera_metadata_entry_t intent =
+ request->find(ANDROID_CONTROL_CAPTURE_INTENT);
+ if (intent.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE) {
+ res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ fastInfo.bestStillCaptureFpsRange, 2);
+ } else {
+ res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ previewFpsRange, 2);
+ }
if (res != OK) return res;
uint8_t reqWbLock = autoWhiteBalanceLock ?
@@ -1609,9 +1777,9 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
// to the other.
bool sceneModeActive =
sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
- uint8_t reqControlMode = ANDROID_CONTROL_AUTO;
+ uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO;
if (enableFaceDetect || sceneModeActive) {
- reqControlMode = ANDROID_CONTROL_USE_SCENE_MODE;
+ reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE;
}
res = request->update(ANDROID_CONTROL_MODE,
&reqControlMode, 1);
@@ -1625,21 +1793,21 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
&reqSceneMode, 1);
if (res != OK) return res;
- uint8_t reqFlashMode = ANDROID_FLASH_OFF;
- uint8_t reqAeMode = ANDROID_CONTROL_AE_OFF;
+ uint8_t reqFlashMode = ANDROID_FLASH_MODE_OFF;
+ uint8_t reqAeMode = ANDROID_CONTROL_AE_MODE_OFF;
switch (flashMode) {
case Parameters::FLASH_MODE_OFF:
- reqAeMode = ANDROID_CONTROL_AE_ON; break;
+ reqAeMode = ANDROID_CONTROL_AE_MODE_ON; break;
case Parameters::FLASH_MODE_AUTO:
- reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
+ reqAeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH; break;
case Parameters::FLASH_MODE_ON:
- reqAeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
+ reqAeMode = ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH; break;
case Parameters::FLASH_MODE_TORCH:
- reqAeMode = ANDROID_CONTROL_AE_ON;
- reqFlashMode = ANDROID_FLASH_TORCH;
+ reqAeMode = ANDROID_CONTROL_AE_MODE_ON;
+ reqFlashMode = ANDROID_FLASH_MODE_TORCH;
break;
case Parameters::FLASH_MODE_RED_EYE:
- reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
+ reqAeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE; break;
default:
ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
cameraId, flashMode);
@@ -1663,7 +1831,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
if (res != OK) return res;
float reqFocusDistance = 0; // infinity focus in diopters
- uint8_t reqFocusMode = ANDROID_CONTROL_AF_OFF;
+ uint8_t reqFocusMode = ANDROID_CONTROL_AF_MODE_OFF;
switch (focusMode) {
case Parameters::FOCUS_MODE_AUTO:
case Parameters::FOCUS_MODE_MACRO:
@@ -1674,7 +1842,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
break;
case Parameters::FOCUS_MODE_INFINITY:
case Parameters::FOCUS_MODE_FIXED:
- reqFocusMode = ANDROID_CONTROL_AF_OFF;
+ reqFocusMode = ANDROID_CONTROL_AF_MODE_OFF;
break;
default:
ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
@@ -1713,7 +1881,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
if (res != OK) return res;
delete[] reqFocusingAreas;
- res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
+ res = request->update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
&exposureCompensation, 1);
if (res != OK) return res;
@@ -1749,22 +1917,27 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
CropRegion::OUTPUT_PREVIEW |
CropRegion::OUTPUT_VIDEO |
CropRegion::OUTPUT_PICTURE ));
- int32_t reqCropRegion[3] = { crop.left, crop.top, crop.width };
+ int32_t reqCropRegion[4] = {
+ static_cast<int32_t>(crop.left),
+ static_cast<int32_t>(crop.top),
+ static_cast<int32_t>(crop.width),
+ static_cast<int32_t>(crop.height)
+ };
res = request->update(ANDROID_SCALER_CROP_REGION,
- reqCropRegion, 3);
+ reqCropRegion, 4);
if (res != OK) return res;
uint8_t reqVstabMode = videoStabilization ?
- ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
- ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON :
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
&reqVstabMode, 1);
if (res != OK) return res;
uint8_t reqFaceDetectMode = enableFaceDetect ?
fastInfo.bestFaceDetectMode :
- (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
- res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
+ (uint8_t)ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ res = request->update(ANDROID_STATISTICS_FACE_DETECT_MODE,
&reqFaceDetectMode, 1);
if (res != OK) return res;
@@ -1888,43 +2061,43 @@ const char* Parameters::formatEnumToString(int format) {
int Parameters::wbModeStringToEnum(const char *wbMode) {
return
!wbMode ?
- ANDROID_CONTROL_AWB_AUTO :
+ ANDROID_CONTROL_AWB_MODE_AUTO :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_AUTO) ?
- ANDROID_CONTROL_AWB_AUTO :
+ ANDROID_CONTROL_AWB_MODE_AUTO :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_INCANDESCENT) ?
- ANDROID_CONTROL_AWB_INCANDESCENT :
+ ANDROID_CONTROL_AWB_MODE_INCANDESCENT :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_FLUORESCENT) ?
- ANDROID_CONTROL_AWB_FLUORESCENT :
+ ANDROID_CONTROL_AWB_MODE_FLUORESCENT :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT) ?
- ANDROID_CONTROL_AWB_WARM_FLUORESCENT :
+ ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_DAYLIGHT) ?
- ANDROID_CONTROL_AWB_DAYLIGHT :
+ ANDROID_CONTROL_AWB_MODE_DAYLIGHT :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT) ?
- ANDROID_CONTROL_AWB_CLOUDY_DAYLIGHT :
+ ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_TWILIGHT) ?
- ANDROID_CONTROL_AWB_TWILIGHT :
+ ANDROID_CONTROL_AWB_MODE_TWILIGHT :
!strcmp(wbMode, CameraParameters::WHITE_BALANCE_SHADE) ?
- ANDROID_CONTROL_AWB_SHADE :
+ ANDROID_CONTROL_AWB_MODE_SHADE :
-1;
}
const char* Parameters::wbModeEnumToString(uint8_t wbMode) {
switch (wbMode) {
- case ANDROID_CONTROL_AWB_AUTO:
+ case ANDROID_CONTROL_AWB_MODE_AUTO:
return CameraParameters::WHITE_BALANCE_AUTO;
- case ANDROID_CONTROL_AWB_INCANDESCENT:
+ case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
return CameraParameters::WHITE_BALANCE_INCANDESCENT;
- case ANDROID_CONTROL_AWB_FLUORESCENT:
+ case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
return CameraParameters::WHITE_BALANCE_FLUORESCENT;
- case ANDROID_CONTROL_AWB_WARM_FLUORESCENT:
+ case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT:
return CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT;
- case ANDROID_CONTROL_AWB_DAYLIGHT:
+ case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
return CameraParameters::WHITE_BALANCE_DAYLIGHT;
- case ANDROID_CONTROL_AWB_CLOUDY_DAYLIGHT:
+ case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT:
return CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT;
- case ANDROID_CONTROL_AWB_TWILIGHT:
+ case ANDROID_CONTROL_AWB_MODE_TWILIGHT:
return CameraParameters::WHITE_BALANCE_TWILIGHT;
- case ANDROID_CONTROL_AWB_SHADE:
+ case ANDROID_CONTROL_AWB_MODE_SHADE:
return CameraParameters::WHITE_BALANCE_SHADE;
default:
ALOGE("%s: Unknown AWB mode enum: %d",
@@ -1936,40 +2109,40 @@ const char* Parameters::wbModeEnumToString(uint8_t wbMode) {
int Parameters::effectModeStringToEnum(const char *effectMode) {
return
!effectMode ?
- ANDROID_CONTROL_EFFECT_OFF :
+ ANDROID_CONTROL_EFFECT_MODE_OFF :
!strcmp(effectMode, CameraParameters::EFFECT_NONE) ?
- ANDROID_CONTROL_EFFECT_OFF :
+ ANDROID_CONTROL_EFFECT_MODE_OFF :
!strcmp(effectMode, CameraParameters::EFFECT_MONO) ?
- ANDROID_CONTROL_EFFECT_MONO :
+ ANDROID_CONTROL_EFFECT_MODE_MONO :
!strcmp(effectMode, CameraParameters::EFFECT_NEGATIVE) ?
- ANDROID_CONTROL_EFFECT_NEGATIVE :
+ ANDROID_CONTROL_EFFECT_MODE_NEGATIVE :
!strcmp(effectMode, CameraParameters::EFFECT_SOLARIZE) ?
- ANDROID_CONTROL_EFFECT_SOLARIZE :
+ ANDROID_CONTROL_EFFECT_MODE_SOLARIZE :
!strcmp(effectMode, CameraParameters::EFFECT_SEPIA) ?
- ANDROID_CONTROL_EFFECT_SEPIA :
+ ANDROID_CONTROL_EFFECT_MODE_SEPIA :
!strcmp(effectMode, CameraParameters::EFFECT_POSTERIZE) ?
- ANDROID_CONTROL_EFFECT_POSTERIZE :
+ ANDROID_CONTROL_EFFECT_MODE_POSTERIZE :
!strcmp(effectMode, CameraParameters::EFFECT_WHITEBOARD) ?
- ANDROID_CONTROL_EFFECT_WHITEBOARD :
+ ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD :
!strcmp(effectMode, CameraParameters::EFFECT_BLACKBOARD) ?
- ANDROID_CONTROL_EFFECT_BLACKBOARD :
+ ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD :
!strcmp(effectMode, CameraParameters::EFFECT_AQUA) ?
- ANDROID_CONTROL_EFFECT_AQUA :
+ ANDROID_CONTROL_EFFECT_MODE_AQUA :
-1;
}
int Parameters::abModeStringToEnum(const char *abMode) {
return
!abMode ?
- ANDROID_CONTROL_AE_ANTIBANDING_AUTO :
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO :
!strcmp(abMode, CameraParameters::ANTIBANDING_AUTO) ?
- ANDROID_CONTROL_AE_ANTIBANDING_AUTO :
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO :
!strcmp(abMode, CameraParameters::ANTIBANDING_OFF) ?
- ANDROID_CONTROL_AE_ANTIBANDING_OFF :
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF :
!strcmp(abMode, CameraParameters::ANTIBANDING_50HZ) ?
- ANDROID_CONTROL_AE_ANTIBANDING_50HZ :
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ :
!strcmp(abMode, CameraParameters::ANTIBANDING_60HZ) ?
- ANDROID_CONTROL_AE_ANTIBANDING_60HZ :
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ :
-1;
}
@@ -2094,6 +2267,18 @@ const char *Parameters::focusModeEnumToString(focusMode_t focusMode) {
}
}
+Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
+ const char *lightFxMode) {
+ return
+ !lightFxMode ?
+ Parameters::LIGHTFX_NONE :
+ !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
+ Parameters::LIGHTFX_LOWLIGHT :
+ !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
+ Parameters::LIGHTFX_HDR :
+ Parameters::LIGHTFX_NONE;
+}
+
status_t Parameters::parseAreas(const char *areasCStr,
Vector<Parameters::Area> *areas) {
static const size_t NUM_FIELDS = 5;
@@ -2196,7 +2381,7 @@ int Parameters::cropXToArray(int x) const {
CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
ALOG_ASSERT(x < previewCrop.width, "Crop-relative X coordinate = '%d' "
- "is out of bounds (upper = %d)", x, previewCrop.width);
+ "is out of bounds (upper = %f)", x, previewCrop.width);
int ret = x + previewCrop.left;
@@ -2212,7 +2397,7 @@ int Parameters::cropYToArray(int y) const {
CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
ALOG_ASSERT(y < previewCrop.height, "Crop-relative Y coordinate = '%d' is "
- "out of bounds (upper = %d)", y, previewCrop.height);
+ "out of bounds (upper = %f)", y, previewCrop.height);
int ret = y + previewCrop.top;
@@ -2305,6 +2490,64 @@ int Parameters::normalizedYToArray(int y) const {
return cropYToArray(normalizedYToCrop(y));
}
+status_t Parameters::getFilteredPreviewSizes(Size limit, Vector<Size> *sizes) {
+ if (info == NULL) {
+ ALOGE("%s: Static metadata is not initialized", __FUNCTION__);
+ return NO_INIT;
+ }
+ if (sizes == NULL) {
+ ALOGE("%s: Input size is null", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ const size_t SIZE_COUNT = sizeof(Size) / sizeof(int);
+ camera_metadata_ro_entry_t availableProcessedSizes =
+ staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT);
+ if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE;
+
+ Size previewSize;
+ for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) {
+ previewSize.width = availableProcessedSizes.data.i32[i];
+ previewSize.height = availableProcessedSizes.data.i32[i+1];
+ // Need skip the preview sizes that are too large.
+ if (previewSize.width <= limit.width &&
+ previewSize.height <= limit.height) {
+ sizes->push(previewSize);
+ }
+ }
+ if (sizes->isEmpty()) {
+ ALOGE("generated preview size list is empty!!");
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+Parameters::Size Parameters::getMaxSizeForRatio(
+ float ratio, const int32_t* sizeArray, size_t count) {
+ ALOG_ASSERT(sizeArray != NULL, "size array shouldn't be NULL");
+ ALOG_ASSERT(count >= 2 && count % 2 == 0, "count must be a positive even number");
+
+ Size maxSize = {0, 0};
+ for (size_t i = 0; i < count; i += 2) {
+ if (sizeArray[i] > 0 && sizeArray[i+1] > 0) {
+ float curRatio = static_cast<float>(sizeArray[i]) / sizeArray[i+1];
+ if (fabs(curRatio - ratio) < ASPECT_RATIO_TOLERANCE && maxSize.width < sizeArray[i]) {
+ maxSize.width = sizeArray[i];
+ maxSize.height = sizeArray[i+1];
+ }
+ }
+ }
+
+ if (maxSize.width == 0 || maxSize.height == 0) {
+ maxSize.width = sizeArray[0];
+ maxSize.height = sizeArray[1];
+ ALOGW("Unable to find the size to match the given aspect ratio %f."
+ "Fall back to %d x %d", ratio, maxSize.width, maxSize.height);
+ }
+
+ return maxSize;
+}
+
Parameters::CropRegion Parameters::calculateCropRegion(
Parameters::CropRegion::Outputs outputs) const {
@@ -2314,7 +2557,7 @@ Parameters::CropRegion Parameters::calculateCropRegion(
// chosen to maximize its area on the sensor
camera_metadata_ro_entry_t maxDigitalZoom =
- staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
+ staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM);
// For each zoom step by how many pixels more do we change the zoom
float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
(NUM_ZOOM_STEPS-1);
@@ -2347,10 +2590,14 @@ Parameters::CropRegion Parameters::calculateCropRegion(
float minOutputWidth, minOutputHeight, minOutputRatio;
{
float outputSizes[][2] = {
- { previewWidth, previewHeight },
- { videoWidth, videoHeight },
- { jpegThumbSize[0], jpegThumbSize[1] },
- { pictureWidth, pictureHeight },
+ { static_cast<float>(previewWidth),
+ static_cast<float>(previewHeight) },
+ { static_cast<float>(videoWidth),
+ static_cast<float>(videoHeight) },
+ { static_cast<float>(jpegThumbSize[0]),
+ static_cast<float>(jpegThumbSize[1]) },
+ { static_cast<float>(pictureWidth),
+ static_cast<float>(pictureHeight) },
};
minOutputWidth = outputSizes[0][0];
@@ -2414,7 +2661,84 @@ Parameters::CropRegion Parameters::calculateCropRegion(
return crop;
}
-int32_t Parameters::fpsFromRange(int32_t min, int32_t max) const {
+status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov)
+ const {
+ camera_metadata_ro_entry_t sensorSize =
+ staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
+ if (!sensorSize.count) return NO_INIT;
+
+ float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
+ fastInfo.arrayHeight;
+ float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
+ ALOGV("Array aspect: %f, still aspect: %f", arrayAspect, stillAspect);
+
+ // The crop factors from the full sensor array to the still picture crop
+ // region
+ float horizCropFactor = 1.f;
+ float vertCropFactor = 1.f;
+
+ /**
+ * Need to calculate the still image field of view based on the total pixel
+ * array field of view, and the relative aspect ratios of the pixel array
+ * and output streams.
+ *
+ * Special treatment for quirky definition of crop region and relative
+ * stream cropping.
+ */
+ if (quirks.meteringCropRegion) {
+ // Use max of preview and video as first crop
+ float previewAspect = static_cast<float>(previewWidth) / previewHeight;
+ float videoAspect = static_cast<float>(videoWidth) / videoHeight;
+ if (videoAspect > previewAspect) {
+ previewAspect = videoAspect;
+ }
+ // First crop sensor to preview aspect ratio
+ if (arrayAspect < previewAspect) {
+ vertCropFactor = arrayAspect / previewAspect;
+ } else {
+ horizCropFactor = previewAspect / arrayAspect;
+ }
+ // Second crop to still aspect ratio
+ if (stillAspect < previewAspect) {
+ horizCropFactor *= stillAspect / previewAspect;
+ } else {
+ vertCropFactor *= previewAspect / stillAspect;
+ }
+ } else {
+ /**
+         * Crop factors are just a function of the still/array relative aspect
+ * ratios. Since each stream will maximize its area within the crop
+ * region, and for FOV we assume a full-sensor crop region, we only ever
+ * crop the FOV either vertically or horizontally, never both.
+ */
+ horizCropFactor = (arrayAspect > stillAspect) ?
+ (stillAspect / arrayAspect) : 1.f;
+ vertCropFactor = (arrayAspect < stillAspect) ?
+ (arrayAspect / stillAspect) : 1.f;
+ }
+ ALOGV("Horiz crop factor: %f, vert crop fact: %f",
+ horizCropFactor, vertCropFactor);
+ /**
+ * Basic field of view formula is:
+ * angle of view = 2 * arctangent ( d / 2f )
+ * where d is the physical sensor dimension of interest, and f is
+ * the focal length. This only applies to rectilinear sensors, for focusing
+ * at distances >> f, etc.
+ */
+ if (horizFov != NULL) {
+ *horizFov = 180 / M_PI * 2 *
+ atanf(horizCropFactor * sensorSize.data.f[0] /
+ (2 * fastInfo.minFocalLength));
+ }
+ if (vertFov != NULL) {
+ *vertFov = 180 / M_PI * 2 *
+ atanf(vertCropFactor * sensorSize.data.f[1] /
+ (2 * fastInfo.minFocalLength));
+ }
+ return OK;
+}
+
+int32_t Parameters::fpsFromRange(int32_t /*min*/, int32_t max) const {
return max;
}
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 54b1e8c..32dbd42 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -25,8 +25,7 @@
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
#include <camera/CameraParameters.h>
-
-#include "CameraMetadata.h"
+#include <camera/CameraMetadata.h>
namespace android {
namespace camera2 {
@@ -47,7 +46,6 @@ struct Parameters {
int previewWidth, previewHeight;
int32_t previewFpsRange[2];
- int previewFps; // deprecated, here only for tracking changes
int previewFormat;
int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
@@ -55,7 +53,7 @@ struct Parameters {
int pictureWidth, pictureHeight;
int32_t jpegThumbSize[2];
- int32_t jpegQuality, jpegThumbQuality;
+ uint8_t jpegQuality, jpegThumbQuality;
int32_t jpegRotation;
bool gpsEnabled;
@@ -73,16 +71,16 @@ struct Parameters {
FLASH_MODE_AUTO,
FLASH_MODE_ON,
FLASH_MODE_TORCH,
- FLASH_MODE_RED_EYE = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE,
+ FLASH_MODE_RED_EYE = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE,
FLASH_MODE_INVALID = -1
} flashMode;
enum focusMode_t {
- FOCUS_MODE_AUTO = ANDROID_CONTROL_AF_AUTO,
- FOCUS_MODE_MACRO = ANDROID_CONTROL_AF_MACRO,
- FOCUS_MODE_CONTINUOUS_VIDEO = ANDROID_CONTROL_AF_CONTINUOUS_VIDEO,
- FOCUS_MODE_CONTINUOUS_PICTURE = ANDROID_CONTROL_AF_CONTINUOUS_PICTURE,
- FOCUS_MODE_EDOF = ANDROID_CONTROL_AF_EDOF,
+ FOCUS_MODE_AUTO = ANDROID_CONTROL_AF_MODE_AUTO,
+ FOCUS_MODE_MACRO = ANDROID_CONTROL_AF_MODE_MACRO,
+ FOCUS_MODE_CONTINUOUS_VIDEO = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+ FOCUS_MODE_CONTINUOUS_PICTURE = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE,
+ FOCUS_MODE_EDOF = ANDROID_CONTROL_AF_MODE_EDOF,
FOCUS_MODE_INFINITY,
FOCUS_MODE_FIXED,
FOCUS_MODE_INVALID = -1
@@ -106,6 +104,11 @@ struct Parameters {
};
Vector<Area> focusingAreas;
+ struct Size {
+ int32_t width;
+ int32_t height;
+ };
+
int32_t exposureCompensation;
bool autoExposureLock;
bool autoWhiteBalanceLock;
@@ -136,13 +139,17 @@ struct Parameters {
bool enableFocusMoveMessages;
int afTriggerCounter;
+ int afStateCounter;
int currentAfTriggerId;
bool afInMotion;
int precaptureTriggerCounter;
+ int takePictureCounter;
+
uint32_t previewCallbackFlags;
bool previewCallbackOneShot;
+ bool previewCallbackSurface;
bool zslMode;
@@ -158,7 +165,12 @@ struct Parameters {
} state;
// Number of zoom steps to simulate
- static const unsigned int NUM_ZOOM_STEPS = 30;
+ static const unsigned int NUM_ZOOM_STEPS = 100;
+ // Max preview size allowed
+ static const unsigned int MAX_PREVIEW_WIDTH = 1920;
+ static const unsigned int MAX_PREVIEW_HEIGHT = 1080;
+ // Aspect ratio tolerance
+ static const float ASPECT_RATIO_TOLERANCE = 0.001;
// Full static camera info, object owned by someone else, such as
// Camera2Device.
@@ -171,6 +183,7 @@ struct Parameters {
struct DeviceInfo {
int32_t arrayWidth;
int32_t arrayHeight;
+ int32_t bestStillCaptureFpsRange[2];
uint8_t bestFaceDetectMode;
int32_t maxFaces;
struct OverrideModes {
@@ -179,11 +192,13 @@ struct Parameters {
focusMode_t focusMode;
OverrideModes():
flashMode(FLASH_MODE_INVALID),
- wbMode(ANDROID_CONTROL_AWB_OFF),
+ wbMode(ANDROID_CONTROL_AWB_MODE_OFF),
focusMode(FOCUS_MODE_INVALID) {
}
};
DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
+ float minFocalLength;
+ bool useFlexibleYuv;
} fastInfo;
// Quirks information; these are short-lived flags to enable workarounds for
@@ -192,6 +207,7 @@ struct Parameters {
bool triggerAfWithAuto;
bool useZslFormat;
bool meteringCropRegion;
+ bool partialResults;
} quirks;
/**
@@ -214,7 +230,7 @@ struct Parameters {
// max/minCount means to do no bounds check in that direction. In case of
// error, the entry data pointer is null and the count is 0.
camera_metadata_ro_entry_t staticInfo(uint32_t tag,
- size_t minCount=0, size_t maxCount=0) const;
+ size_t minCount=0, size_t maxCount=0, bool required=true) const;
// Validate and update camera parameters based on new settings
status_t set(const String8 &paramString);
@@ -244,6 +260,9 @@ struct Parameters {
};
CropRegion calculateCropRegion(CropRegion::Outputs outputs) const;
+ // Calculate the field of view of the high-resolution JPEG capture
+ status_t calculatePictureFovs(float *horizFov, float *vertFov) const;
+
// Static methods for debugging and converting between camera1 and camera2
// parameters
@@ -261,6 +280,8 @@ struct Parameters {
static const char* flashModeEnumToString(flashMode_t flashMode);
static focusMode_t focusModeStringToEnum(const char *focusMode);
static const char* focusModeEnumToString(focusMode_t focusMode);
+ static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
+
static status_t parseAreas(const char *areasCStr,
Vector<Area> *areas);
@@ -310,6 +331,12 @@ private:
int cropYToNormalized(int y) const;
int normalizedXToCrop(int x) const;
int normalizedYToCrop(int y) const;
+
+ Vector<Size> availablePreviewSizes;
+ // Get size list (that are no larger than limit) from static metadata.
+ status_t getFilteredPreviewSizes(Size limit, Vector<Size> *sizes);
+ // Get max size (from the size array) that matches the given aspect ratio.
+ Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
};
// This class encapsulates the Parameters class so that it can only be accessed
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 207f780..6076dae 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -17,30 +17,41 @@
#define LOG_TAG "Camera2-StreamingProcessor"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0 // Per-frame verbose logging
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
#include <utils/Log.h>
#include <utils/Trace.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
#include <media/hardware/MetadataBufferType.h>
-#include "StreamingProcessor.h"
-#include "Camera2Heap.h"
-#include "../Camera2Client.h"
-#include "../Camera2Device.h"
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/StreamingProcessor.h"
+#include "api1/client2/Camera2Heap.h"
namespace android {
namespace camera2 {
-StreamingProcessor::StreamingProcessor(wp<Camera2Client> client):
+StreamingProcessor::StreamingProcessor(sp<Camera2Client> client):
mClient(client),
+ mDevice(client->getCameraDevice()),
+ mId(client->getCameraId()),
mActiveRequest(NONE),
+ mPaused(false),
mPreviewRequestId(Camera2Client::kPreviewRequestIdStart),
mPreviewStreamId(NO_STREAM),
mRecordingRequestId(Camera2Client::kRecordingRequestIdStart),
mRecordingStreamId(NO_STREAM),
- mRecordingHeapCount(kDefaultRecordingHeapCount)
+ mRecordingFrameAvailable(false),
+ mRecordingHeapCount(kDefaultRecordingHeapCount),
+ mRecordingHeapFree(kDefaultRecordingHeapCount)
{
-
}
StreamingProcessor::~StreamingProcessor() {
@@ -70,16 +81,19 @@ bool StreamingProcessor::haveValidPreviewWindow() const {
status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
ATRACE_CALL();
status_t res;
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
Mutex::Autolock m(mMutex);
if (mPreviewRequest.entryCount() == 0) {
- res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
+ res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
&mPreviewRequest);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create default preview request: "
- "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
}
@@ -87,7 +101,7 @@ status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
res = params.updateRequest(&mPreviewRequest);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update common entries of preview "
- "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+ "request: %s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -96,7 +110,7 @@ status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
&mPreviewRequestId, 1);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update request id for preview: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
@@ -108,9 +122,11 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
Mutex::Autolock m(mMutex);
status_t res;
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
if (mPreviewStreamId != NO_STREAM) {
// Check if stream parameters have to change
@@ -119,24 +135,24 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
&currentWidth, &currentHeight, 0);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying preview stream info: "
- "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
if (currentWidth != (uint32_t)params.previewWidth ||
currentHeight != (uint32_t)params.previewHeight) {
ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
- __FUNCTION__, client->getCameraId(), currentWidth, currentHeight,
+ __FUNCTION__, mId, currentWidth, currentHeight,
params.previewWidth, params.previewHeight);
res = device->waitUntilDrained();
if (res != OK) {
ALOGE("%s: Camera %d: Error waiting for preview to drain: "
- "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
res = device->deleteStream(mPreviewStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for preview: %s (%d)", __FUNCTION__, client->getCameraId(),
+ "for preview: %s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -151,7 +167,7 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
&mPreviewStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
}
@@ -160,7 +176,7 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
params.previewTransform);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to set preview stream transform: "
- "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
@@ -174,12 +190,14 @@ status_t StreamingProcessor::deletePreviewStream() {
Mutex::Autolock m(mMutex);
if (mPreviewStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
ALOGV("%s: for cameraId %d on streamId %d",
- __FUNCTION__, client->getCameraId(), mPreviewStreamId);
+ __FUNCTION__, mId, mPreviewStreamId);
res = device->waitUntilDrained();
if (res != OK) {
@@ -205,24 +223,39 @@ int StreamingProcessor::getPreviewStreamId() const {
status_t StreamingProcessor::setRecordingBufferCount(size_t count) {
ATRACE_CALL();
- // 32 is the current upper limit on the video buffer count for BufferQueue
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
- if (count > 32) {
- ALOGE("%s: Camera %d: Error setting %d as video buffer count value",
- __FUNCTION__, client->getCameraId(), count);
+ // Make sure we can support this many buffer slots
+ if (count > BufferQueue::NUM_BUFFER_SLOTS) {
+ ALOGE("%s: Camera %d: Too many recording buffers requested: %d, max %d",
+ __FUNCTION__, mId, count, BufferQueue::NUM_BUFFER_SLOTS);
return BAD_VALUE;
}
Mutex::Autolock m(mMutex);
- // Need to reallocate memory for heap
+ ALOGV("%s: Camera %d: New recording buffer count from encoder: %d",
+ __FUNCTION__, mId, count);
+
+ // Need to re-size consumer and heap
if (mRecordingHeapCount != count) {
- if (mRecordingHeap != 0) {
+ ALOGV("%s: Camera %d: Resetting recording heap and consumer",
+ __FUNCTION__, mId);
+
+ if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) {
+ ALOGE("%s: Camera %d: Setting recording buffer count when "
+ "recording stream is already active!", __FUNCTION__,
+ mId);
+ return INVALID_OPERATION;
+ }
+
+ releaseAllRecordingFramesLocked();
+
+ if (mRecordingHeap != 0) {
mRecordingHeap.clear();
- mRecordingHeap = NULL;
}
mRecordingHeapCount = count;
+ mRecordingHeapFree = count;
+
+ mRecordingConsumer.clear();
}
return OK;
@@ -233,15 +266,18 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
status_t res;
Mutex::Autolock m(mMutex);
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
if (mRecordingRequest.entryCount() == 0) {
- res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
+ res = device->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
&mRecordingRequest);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create default recording request:"
- " %s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
}
@@ -249,7 +285,7 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
res = params.updateRequest(&mRecordingRequest);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update common entries of recording "
- "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+ "request: %s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -258,7 +294,7 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
&mRecordingRequestId, 1);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update request id for request: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
@@ -270,22 +306,27 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
status_t res;
Mutex::Autolock m(mMutex);
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ bool newConsumer = false;
if (mRecordingConsumer == 0) {
+ ALOGV("%s: Camera %d: Creating recording consumer with %d + 1 "
+ "consumer-side buffers", __FUNCTION__, mId, mRecordingHeapCount);
// Create CPU buffer queue endpoint. We need one more buffer here so that we can
// always acquire and free a buffer when the heap is full; otherwise the consumer
// will have buffers in flight we'll never clear out.
- mRecordingConsumer = new BufferItemConsumer(
+ sp<BufferQueue> bq = new BufferQueue();
+ mRecordingConsumer = new BufferItemConsumer(bq,
GRALLOC_USAGE_HW_VIDEO_ENCODER,
- mRecordingHeapCount + 1,
- true);
+ mRecordingHeapCount + 1);
mRecordingConsumer->setFrameAvailableListener(this);
mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
- mRecordingWindow = new SurfaceTextureClient(
- mRecordingConsumer->getProducerInterface());
+ mRecordingWindow = new Surface(bq);
+ newConsumer = true;
// Allocate memory later, since we don't know buffer size until receipt
}
@@ -296,18 +337,24 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
&currentWidth, &currentHeight, 0);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying recording output stream info: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
if (currentWidth != (uint32_t)params.videoWidth ||
- currentHeight != (uint32_t)params.videoHeight) {
+ currentHeight != (uint32_t)params.videoHeight || newConsumer) {
// TODO: Should wait to be sure previous recording has finished
res = device->deleteStream(mRecordingStreamId);
- if (res != OK) {
+
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Device is busy, call "
+ "updateRecordingStream after it becomes idle",
+ __FUNCTION__, mId);
+ return res;
+ } else if (res != OK) {
ALOGE("%s: Camera %d: Unable to delete old output stream "
"for recording: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
return res;
}
mRecordingStreamId = NO_STREAM;
@@ -321,7 +368,7 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for recording: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -337,9 +384,11 @@ status_t StreamingProcessor::deleteRecordingStream() {
Mutex::Autolock m(mMutex);
if (mRecordingStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
res = device->waitUntilDrained();
if (res != OK) {
@@ -363,19 +412,33 @@ int StreamingProcessor::getRecordingStreamId() const {
}
status_t StreamingProcessor::startStream(StreamType type,
- const Vector<uint8_t> &outputStreams) {
+ const Vector<int32_t> &outputStreams) {
ATRACE_CALL();
status_t res;
if (type == NONE) return INVALID_OPERATION;
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
- ALOGV("%s: Camera %d: type = %d", __FUNCTION__, client->getCameraId(), type);
+ ALOGV("%s: Camera %d: type = %d", __FUNCTION__, mId, type);
Mutex::Autolock m(mMutex);
+ // If a recording stream is being started up, free up any
+ // outstanding buffers left from the previous recording session.
+ // There should never be any, so if there are, warn about it.
+ if (isStreamActive(outputStreams, mRecordingStreamId)) {
+ releaseAllRecordingFramesLocked();
+ }
+
+ ALOGV("%s: Camera %d: %s started, recording heap has %d free of %d",
+ __FUNCTION__, mId, (type == PREVIEW) ? "preview" : "recording",
+ mRecordingHeapFree, mRecordingHeapCount);
+
CameraMetadata &request = (type == PREVIEW) ?
mPreviewRequest : mRecordingRequest;
@@ -384,26 +447,75 @@ status_t StreamingProcessor::startStream(StreamType type,
outputStreams);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
res = request.sort();
if (res != OK) {
ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
- res = client->getCameraDevice()->setStreamingRequest(request);
+ res = device->setStreamingRequest(request);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to set preview request to start preview: "
"%s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
mActiveRequest = type;
+ mPaused = false;
+ mActiveStreamIds = outputStreams;
+ return OK;
+}
+
+status_t StreamingProcessor::togglePauseStream(bool pause) {
+ ATRACE_CALL();
+ status_t res;
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ ALOGV("%s: Camera %d: toggling pause to %d", __FUNCTION__, mId, pause);
+ Mutex::Autolock m(mMutex);
+
+ if (mActiveRequest == NONE) {
+ ALOGE("%s: Camera %d: Can't toggle pause, streaming was not started",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ if (mPaused == pause) {
+ return OK;
+ }
+
+ if (pause) {
+ res = device->clearStreamingRequest();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ } else {
+ CameraMetadata &request =
+ (mActiveRequest == PREVIEW) ? mPreviewRequest
+ : mRecordingRequest;
+ res = device->setStreamingRequest(request);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set preview request to resume: "
+ "%s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ }
+
+ mPaused = pause;
return OK;
}
@@ -413,17 +525,22 @@ status_t StreamingProcessor::stopStream() {
Mutex::Autolock m(mMutex);
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
res = device->clearStreamingRequest();
if (res != OK) {
ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return res;
}
+
mActiveRequest = NONE;
+ mActiveStreamIds.clear();
+ mPaused = false;
return OK;
}
@@ -447,7 +564,6 @@ status_t StreamingProcessor::incrementStreamingIds() {
ATRACE_CALL();
Mutex::Autolock m(mMutex);
- status_t res;
mPreviewRequestId++;
if (mPreviewRequestId >= Camera2Client::kPreviewRequestIdEnd) {
mPreviewRequestId = Camera2Client::kPreviewRequestIdStart;
@@ -461,13 +577,56 @@ status_t StreamingProcessor::incrementStreamingIds() {
void StreamingProcessor::onFrameAvailable() {
ATRACE_CALL();
+ Mutex::Autolock l(mMutex);
+ if (!mRecordingFrameAvailable) {
+ mRecordingFrameAvailable = true;
+ mRecordingFrameAvailableSignal.signal();
+ }
+
+}
+
+bool StreamingProcessor::threadLoop() {
+ status_t res;
+
+ {
+ Mutex::Autolock l(mMutex);
+ while (!mRecordingFrameAvailable) {
+ res = mRecordingFrameAvailableSignal.waitRelative(
+ mMutex, kWaitDuration);
+ if (res == TIMED_OUT) return true;
+ }
+ mRecordingFrameAvailable = false;
+ }
+
+ do {
+ res = processRecordingFrame();
+ } while (res == OK);
+
+ return true;
+}
+
+status_t StreamingProcessor::processRecordingFrame() {
+ ATRACE_CALL();
status_t res;
sp<Camera2Heap> recordingHeap;
size_t heapIdx = 0;
nsecs_t timestamp;
sp<Camera2Client> client = mClient.promote();
- if (client == 0) return;
+ if (client == 0) {
+ // Discard frames during shutdown
+ BufferItemConsumer::BufferItem imgBuffer;
+ res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
+ if (res != OK) {
+ if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+ ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ }
+ return res;
+ }
+ mRecordingConsumer->releaseBuffer(imgBuffer);
+ return OK;
+ }
{
/* acquire SharedParameters before mMutex so we don't dead lock
@@ -475,46 +634,47 @@ void StreamingProcessor::onFrameAvailable() {
SharedParameters::Lock l(client->getParameters());
Mutex::Autolock m(mMutex);
BufferItemConsumer::BufferItem imgBuffer;
- res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+ res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
if (res != OK) {
- ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
- return;
+ if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+ ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ }
+ return res;
}
timestamp = imgBuffer.mTimestamp;
mRecordingFrameCount++;
- ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
+ ALOGVV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
- // TODO: Signal errors here upstream
if (l.mParameters.state != Parameters::RECORD &&
l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
ALOGV("%s: Camera %d: Discarding recording image buffers "
"received after recording done", __FUNCTION__,
- client->getCameraId());
+ mId);
mRecordingConsumer->releaseBuffer(imgBuffer);
- return;
+ return INVALID_OPERATION;
}
if (mRecordingHeap == 0) {
const size_t bufferSize = 4 + sizeof(buffer_handle_t);
ALOGV("%s: Camera %d: Creating recording heap with %d buffers of "
- "size %d bytes", __FUNCTION__, client->getCameraId(),
+ "size %d bytes", __FUNCTION__, mId,
mRecordingHeapCount, bufferSize);
mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
"Camera2Client::RecordingHeap");
if (mRecordingHeap->mHeap->getSize() == 0) {
ALOGE("%s: Camera %d: Unable to allocate memory for recording",
- __FUNCTION__, client->getCameraId());
+ __FUNCTION__, mId);
mRecordingConsumer->releaseBuffer(imgBuffer);
- return;
+ return NO_MEMORY;
}
for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
if (mRecordingBuffers[i].mBuf !=
BufferItemConsumer::INVALID_BUFFER_SLOT) {
ALOGE("%s: Camera %d: Non-empty recording buffers list!",
- __FUNCTION__, client->getCameraId());
+ __FUNCTION__, mId);
}
}
mRecordingBuffers.clear();
@@ -527,17 +687,17 @@ void StreamingProcessor::onFrameAvailable() {
if ( mRecordingHeapFree == 0) {
ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
- __FUNCTION__, client->getCameraId());
+ __FUNCTION__, mId);
mRecordingConsumer->releaseBuffer(imgBuffer);
- return;
+ return NO_MEMORY;
}
heapIdx = mRecordingHeapHead;
mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
mRecordingHeapFree--;
- ALOGV("%s: Camera %d: Timestamp %lld",
- __FUNCTION__, client->getCameraId(), timestamp);
+ ALOGVV("%s: Camera %d: Timestamp %lld",
+ __FUNCTION__, mId, timestamp);
ssize_t offset;
size_t size;
@@ -549,29 +709,30 @@ void StreamingProcessor::onFrameAvailable() {
uint32_t type = kMetadataBufferTypeGrallocSource;
*((uint32_t*)data) = type;
*((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
- ALOGV("%s: Camera %d: Sending out buffer_handle_t %p",
- __FUNCTION__, client->getCameraId(),
+ ALOGVV("%s: Camera %d: Sending out buffer_handle_t %p",
+ __FUNCTION__, mId,
imgBuffer.mGraphicBuffer->handle);
mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
recordingHeap = mRecordingHeap;
}
// Call outside locked parameters to allow re-entrancy from notification
- Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
- if (l.mCameraClient != 0) {
- l.mCameraClient->dataCallbackTimestamp(timestamp,
+ Camera2Client::SharedCameraCallbacks::Lock l(client->mSharedCameraCallbacks);
+ if (l.mRemoteCallback != 0) {
+ l.mRemoteCallback->dataCallbackTimestamp(timestamp,
CAMERA_MSG_VIDEO_FRAME,
recordingHeap->mBuffers[heapIdx]);
+ } else {
+ ALOGW("%s: Camera %d: Remote callback gone", __FUNCTION__, mId);
}
+
+ return OK;
}
void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
ATRACE_CALL();
status_t res;
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return;
-
Mutex::Autolock m(mMutex);
// Make sure this is for the current heap
ssize_t offset;
@@ -579,7 +740,7 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
- "(got %x, expected %x)", __FUNCTION__, client->getCameraId(),
+ "(got %x, expected %x)", __FUNCTION__, mId,
heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
return;
}
@@ -587,7 +748,7 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
uint32_t type = *(uint32_t*)data;
if (type != kMetadataBufferTypeGrallocSource) {
ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
- __FUNCTION__, client->getCameraId(), type,
+ __FUNCTION__, mId, type,
kMetadataBufferTypeGrallocSource);
return;
}
@@ -607,28 +768,80 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
}
if (itemIndex == mRecordingBuffers.size()) {
ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
- "outstanding buffers", __FUNCTION__, client->getCameraId(),
+ "outstanding buffers", __FUNCTION__, mId,
imgHandle);
return;
}
- ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
- client->getCameraId(), imgHandle);
+ ALOGVV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
+ mId, imgHandle);
res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to free recording frame "
"(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
- client->getCameraId(), imgHandle, strerror(-res), res);
+ mId, imgHandle, strerror(-res), res);
return;
}
mRecordingBuffers.replaceAt(itemIndex);
mRecordingHeapFree++;
+ ALOGV_IF(mRecordingHeapFree == mRecordingHeapCount,
+ "%s: Camera %d: All %d recording buffers returned",
+ __FUNCTION__, mId, mRecordingHeapCount);
}
+void StreamingProcessor::releaseAllRecordingFramesLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mRecordingConsumer == 0) {
+ return;
+ }
+
+ ALOGV("%s: Camera %d: Releasing all recording buffers", __FUNCTION__,
+ mId);
+
+ size_t releasedCount = 0;
+ for (size_t itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
+ const BufferItemConsumer::BufferItem item =
+ mRecordingBuffers[itemIndex];
+ if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT) {
+ res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to free recording frame "
+ "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
+ mId, item.mGraphicBuffer->handle, strerror(-res), res);
+ }
+ mRecordingBuffers.replaceAt(itemIndex);
+ releasedCount++;
+ }
+ }
-status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) {
+ if (releasedCount > 0) {
+ ALOGW("%s: Camera %d: Force-freed %d outstanding buffers "
+ "from previous recording session", __FUNCTION__, mId, releasedCount);
+ ALOGE_IF(releasedCount != mRecordingHeapCount - mRecordingHeapFree,
+ "%s: Camera %d: Force-freed %d buffers, but expected %d",
+ __FUNCTION__, mId, releasedCount, mRecordingHeapCount - mRecordingHeapFree);
+ }
+
+ mRecordingHeapHead = 0;
+ mRecordingHeapFree = mRecordingHeapCount;
+}
+
+bool StreamingProcessor::isStreamActive(const Vector<int32_t> &streams,
+ int32_t recordingStreamId) {
+ for (size_t i = 0; i < streams.size(); i++) {
+ if (streams[i] == recordingStreamId) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+status_t StreamingProcessor::dump(int fd, const Vector<String16>& /*args*/) {
String8 result;
result.append(" Current requests:\n");
@@ -636,20 +849,29 @@ status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) {
result.append(" Preview request:\n");
write(fd, result.string(), result.size());
mPreviewRequest.dump(fd, 2, 6);
+ result.clear();
} else {
result.append(" Preview request: undefined\n");
- write(fd, result.string(), result.size());
}
if (mRecordingRequest.entryCount() != 0) {
result = " Recording request:\n";
write(fd, result.string(), result.size());
mRecordingRequest.dump(fd, 2, 6);
+ result.clear();
} else {
result = " Recording request: undefined\n";
- write(fd, result.string(), result.size());
}
+ const char* streamTypeString[] = {
+ "none", "preview", "record"
+ };
+ result.append(String8::format(" Active request: %s (paused: %s)\n",
+ streamTypeString[mActiveRequest],
+ mPaused ? "yes" : "no"));
+
+ write(fd, result.string(), result.size());
+
return OK;
}
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index 96b100f..833bb8f 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -21,24 +21,26 @@
#include <utils/String16.h>
#include <gui/BufferItemConsumer.h>
-#include "Parameters.h"
-#include "CameraMetadata.h"
+#include "camera/CameraMetadata.h"
namespace android {
class Camera2Client;
+class CameraDeviceBase;
class IMemory;
namespace camera2 {
+class Parameters;
class Camera2Heap;
/**
* Management and processing for preview and recording streams
*/
-class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
+class StreamingProcessor:
+ public Thread, public BufferItemConsumer::FrameAvailableListener {
public:
- StreamingProcessor(wp<Camera2Client> client);
+ StreamingProcessor(sp<Camera2Client> client);
~StreamingProcessor();
status_t setPreviewWindow(sp<ANativeWindow> window);
@@ -62,7 +64,10 @@ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
RECORD
};
status_t startStream(StreamType type,
- const Vector<uint8_t> &outputStreams);
+ const Vector<int32_t> &outputStreams);
+
+ // Toggle between paused and unpaused. Stream must be started first.
+ status_t togglePauseStream(bool pause);
status_t stopStream();
@@ -86,8 +91,13 @@ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
};
wp<Camera2Client> mClient;
+ wp<CameraDeviceBase> mDevice;
+ int mId;
StreamType mActiveRequest;
+ bool mPaused;
+
+ Vector<int32_t> mActiveStreamIds;
// Preview-related members
int32_t mPreviewRequestId;
@@ -96,6 +106,8 @@ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
sp<ANativeWindow> mPreviewWindow;
// Recording-related members
+ static const nsecs_t kWaitDuration = 50000000; // 50 ms
+
int32_t mRecordingRequestId;
int mRecordingStreamId;
int mRecordingFrameCount;
@@ -104,11 +116,24 @@ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
CameraMetadata mRecordingRequest;
sp<camera2::Camera2Heap> mRecordingHeap;
+ bool mRecordingFrameAvailable;
+ Condition mRecordingFrameAvailableSignal;
+
static const size_t kDefaultRecordingHeapCount = 8;
size_t mRecordingHeapCount;
Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
size_t mRecordingHeapHead, mRecordingHeapFree;
+ virtual bool threadLoop();
+
+ status_t processRecordingFrame();
+
+ // Unilaterally free any buffers still outstanding to stagefright
+ void releaseAllRecordingFramesLocked();
+
+ // Determine if the specified stream is currently in use
+ static bool isStreamActive(const Vector<int32_t> &streams,
+ int32_t recordingStreamId);
};
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 1937955..453d54c 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -27,23 +27,25 @@
#include <utils/Log.h>
#include <utils/Trace.h>
+#include <gui/Surface.h>
-#include "ZslProcessor.h"
-#include <gui/SurfaceTextureClient.h>
-#include "../Camera2Device.h"
-#include "../Camera2Client.h"
-
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/ZslProcessor.h"
namespace android {
namespace camera2 {
ZslProcessor::ZslProcessor(
- wp<Camera2Client> client,
+ sp<Camera2Client> client,
wp<CaptureSequencer> sequencer):
Thread(false),
mState(RUNNING),
mClient(client),
+ mDevice(client->getCameraDevice()),
mSequencer(sequencer),
+ mId(client->getCameraId()),
mZslBufferAvailable(false),
mZslStreamId(NO_STREAM),
mZslReprocessStreamId(NO_STREAM),
@@ -69,11 +71,13 @@ void ZslProcessor::onFrameAvailable() {
}
}
-void ZslProcessor::onFrameAvailable(int32_t frameId, const CameraMetadata &frame) {
+void ZslProcessor::onFrameAvailable(int32_t /*requestId*/,
+ const CameraMetadata &frame) {
Mutex::Autolock l(mInputMutex);
camera_metadata_ro_entry_t entry;
entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
nsecs_t timestamp = entry.data.i64[0];
+ (void)timestamp;
ALOGVV("Got preview frame for timestamp %lld", timestamp);
if (mState != RUNNING) return;
@@ -112,19 +116,25 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
Mutex::Autolock l(mInputMutex);
sp<Camera2Client> client = mClient.promote();
- if (client == 0) return OK;
- sp<Camera2Device> device = client->getCameraDevice();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
if (mZslConsumer == 0) {
// Create CPU buffer queue endpoint
- mZslConsumer = new BufferItemConsumer(
+ sp<BufferQueue> bq = new BufferQueue();
+ mZslConsumer = new BufferItemConsumer(bq,
GRALLOC_USAGE_HW_CAMERA_ZSL,
- kZslBufferDepth,
- true);
+ kZslBufferDepth);
mZslConsumer->setFrameAvailableListener(this);
mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
- mZslWindow = new SurfaceTextureClient(
- mZslConsumer->getProducerInterface());
+ mZslWindow = new Surface(bq);
}
if (mZslStreamId != NO_STREAM) {
@@ -135,7 +145,7 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
if (res != OK) {
ALOGE("%s: Camera %d: Error querying capture output stream info: "
"%s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
return res;
}
if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
@@ -144,16 +154,16 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
if (res != OK) {
ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
"for ZSL: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
return res;
}
ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
- __FUNCTION__, client->getCameraId(), mZslStreamId);
+ __FUNCTION__, mId, mZslStreamId);
res = device->deleteStream(mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to delete old output stream "
"for ZSL: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
return res;
}
mZslStreamId = NO_STREAM;
@@ -172,7 +182,7 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
&mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -180,7 +190,7 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
&mZslReprocessStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
@@ -199,14 +209,18 @@ status_t ZslProcessor::deleteStream() {
Mutex::Autolock l(mInputMutex);
if (mZslStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return OK;
- sp<Camera2Device> device = client->getCameraDevice();
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ clearZslQueueLocked();
res = device->deleteReprocessStream(mZslReprocessStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Cannot delete ZSL reprocessing stream %d: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
mZslReprocessStreamId, strerror(-res), res);
return res;
}
@@ -215,7 +229,7 @@ status_t ZslProcessor::deleteStream() {
res = device->deleteStream(mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
+ "%s (%d)", __FUNCTION__, mId,
mZslStreamId, strerror(-res), res);
return res;
}
@@ -233,11 +247,6 @@ int ZslProcessor::getStreamId() const {
return mZslStreamId;
}
-int ZslProcessor::getReprocessStreamId() const {
- Mutex::Autolock l(mInputMutex);
- return mZslReprocessStreamId;
-}
-
status_t ZslProcessor::pushToReprocess(int32_t requestId) {
ALOGV("%s: Send in reprocess request with id %d",
__FUNCTION__, requestId);
@@ -245,7 +254,10 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) {
status_t res;
sp<Camera2Client> client = mClient.promote();
- if (client == 0) return INVALID_OPERATION;
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
IF_ALOGV() {
dumpZslQueue(-1);
@@ -288,10 +300,12 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) {
uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
res = request.update(ANDROID_REQUEST_TYPE,
&requestType, 1);
- uint8_t inputStreams[1] = { mZslReprocessStreamId };
+ int32_t inputStreams[1] =
+ { mZslReprocessStreamId };
if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
inputStreams, 1);
- uint8_t outputStreams[1] = { client->getCaptureStreamId() };
+ int32_t outputStreams[1] =
+ { client->getCaptureStreamId() };
if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreams, 1);
res = request.update(ANDROID_REQUEST_ID,
@@ -306,7 +320,7 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) {
if (res != OK) {
ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
"%s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ __FUNCTION__, mId, strerror(-res), res);
return INVALID_OPERATION;
}
// TODO: have push-and-clear be atomic
@@ -325,7 +339,7 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) {
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
"capture request: %s (%d)", __FUNCTION__,
- client->getCameraId(),
+ mId,
strerror(-res), res);
return res;
}
@@ -367,7 +381,7 @@ status_t ZslProcessor::clearZslQueueLocked() {
return OK;
}
-void ZslProcessor::dump(int fd, const Vector<String16>& args) const {
+void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const {
Mutex::Autolock l(mInputMutex);
if (!mLatestCapturedRequest.isEmpty()) {
String8 result(" Latest ZSL capture request:\n");
@@ -394,26 +408,29 @@ bool ZslProcessor::threadLoop() {
}
do {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return false;
- res = processNewZslBuffer(client);
+ res = processNewZslBuffer();
} while (res == OK);
return true;
}
-status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) {
+status_t ZslProcessor::processNewZslBuffer() {
ATRACE_CALL();
status_t res;
-
+ sp<BufferItemConsumer> zslConsumer;
+ {
+ Mutex::Autolock l(mInputMutex);
+ if (mZslConsumer == 0) return OK;
+ zslConsumer = mZslConsumer;
+ }
ALOGVV("Trying to get next buffer");
BufferItemConsumer::BufferItem item;
- res = mZslConsumer->acquireBuffer(&item);
+ res = zslConsumer->acquireBuffer(&item, 0);
if (res != OK) {
if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
"%s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
+ mId, strerror(-res), res);
} else {
ALOGVV(" No buffer");
}
@@ -424,7 +441,7 @@ status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) {
if (mState == LOCKED) {
ALOGVV("In capture, discarding new ZSL buffers");
- mZslConsumer->releaseBuffer(item);
+ zslConsumer->releaseBuffer(item);
return OK;
}
@@ -432,7 +449,7 @@ status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) {
if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
ALOGVV("Releasing oldest buffer");
- mZslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
+ zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
mZslQueue.replaceAt(mZslQueueTail);
mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
}
@@ -523,7 +540,7 @@ void ZslProcessor::dumpZslQueue(int fd) const {
if (entry.count > 0) frameAeState = entry.data.u8[0];
}
String8 result =
- String8::format(" %d: b: %lld\tf: %lld, AE state: %d", i,
+ String8::format(" %zu: b: %lld\tf: %lld, AE state: %d", i,
bufferTimestamp, frameTimestamp, frameAeState);
ALOGV("%s", result.string());
if (fd != -1) {
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index c80e7f4..6d3cb85 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -23,11 +23,11 @@
#include <utils/Mutex.h>
#include <utils/Condition.h>
#include <gui/BufferItemConsumer.h>
-#include "Parameters.h"
-#include "FrameProcessor.h"
-#include "CameraMetadata.h"
-#include "Camera2Heap.h"
-#include "../Camera2Device.h"
+#include <camera/CameraMetadata.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/client2/ZslProcessorInterface.h"
+#include "api1/client2/FrameProcessor.h"
namespace android {
@@ -36,6 +36,7 @@ class Camera2Client;
namespace camera2 {
class CaptureSequencer;
+class Parameters;
/***
* ZSL queue processing
@@ -44,22 +45,28 @@ class ZslProcessor:
virtual public Thread,
virtual public BufferItemConsumer::FrameAvailableListener,
virtual public FrameProcessor::FilteredListener,
- virtual public Camera2Device::BufferReleasedListener {
+ virtual public CameraDeviceBase::BufferReleasedListener,
+ public ZslProcessorInterface {
public:
- ZslProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+ ZslProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~ZslProcessor();
// From mZslConsumer
virtual void onFrameAvailable();
// From FrameProcessor
- virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame);
+ virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
virtual void onBufferReleased(buffer_handle_t *handle);
+ /**
+ ****************************************
+ * ZslProcessorInterface implementation *
+ ****************************************
+ */
+
status_t updateStream(const Parameters &params);
status_t deleteStream();
int getStreamId() const;
- int getReprocessStreamId() const;
status_t pushToReprocess(int32_t requestId);
status_t clearZslQueue();
@@ -74,7 +81,9 @@ class ZslProcessor:
} mState;
wp<Camera2Client> mClient;
+ wp<CameraDeviceBase> mDevice;
wp<CaptureSequencer> mSequencer;
+ int mId;
mutable Mutex mInputMutex;
bool mZslBufferAvailable;
@@ -109,7 +118,7 @@ class ZslProcessor:
virtual bool threadLoop();
- status_t processNewZslBuffer(sp<Camera2Client> &client);
+ status_t processNewZslBuffer();
// Match up entries from frame list to buffers in ZSL queue
void findMatchesLocked();
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
new file mode 100644
index 0000000..6b4e57a
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-ZslProcessor3"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/ZslProcessor3.h"
+#include "device3/Camera3Device.h"
+
+namespace android {
+namespace camera2 {
+
+ZslProcessor3::ZslProcessor3(
+ sp<Camera2Client> client,
+ wp<CaptureSequencer> sequencer):
+ Thread(false),
+ mState(RUNNING),
+ mClient(client),
+ mSequencer(sequencer),
+ mId(client->getCameraId()),
+ mZslStreamId(NO_STREAM),
+ mFrameListHead(0),
+ mZslQueueHead(0),
+ mZslQueueTail(0) {
+ mZslQueue.insertAt(0, kZslBufferDepth);
+ mFrameList.insertAt(0, kFrameListDepth);
+ sp<CaptureSequencer> captureSequencer = mSequencer.promote();
+ if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
+}
+
+ZslProcessor3::~ZslProcessor3() {
+ ALOGV("%s: Exit", __FUNCTION__);
+ deleteStream();
+}
+
+void ZslProcessor3::onFrameAvailable(int32_t /*requestId*/,
+ const CameraMetadata &frame) {
+ Mutex::Autolock l(mInputMutex);
+ camera_metadata_ro_entry_t entry;
+ entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ nsecs_t timestamp = entry.data.i64[0];
+ (void)timestamp;
+ ALOGVV("Got preview metadata for timestamp %lld", timestamp);
+
+ if (mState != RUNNING) return;
+
+ mFrameList.editItemAt(mFrameListHead) = frame;
+ mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+}
+
+status_t ZslProcessor3::updateStream(const Parameters &params) {
+ ATRACE_CALL();
+ ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ if (mZslStreamId != NO_STREAM) {
+ // Check if stream parameters have to change
+ uint32_t currentWidth, currentHeight;
+ res = device->getStreamInfo(mZslStreamId,
+ &currentWidth, &currentHeight, 0);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error querying capture output stream info: "
+ "%s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
+ currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
+ ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+ "dimensions changed",
+ __FUNCTION__, client->getCameraId(), mZslStreamId);
+ res = device->deleteStream(mZslStreamId);
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+ " after it becomes idle", __FUNCTION__, mId);
+ return res;
+ } else if(res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for ZSL: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mZslStreamId = NO_STREAM;
+ }
+ }
+
+ if (mZslStreamId == NO_STREAM) {
+ // Create stream for HAL production
+ // TODO: Sort out better way to select resolution for ZSL
+
+ // Note that format specified internally in Camera3ZslStream
+ res = device->createZslStream(
+ params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
+ kZslBufferDepth,
+ &mZslStreamId,
+ &mZslStream);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't create ZSL stream: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ }
+ client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
+ Camera2Client::kPreviewRequestIdEnd,
+ this);
+
+ return OK;
+}
+
+status_t ZslProcessor3::deleteStream() {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ if (mZslStreamId != NO_STREAM) {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ sp<Camera3Device> device =
+ reinterpret_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ res = device->deleteStream(mZslStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ mZslStreamId, strerror(-res), res);
+ return res;
+ }
+
+ mZslStreamId = NO_STREAM;
+ }
+ return OK;
+}
+
+int ZslProcessor3::getStreamId() const {
+ Mutex::Autolock l(mInputMutex);
+ return mZslStreamId;
+}
+
+status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
+ ALOGV("%s: Send in reprocess request with id %d",
+ __FUNCTION__, requestId);
+ Mutex::Autolock l(mInputMutex);
+ status_t res;
+ sp<Camera2Client> client = mClient.promote();
+
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ IF_ALOGV() {
+ dumpZslQueue(-1);
+ }
+
+ size_t metadataIdx;
+ nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);
+
+ if (candidateTimestamp == -1) {
+ ALOGE("%s: Could not find good candidate for ZSL reprocessing",
+ __FUNCTION__);
+ return NOT_ENOUGH_DATA;
+ }
+
+ res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
+ /*actualTimestamp*/NULL);
+
+ if (res == mZslStream->NO_BUFFER_AVAILABLE) {
+ ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
+ return NOT_ENOUGH_DATA;
+ } else if (res != OK) {
+ ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ {
+ CameraMetadata request = mFrameList[metadataIdx];
+
+ // Verify that the frame is reasonable for reprocessing
+
+ camera_metadata_entry_t entry;
+ entry = request.find(ANDROID_CONTROL_AE_STATE);
+ if (entry.count == 0) {
+ ALOGE("%s: ZSL queue frame has no AE state field!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
+ entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
+ ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
+ __FUNCTION__, entry.data.u8[0]);
+ return NOT_ENOUGH_DATA;
+ }
+
+ uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
+ res = request.update(ANDROID_REQUEST_TYPE,
+ &requestType, 1);
+ int32_t inputStreams[1] =
+ { mZslStreamId };
+ if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
+ inputStreams, 1);
+ // TODO: Shouldn't we also update the latest preview frame?
+ int32_t outputStreams[1] =
+ { client->getCaptureStreamId() };
+ if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+ outputStreams, 1);
+ res = request.update(ANDROID_REQUEST_ID,
+ &requestId, 1);
+
+ if (res != OK ) {
+ ALOGE("%s: Unable to update frame to a reprocess request",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ res = client->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
+ "%s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return INVALID_OPERATION;
+ }
+
+ // Update JPEG settings
+ {
+ SharedParameters::Lock l(client->getParameters());
+ res = l.mParameters.updateRequestJpeg(&request);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
+ "capture request: %s (%d)", __FUNCTION__,
+ client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ mLatestCapturedRequest = request;
+ res = client->getCameraDevice()->capture(request);
+ if (res != OK ) {
+ ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
+ " (%d)", __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ mState = LOCKED;
+ }
+
+ return OK;
+}
+
+status_t ZslProcessor3::clearZslQueue() {
+ Mutex::Autolock l(mInputMutex);
+ // If in middle of capture, can't clear out queue
+ if (mState == LOCKED) return OK;
+
+ return clearZslQueueLocked();
+}
+
+status_t ZslProcessor3::clearZslQueueLocked() {
+ if (mZslStream != 0) {
+ return mZslStream->clearInputRingBuffer();
+ }
+ return OK;
+}
+
+void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const {
+ Mutex::Autolock l(mInputMutex);
+ if (!mLatestCapturedRequest.isEmpty()) {
+ String8 result(" Latest ZSL capture request:\n");
+ write(fd, result.string(), result.size());
+ mLatestCapturedRequest.dump(fd, 2, 6);
+ } else {
+ String8 result(" Latest ZSL capture request: none yet\n");
+ write(fd, result.string(), result.size());
+ }
+ dumpZslQueue(fd);
+}
+
+bool ZslProcessor3::threadLoop() {
+ // TODO: remove dependency on thread. For now, shut thread down right
+ // away.
+ return false;
+}
+
+void ZslProcessor3::dumpZslQueue(int fd) const {
+ String8 header("ZSL queue contents:");
+ String8 indent(" ");
+ ALOGV("%s", header.string());
+ if (fd != -1) {
+ header = indent + header + "\n";
+ write(fd, header.string(), header.size());
+ }
+ for (size_t i = 0; i < mZslQueue.size(); i++) {
+ const ZslPair &queueEntry = mZslQueue[i];
+ nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+ camera_metadata_ro_entry_t entry;
+ nsecs_t frameTimestamp = 0;
+ int frameAeState = -1;
+ if (!queueEntry.frame.isEmpty()) {
+ entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count > 0) frameTimestamp = entry.data.i64[0];
+ entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE);
+ if (entry.count > 0) frameAeState = entry.data.u8[0];
+ }
+ String8 result =
+ String8::format(" %zu: b: %lld\tf: %lld, AE state: %d", i,
+ bufferTimestamp, frameTimestamp, frameAeState);
+ ALOGV("%s", result.string());
+ if (fd != -1) {
+ result = indent + result + "\n";
+ write(fd, result.string(), result.size());
+ }
+
+ }
+}
+
+nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
+ /**
+ * Find the smallest timestamp we know about so far
+ * - ensure that aeState is either converged or locked
+ */
+
+ size_t idx = 0;
+ nsecs_t minTimestamp = -1;
+
+ size_t emptyCount = mFrameList.size();
+
+ for (size_t j = 0; j < mFrameList.size(); j++) {
+ const CameraMetadata &frame = mFrameList[j];
+ if (!frame.isEmpty()) {
+
+ emptyCount--;
+
+ camera_metadata_ro_entry_t entry;
+ entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 0) {
+ ALOGE("%s: Can't find timestamp in frame!",
+ __FUNCTION__);
+ continue;
+ }
+ nsecs_t frameTimestamp = entry.data.i64[0];
+ if (minTimestamp > frameTimestamp || minTimestamp == -1) {
+
+ entry = frame.find(ANDROID_CONTROL_AE_STATE);
+
+ if (entry.count == 0) {
+ /**
+ * This is most likely a HAL bug. The aeState field is
+ * mandatory, so it should always be in a metadata packet.
+ */
+ ALOGW("%s: ZSL queue frame has no AE state field!",
+ __FUNCTION__);
+ continue;
+ }
+ if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
+ entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
+ ALOGVV("%s: ZSL queue frame AE state is %d, need "
+ "full capture", __FUNCTION__, entry.data.u8[0]);
+ continue;
+ }
+
+ minTimestamp = frameTimestamp;
+ idx = j;
+ }
+
+ ALOGVV("%s: Saw timestamp %lld", __FUNCTION__, frameTimestamp);
+ }
+ }
+
+ if (emptyCount == mFrameList.size()) {
+ /**
+ * This could be mildly bad and means our ZSL was triggered before
+ * there were any frames yet received by the camera framework.
+ *
+ * This is a fairly corner case which can happen under:
+ * + a user presses the shutter button real fast when the camera starts
+ * (startPreview followed immediately by takePicture).
+ * + burst capture case (hitting shutter button as fast as possible)
+ *
+ * If this happens in steady case (preview running for a while, call
+ * a single takePicture) then this might be a fwk bug.
+ */
+ ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
+ }
+
+ ALOGV("%s: Candidate timestamp %lld (idx %d), empty frames: %d",
+ __FUNCTION__, minTimestamp, idx, emptyCount);
+
+ if (metadataIdx) {
+ *metadataIdx = idx;
+ }
+
+ return minTimestamp;
+}
+
+void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
+ // Intentionally left empty
+ // Although theoretically we could use this to get better dump info
+}
+
+void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mInputMutex);
+
+ // ignore output buffers
+ if (bufferInfo.mOutput) {
+ return;
+ }
+
+ // TODO: Verify that the buffer is in our queue by looking at timestamp
+ // theoretically unnecessary unless we change the following assumptions:
+ // -- only 1 buffer reprocessed at a time (which is the case now)
+
+ // Erase entire ZSL queue since we've now completed the capture and preview
+ // is stopped.
+ //
+ // We need to guarantee that if we do two back-to-back captures,
+ // the second won't use a buffer that's older/the same as the first, which
+ // is theoretically possible if we don't clear out the queue and the
+ // selection criteria is something like 'newest'. Clearing out the queue
+ // on a completed capture ensures we'll only use new data.
+ ALOGV("%s: Memory optimization, clearing ZSL queue",
+ __FUNCTION__);
+ clearZslQueueLocked();
+
+ // Required so we accept more ZSL requests
+ mState = RUNNING;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
new file mode 100644
index 0000000..d2f8322
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/BufferItemConsumer.h>
+#include <camera/CameraMetadata.h>
+
+#include "api1/client2/FrameProcessor.h"
+#include "api1/client2/ZslProcessorInterface.h"
+#include "device3/Camera3ZslStream.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+class Parameters;
+
+/***
+ * ZSL queue processing
+ */
+class ZslProcessor3 :
+ public ZslProcessorInterface,
+ public camera3::Camera3StreamBufferListener,
+ virtual public Thread,
+ virtual public FrameProcessor::FilteredListener {
+ public:
+ ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+ ~ZslProcessor3();
+
+ // From FrameProcessor
+ virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+
+ /**
+ ****************************************
+ * ZslProcessorInterface implementation *
+ ****************************************
+ */
+
+ virtual status_t updateStream(const Parameters &params);
+ virtual status_t deleteStream();
+ virtual int getStreamId() const;
+
+ virtual status_t pushToReprocess(int32_t requestId);
+ virtual status_t clearZslQueue();
+
+ void dump(int fd, const Vector<String16>& args) const;
+
+ protected:
+ /**
+ **********************************************
+ * Camera3StreamBufferListener implementation *
+ **********************************************
+ */
+ typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
+ // Buffer was acquired by the HAL
+ virtual void onBufferAcquired(const BufferInfo& bufferInfo);
+ // Buffer was released by the HAL
+ virtual void onBufferReleased(const BufferInfo& bufferInfo);
+
+ private:
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+
+ enum {
+ RUNNING,
+ LOCKED
+ } mState;
+
+ wp<Camera2Client> mClient;
+ wp<CaptureSequencer> mSequencer;
+
+ const int mId;
+
+ mutable Mutex mInputMutex;
+
+ enum {
+ NO_STREAM = -1
+ };
+
+ int mZslStreamId;
+ sp<camera3::Camera3ZslStream> mZslStream;
+
+ struct ZslPair {
+ BufferItemConsumer::BufferItem buffer;
+ CameraMetadata frame;
+ };
+
+ static const size_t kZslBufferDepth = 4;
+ static const size_t kFrameListDepth = kZslBufferDepth * 2;
+ Vector<CameraMetadata> mFrameList;
+ size_t mFrameListHead;
+
+ ZslPair mNextPair;
+
+ Vector<ZslPair> mZslQueue;
+ size_t mZslQueueHead;
+ size_t mZslQueueTail;
+
+ CameraMetadata mLatestCapturedRequest;
+
+ virtual bool threadLoop();
+
+ status_t clearZslQueueLocked();
+
+ void dumpZslQueue(int id) const;
+
+ nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+};
+
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
new file mode 100644
index 0000000..183c0c2
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+namespace camera2 {
+
+class Parameters;
+
+class ZslProcessorInterface : virtual public RefBase {
+public:
+
+ // Get ID for use with android.request.outputStreams / inputStreams
+ virtual int getStreamId() const = 0;
+
+ // Update the streams by recreating them if the size/format has changed
+ virtual status_t updateStream(const Parameters& params) = 0;
+
+ // Delete the underlying CameraDevice streams
+ virtual status_t deleteStream() = 0;
+
+ /**
+ * Submits a ZSL capture request (id = requestId)
+ *
+ * An appropriate ZSL buffer is selected by the closest timestamp,
+ * then we push that buffer to be reprocessed by the HAL.
+ * A capture request is created and submitted on behalf of the client.
+ */
+ virtual status_t pushToReprocess(int32_t requestId) = 0;
+
+ // Flush the ZSL buffer queue, freeing up all the buffers
+ virtual status_t clearZslQueue() = 0;
+
+ // (Debugging only) Dump the current state to the specified file descriptor
+ virtual void dump(int fd, const Vector<String16>& args) const = 0;
+};
+
+}; //namespace camera2
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
new file mode 100644
index 0000000..1cdf8dc
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -0,0 +1,680 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraDeviceClient"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+// #define LOG_NDEBUG 0
+
+#include <cutils/properties.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+#include <camera/camera2/CaptureRequest.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api2/CameraDeviceClient.h"
+
+
+
+namespace android {
+using namespace camera2;
+
+CameraDeviceClientBase::CameraDeviceClientBase(
+ const sp<CameraService>& cameraService,
+ const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid) :
+ BasicClient(cameraService, remoteCallback->asBinder(), clientPackageName,
+ cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ mRemoteCallback(remoteCallback) {
+}
+
+// Interface used by CameraService
+
+CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
+ const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid) :
+ Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
+ cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ mRequestIdCounter(0) {
+
+ ATRACE_CALL();
+ ALOGI("CameraDeviceClient %d: Opened", cameraId);
+}
+
+status_t CameraDeviceClient::initialize(camera_module_t *module)
+{
+ ATRACE_CALL();
+ status_t res;
+
+ res = Camera2ClientBase::initialize(module);
+ if (res != OK) {
+ return res;
+ }
+
+ String8 threadName;
+ mFrameProcessor = new FrameProcessorBase(mDevice);
+ threadName = String8::format("CDU-%d-FrameProc", mCameraId);
+ mFrameProcessor->run(threadName.string());
+
+ mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
+ FRAME_PROCESSOR_LISTENER_MAX_ID,
+ /*listener*/this,
+ /*quirkSendPartials*/true);
+
+ return OK;
+}
+
+CameraDeviceClient::~CameraDeviceClient() {
+}
+
+status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request,
+ bool streaming) {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res;
+
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ if (request == 0) {
+ ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
+ __FUNCTION__, mCameraId);
+ return BAD_VALUE;
+ }
+
+ CameraMetadata metadata(request->mMetadata);
+
+ if (metadata.isEmpty()) {
+ ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
+ __FUNCTION__, mCameraId);
+ return BAD_VALUE;
+ } else if (request->mSurfaceList.size() == 0) {
+ ALOGE("%s: Camera %d: Requests must have at least one surface target. "
+ "Rejecting request.", __FUNCTION__, mCameraId);
+ return BAD_VALUE;
+ }
+
+ if (!enforceRequestPermissions(metadata)) {
+ // Callee logs
+ return PERMISSION_DENIED;
+ }
+
+ /**
+ * Write in the output stream IDs which we calculate from
+ * the capture request's list of surface targets
+ */
+ Vector<int32_t> outputStreamIds;
+ outputStreamIds.setCapacity(request->mSurfaceList.size());
+ for (size_t i = 0; i < request->mSurfaceList.size(); ++i) {
+ sp<Surface> surface = request->mSurfaceList[i];
+
+ if (surface == 0) continue;
+
+ sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
+ int idx = mStreamMap.indexOfKey(gbp->asBinder());
+
+ // Trying to submit request with surface that wasn't created
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
+ " we have not called createStream on",
+ __FUNCTION__, mCameraId);
+ return BAD_VALUE;
+ }
+
+ int streamId = mStreamMap.valueAt(idx);
+ outputStreamIds.push_back(streamId);
+ ALOGV("%s: Camera %d: Appending output stream %d to request",
+ __FUNCTION__, mCameraId, streamId);
+ }
+
+ metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
+ outputStreamIds.size());
+
+ int32_t requestId = mRequestIdCounter++;
+ metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
+ ALOGV("%s: Camera %d: Submitting request with ID %d",
+ __FUNCTION__, mCameraId, requestId);
+
+ if (streaming) {
+ res = mDevice->setStreamingRequest(metadata);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Got error %d after trying to set streaming "
+ "request", __FUNCTION__, mCameraId, res);
+ } else {
+ mStreamingRequestList.push_back(requestId);
+ }
+ } else {
+ res = mDevice->capture(metadata);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Got error %d after trying to set capture",
+ __FUNCTION__, mCameraId, res);
+ }
+ }
+
+ ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
+ if (res == OK) {
+ return requestId;
+ }
+
+ return res;
+}
+
+status_t CameraDeviceClient::cancelRequest(int requestId) {
+ ATRACE_CALL();
+ ALOGV("%s, requestId = %d", __FUNCTION__, requestId);
+
+ status_t res;
+
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ Vector<int>::iterator it, end;
+ for (it = mStreamingRequestList.begin(), end = mStreamingRequestList.end();
+ it != end; ++it) {
+ if (*it == requestId) {
+ break;
+ }
+ }
+
+ if (it == end) {
+ ALOGE("%s: Camera%d: Did not find request id %d in list of streaming "
+ "requests", __FUNCTION__, mCameraId, requestId);
+ return BAD_VALUE;
+ }
+
+ res = mDevice->clearStreamingRequest();
+
+ if (res == OK) {
+ ALOGV("%s: Camera %d: Successfully cleared streaming request",
+ __FUNCTION__, mCameraId);
+ mStreamingRequestList.erase(it);
+ }
+
+ return res;
+}
+
+status_t CameraDeviceClient::deleteStream(int streamId) {
+ ATRACE_CALL();
+ ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
+
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ // Guard against trying to delete non-created streams
+ ssize_t index = NAME_NOT_FOUND;
+ for (size_t i = 0; i < mStreamMap.size(); ++i) {
+ if (streamId == mStreamMap.valueAt(i)) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index == NAME_NOT_FOUND) {
+ ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+ "created yet", __FUNCTION__, mCameraId, streamId);
+ return BAD_VALUE;
+ }
+
+ // Also returns BAD_VALUE if stream ID was not valid
+ res = mDevice->deleteStream(streamId);
+
+ if (res == BAD_VALUE) {
+ ALOGE("%s: Camera %d: Unexpected BAD_VALUE when deleting stream, but we"
+ " already checked and the stream ID (%d) should be valid.",
+ __FUNCTION__, mCameraId, streamId);
+ } else if (res == OK) {
+ mStreamMap.removeItemsAt(index);
+
+ ALOGV("%s: Camera %d: Successfully deleted stream ID (%d)",
+ __FUNCTION__, mCameraId, streamId);
+ }
+
+ return res;
+}
+
+status_t CameraDeviceClient::createStream(int width, int height, int format,
+ const sp<IGraphicBufferProducer>& bufferProducer)
+{
+ ATRACE_CALL();
+ ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
+
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ // Don't create multiple streams for the same target surface
+ {
+ ssize_t index = mStreamMap.indexOfKey(bufferProducer->asBinder());
+ if (index != NAME_NOT_FOUND) {
+ ALOGW("%s: Camera %d: Buffer producer already has a stream for it "
+ "(ID %d)",
+ __FUNCTION__, mCameraId, index);
+ return ALREADY_EXISTS;
+ }
+ }
+
+ // HACK b/10949105
+ // Query consumer usage bits to set async operation mode for
+ // GLConsumer using controlledByApp parameter.
+ bool useAsync = false;
+ int32_t consumerUsage;
+ if ((res = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+ &consumerUsage)) != OK) {
+ ALOGE("%s: Camera %d: Failed to query consumer usage", __FUNCTION__,
+ mCameraId);
+ return res;
+ }
+ if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+ ALOGW("%s: Camera %d: Forcing asynchronous mode for stream",
+ __FUNCTION__, mCameraId);
+ useAsync = true;
+ }
+
+ sp<IBinder> binder;
+ sp<ANativeWindow> anw;
+ if (bufferProducer != 0) {
+ binder = bufferProducer->asBinder();
+ anw = new Surface(bufferProducer, useAsync);
+ }
+
+ // TODO: remove w,h,f since we are ignoring them
+
+ if ((res = anw->query(anw.get(), NATIVE_WINDOW_WIDTH, &width)) != OK) {
+ ALOGE("%s: Camera %d: Failed to query Surface width", __FUNCTION__,
+ mCameraId);
+ return res;
+ }
+ if ((res = anw->query(anw.get(), NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+ ALOGE("%s: Camera %d: Failed to query Surface height", __FUNCTION__,
+ mCameraId);
+ return res;
+ }
+ if ((res = anw->query(anw.get(), NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ ALOGE("%s: Camera %d: Failed to query Surface format", __FUNCTION__,
+ mCameraId);
+ return res;
+ }
+
+ // FIXME: remove this override since the default format should be
+ // IMPLEMENTATION_DEFINED. b/9487482
+ if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+ format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+ ALOGW("%s: Camera %d: Overriding format 0x%x to IMPLEMENTATION_DEFINED",
+ __FUNCTION__, mCameraId, format);
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+
+ // TODO: add startConfigure/stopConfigure call to CameraDeviceBase
+ // this will make it so Camera3Device doesn't call configure_streams
+ // after each call, but only once we are done with all.
+
+ int streamId = -1;
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ // JPEG buffers need to be sized for maximum possible compressed size
+ CameraMetadata staticInfo = mDevice->info();
+ camera_metadata_entry_t entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: Can't find maximum JPEG size in "
+ "static metadata!", __FUNCTION__, mCameraId);
+ return INVALID_OPERATION;
+ }
+ int32_t maxJpegSize = entry.data.i32[0];
+ res = mDevice->createStream(anw, width, height, format, maxJpegSize,
+ &streamId);
+ } else {
+ // All other streams are a known size
+ res = mDevice->createStream(anw, width, height, format, /*size*/0,
+ &streamId);
+ }
+
+ if (res == OK) {
+ mStreamMap.add(bufferProducer->asBinder(), streamId);
+
+ ALOGV("%s: Camera %d: Successfully created a new stream ID %d",
+ __FUNCTION__, mCameraId, streamId);
+
+ /**
+ * Set the stream transform flags to automatically
+ * rotate the camera stream for preview use cases.
+ */
+ int32_t transform = 0;
+ res = getRotationTransformLocked(&transform);
+
+ if (res != OK) {
+ // Error logged by getRotationTransformLocked.
+ return res;
+ }
+
+ res = mDevice->setStreamTransform(streamId, transform);
+ if (res != OK) {
+ ALOGE("%s: Failed to set stream transform (stream id %d)",
+ __FUNCTION__, streamId);
+ return res;
+ }
+
+ return streamId;
+ }
+
+ return res;
+}
+
+// Create a request object from a template.
+status_t CameraDeviceClient::createDefaultRequest(int templateId,
+ /*out*/
+ CameraMetadata* request)
+{
+ ATRACE_CALL();
+ ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
+
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ CameraMetadata metadata;
+ if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK &&
+ request != NULL) {
+
+ request->swap(metadata);
+ }
+
+ return res;
+}
+
+status_t CameraDeviceClient::getCameraInfo(/*out*/CameraMetadata* info)
+{
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = OK;
+
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ if (info != NULL) {
+ *info = mDevice->info(); // static camera metadata
+ // TODO: merge with device-specific camera metadata
+ }
+
+ return res;
+}
+
+status_t CameraDeviceClient::waitUntilIdle()
+{
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = OK;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ // FIXME: Also need check repeating burst.
+ if (!mStreamingRequestList.isEmpty()) {
+ ALOGE("%s: Camera %d: Try to waitUntilIdle when there are active streaming requests",
+ __FUNCTION__, mCameraId);
+ return INVALID_OPERATION;
+ }
+ res = mDevice->waitUntilDrained();
+ ALOGV("%s Done", __FUNCTION__);
+
+ return res;
+}
+
+status_t CameraDeviceClient::flush() {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = OK;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ return mDevice->flush();
+}
+
+status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
+ String8 result;
+ result.appendFormat("CameraDeviceClient[%d] (%p) PID: %d, dump:\n",
+ mCameraId,
+ getRemoteCallback()->asBinder().get(),
+ mClientPid);
+ result.append(" State: ");
+
+ // TODO: print dynamic/request section from most recent requests
+ mFrameProcessor->dump(fd, args);
+
+ return dumpDevice(fd, args);
+}
+
+
+void CameraDeviceClient::notifyError() {
+ // Thread safe. Don't bother locking.
+ sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+
+ if (remoteCb != 0) {
+ remoteCb->onDeviceError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE);
+ }
+}
+
+void CameraDeviceClient::notifyIdle() {
+ // Thread safe. Don't bother locking.
+ sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+
+ if (remoteCb != 0) {
+ remoteCb->onDeviceIdle();
+ }
+}
+
+void CameraDeviceClient::notifyShutter(int requestId,
+ nsecs_t timestamp) {
+ // Thread safe. Don't bother locking.
+ sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+ if (remoteCb != 0) {
+ remoteCb->onCaptureStarted(requestId, timestamp);
+ }
+}
+
+// TODO: refactor the code below this with IProCameraUser.
+// it's 100% copy-pasted, so let's not change it right now to make it easier.
+
+void CameraDeviceClient::detachDevice() {
+ if (mDevice == 0) return;
+
+ ALOGV("Camera %d: Stopping processors", mCameraId);
+
+ mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
+ FRAME_PROCESSOR_LISTENER_MAX_ID,
+ /*listener*/this);
+ mFrameProcessor->requestExit();
+ ALOGV("Camera %d: Waiting for threads", mCameraId);
+ mFrameProcessor->join();
+ ALOGV("Camera %d: Disconnecting device", mCameraId);
+
+ // WORKAROUND: HAL refuses to disconnect while there's streams in flight
+ {
+ mDevice->clearStreamingRequest();
+
+ status_t code;
+ if ((code = mDevice->waitUntilDrained()) != OK) {
+ ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
+ code);
+ }
+ }
+
+ Camera2ClientBase::detachDevice();
+}
+
+/** Device-related methods */
+void CameraDeviceClient::onFrameAvailable(int32_t requestId,
+ const CameraMetadata& frame) {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ // Thread-safe. No lock necessary.
+ sp<ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
+ if (remoteCb != NULL) {
+ ALOGV("%s: frame = %p ", __FUNCTION__, &frame);
+ remoteCb->onResultReceived(requestId, frame);
+ }
+}
+
+// TODO: move to Camera2ClientBase
+bool CameraDeviceClient::enforceRequestPermissions(CameraMetadata& metadata) {
+
+ const int pid = IPCThreadState::self()->getCallingPid();
+ const int selfPid = getpid();
+ camera_metadata_entry_t entry;
+
+ /**
+ * Mixin default important security values
+ * - android.led.transmit = defaulted ON
+ */
+ CameraMetadata staticInfo = mDevice->info();
+ entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS);
+ for(size_t i = 0; i < entry.count; ++i) {
+ uint8_t led = entry.data.u8[i];
+
+ switch(led) {
+ case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
+ uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
+ if (!metadata.exists(ANDROID_LED_TRANSMIT)) {
+ metadata.update(ANDROID_LED_TRANSMIT,
+ &transmitDefault, 1);
+ }
+ break;
+ }
+ }
+ }
+
+ // We can do anything!
+ if (pid == selfPid) {
+ return true;
+ }
+
+ /**
+ * Permission check special fields in the request
+ * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT
+ */
+ entry = metadata.find(ANDROID_LED_TRANSMIT);
+ if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) {
+ String16 permissionString =
+ String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED");
+ if (!checkCallingPermission(permissionString)) {
+ const int uid = IPCThreadState::self()->getCallingUid();
+ ALOGE("Permission Denial: "
+ "can't disable transmit LED pid=%d, uid=%d", pid, uid);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+status_t CameraDeviceClient::getRotationTransformLocked(int32_t* transform) {
+ ALOGV("%s: begin", __FUNCTION__);
+
+ if (transform == NULL) {
+ ALOGW("%s: null transform", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ *transform = 0;
+
+ const CameraMetadata& staticInfo = mDevice->info();
+ camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_SENSOR_ORIENTATION);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: Can't find android.sensor.orientation in "
+ "static metadata!", __FUNCTION__, mCameraId);
+ return INVALID_OPERATION;
+ }
+
+ int32_t& flags = *transform;
+
+ int orientation = entry.data.i32[0];
+ switch (orientation) {
+ case 0:
+ flags = 0;
+ break;
+ case 90:
+ flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
+ break;
+ case 180:
+ flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
+ break;
+ case 270:
+ flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
+ break;
+ default:
+ ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
+ __FUNCTION__, orientation);
+ return INVALID_OPERATION;
+ }
+
+ /**
+ * This magic flag makes surfaceflinger un-rotate the buffers
+ * to counter the extra global device UI rotation whenever the user
+ * physically rotates the device.
+ *
+ * By doing this, the camera buffer always ends up aligned
+ * with the physical camera for a "see through" effect.
+ *
+ * In essence, the buffer only gets rotated during preview use-cases.
+ * The user is still responsible to re-create streams of the proper
+ * aspect ratio, or the preview will end up looking non-uniformly
+ * stretched.
+ */
+ flags |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
+
+ ALOGV("%s: final transform = 0x%x", __FUNCTION__, flags);
+
+ return OK;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
new file mode 100644
index 0000000..b9c16aa
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
+#define ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
+
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/ICameraDeviceCallbacks.h>
+
+#include "CameraService.h"
+#include "common/FrameProcessorBase.h"
+#include "common/Camera2ClientBase.h"
+
+namespace android {
+
+struct CameraDeviceClientBase :
+ public CameraService::BasicClient, public BnCameraDeviceUser
+{
+ typedef ICameraDeviceCallbacks TCamCallbacks;
+
+ const sp<ICameraDeviceCallbacks>& getRemoteCallback() {
+ return mRemoteCallback;
+ }
+
+protected:
+ CameraDeviceClientBase(const sp<CameraService>& cameraService,
+ const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid);
+
+ sp<ICameraDeviceCallbacks> mRemoteCallback;
+};
+
+/**
+ * Implements the binder ICameraDeviceUser API,
+ * meant for HAL3-public implementation of
+ * android.hardware.photography.CameraDevice
+ */
+class CameraDeviceClient :
+ public Camera2ClientBase<CameraDeviceClientBase>,
+ public camera2::FrameProcessorBase::FilteredListener
+{
+public:
+ /**
+ * ICameraDeviceUser interface (see ICameraDeviceUser for details)
+ */
+
+ // Note that the callee gets a copy of the metadata.
+ virtual int submitRequest(sp<CaptureRequest> request,
+ bool streaming = false);
+ virtual status_t cancelRequest(int requestId);
+
+ // Returns -EBUSY if device is not idle
+ virtual status_t deleteStream(int streamId);
+
+ virtual status_t createStream(
+ int width,
+ int height,
+ int format,
+ const sp<IGraphicBufferProducer>& bufferProducer);
+
+ // Create a request object from a template.
+ virtual status_t createDefaultRequest(int templateId,
+ /*out*/
+ CameraMetadata* request);
+
+ // Get the static metadata for the camera
+ // -- Caller owns the newly allocated metadata
+ virtual status_t getCameraInfo(/*out*/CameraMetadata* info);
+
+ // Wait until all the submitted requests have finished processing
+ virtual status_t waitUntilIdle();
+
+ // Flush all active and pending requests as fast as possible
+ virtual status_t flush();
+
+ /**
+ * Interface used by CameraService
+ */
+
+ CameraDeviceClient(const sp<CameraService>& cameraService,
+ const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid);
+ virtual ~CameraDeviceClient();
+
+ virtual status_t initialize(camera_module_t *module);
+
+ virtual status_t dump(int fd, const Vector<String16>& args);
+
+ /**
+ * Device listener interface
+ */
+
+ virtual void notifyIdle();
+ virtual void notifyError();
+ virtual void notifyShutter(int requestId, nsecs_t timestamp);
+
+ /**
+ * Interface used by independent components of CameraDeviceClient.
+ */
+protected:
+ /** FilteredListener implementation **/
+ virtual void onFrameAvailable(int32_t requestId,
+ const CameraMetadata& frame);
+ virtual void detachDevice();
+
+ // Calculate the ANativeWindow transform from android.sensor.orientation
+ status_t getRotationTransformLocked(/*out*/int32_t* transform);
+
+private:
+ /** ICameraDeviceUser interface-related private members */
+
+ /** Preview callback related members */
+ sp<camera2::FrameProcessorBase> mFrameProcessor;
+ static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
+ static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
+
+ /** Utility members */
+ bool enforceRequestPermissions(CameraMetadata& metadata);
+
+ // IGraphicsBufferProducer binder -> Stream ID
+ KeyedVector<sp<IBinder>, int> mStreamMap;
+
+ // Stream ID
+ Vector<int> mStreamingRequestList;
+
+ int32_t mRequestIdCounter;
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
new file mode 100644
index 0000000..1a7a7a7
--- /dev/null
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
@@ -0,0 +1,446 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ProCamera2Client"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <cutils/properties.h>
+#include <gui/Surface.h>
+#include <gui/Surface.h>
+
+#include "api_pro/ProCamera2Client.h"
+#include "common/CameraDeviceBase.h"
+
+namespace android {
+using namespace camera2;
+
+// Interface used by CameraService
+
+ProCamera2Client::ProCamera2Client(const sp<CameraService>& cameraService,
+ const sp<IProCameraCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid) :
+ Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
+ cameraId, cameraFacing, clientPid, clientUid, servicePid)
+{
+ ATRACE_CALL();
+ ALOGI("ProCamera %d: Opened", cameraId);
+
+ mExclusiveLock = false;
+}
+
+status_t ProCamera2Client::initialize(camera_module_t *module)
+{
+ ATRACE_CALL();
+ status_t res;
+
+ res = Camera2ClientBase::initialize(module);
+ if (res != OK) {
+ return res;
+ }
+
+ String8 threadName;
+ mFrameProcessor = new FrameProcessorBase(mDevice);
+ threadName = String8::format("PC2-%d-FrameProc", mCameraId);
+ mFrameProcessor->run(threadName.string());
+
+ mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
+ FRAME_PROCESSOR_LISTENER_MAX_ID,
+ /*listener*/this);
+
+ return OK;
+}
+
+ProCamera2Client::~ProCamera2Client() {
+}
+
+status_t ProCamera2Client::exclusiveTryLock() {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+
+ if (!mDevice.get()) return PERMISSION_DENIED;
+
+ if (!mExclusiveLock) {
+ mExclusiveLock = true;
+
+ if (mRemoteCallback != NULL) {
+ mRemoteCallback->onLockStatusChanged(
+ IProCameraCallbacks::LOCK_ACQUIRED);
+ }
+
+ ALOGV("%s: exclusive lock acquired", __FUNCTION__);
+
+ return OK;
+ }
+
+ // TODO: have a PERMISSION_DENIED case for when someone else owns the lock
+
+ // don't allow recursive locking
+ ALOGW("%s: exclusive lock already exists - recursive locking is not"
+ "allowed", __FUNCTION__);
+
+ return ALREADY_EXISTS;
+}
+
+status_t ProCamera2Client::exclusiveLock() {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+
+ if (!mDevice.get()) return PERMISSION_DENIED;
+
+ /**
+ * TODO: this should asynchronously 'wait' until the lock becomes available
+ * if another client already has an exclusive lock.
+ *
+ * once we have proper sharing support this will need to do
+ * more than just return immediately
+ */
+ if (!mExclusiveLock) {
+ mExclusiveLock = true;
+
+ if (mRemoteCallback != NULL) {
+ mRemoteCallback->onLockStatusChanged(IProCameraCallbacks::LOCK_ACQUIRED);
+ }
+
+ ALOGV("%s: exclusive lock acquired", __FUNCTION__);
+
+ return OK;
+ }
+
+ // don't allow recursive locking
+ ALOGW("%s: exclusive lock already exists - recursive locking is not allowed"
+ , __FUNCTION__);
+ return ALREADY_EXISTS;
+}
+
+status_t ProCamera2Client::exclusiveUnlock() {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+
+ // don't allow unlocking if we have no lock
+ if (!mExclusiveLock) {
+ ALOGW("%s: cannot unlock, no lock was held in the first place",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ mExclusiveLock = false;
+ if (mRemoteCallback != NULL ) {
+ mRemoteCallback->onLockStatusChanged(
+ IProCameraCallbacks::LOCK_RELEASED);
+ }
+ ALOGV("%s: exclusive lock released", __FUNCTION__);
+
+ return OK;
+}
+
+bool ProCamera2Client::hasExclusiveLock() {
+ Mutex::Autolock icl(mBinderSerializationLock);
+ return mExclusiveLock;
+}
+
+void ProCamera2Client::onExclusiveLockStolen() {
+ ALOGV("%s: ProClient lost exclusivity (id %d)",
+ __FUNCTION__, mCameraId);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+
+ if (mExclusiveLock && mRemoteCallback.get() != NULL) {
+ mRemoteCallback->onLockStatusChanged(
+ IProCameraCallbacks::LOCK_STOLEN);
+ }
+
+ mExclusiveLock = false;
+
+ //TODO: we should not need to detach the device, merely reset it.
+ detachDevice();
+}
+
+status_t ProCamera2Client::submitRequest(camera_metadata_t* request,
+ bool streaming) {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ if (!mExclusiveLock) {
+ return PERMISSION_DENIED;
+ }
+
+ CameraMetadata metadata(request);
+
+ if (!enforceRequestPermissions(metadata)) {
+ return PERMISSION_DENIED;
+ }
+
+ if (streaming) {
+ return mDevice->setStreamingRequest(metadata);
+ } else {
+ return mDevice->capture(metadata);
+ }
+
+ // unreachable. thx gcc for a useless warning
+ return OK;
+}
+
+status_t ProCamera2Client::cancelRequest(int requestId) {
+ (void)requestId;
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ if (!mExclusiveLock) {
+ return PERMISSION_DENIED;
+ }
+
+ // TODO: implement
+ ALOGE("%s: not fully implemented yet", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+status_t ProCamera2Client::deleteStream(int streamId) {
+ ATRACE_CALL();
+ ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
+
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+ mDevice->clearStreamingRequest();
+
+ status_t code;
+ if ((code = mDevice->waitUntilDrained()) != OK) {
+ ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__, code);
+ }
+
+ return mDevice->deleteStream(streamId);
+}
+
+status_t ProCamera2Client::createStream(int width, int height, int format,
+ const sp<IGraphicBufferProducer>& bufferProducer,
+ /*out*/
+ int* streamId)
+{
+ if (streamId) {
+ *streamId = -1;
+ }
+
+ ATRACE_CALL();
+ ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
+
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ sp<IBinder> binder;
+ sp<ANativeWindow> window;
+ if (bufferProducer != 0) {
+ binder = bufferProducer->asBinder();
+ window = new Surface(bufferProducer);
+ }
+
+ return mDevice->createStream(window, width, height, format, /*size*/1,
+ streamId);
+}
+
+// Create a request object from a template.
+// -- Caller owns the newly allocated metadata
+status_t ProCamera2Client::createDefaultRequest(int templateId,
+ /*out*/
+ camera_metadata** request)
+{
+ ATRACE_CALL();
+ ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
+
+ if (request) {
+ *request = NULL;
+ }
+
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ CameraMetadata metadata;
+ if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK) {
+ *request = metadata.release();
+ }
+
+ return res;
+}
+
+status_t ProCamera2Client::getCameraInfo(int cameraId,
+ /*out*/
+ camera_metadata** info)
+{
+ if (cameraId != mCameraId) {
+ return INVALID_OPERATION;
+ }
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) return DEAD_OBJECT;
+
+ CameraMetadata deviceInfo = mDevice->info();
+ *info = deviceInfo.release();
+
+ return OK;
+}
+
+status_t ProCamera2Client::dump(int fd, const Vector<String16>& args) {
+ String8 result;
+ result.appendFormat("ProCamera2Client[%d] (%p) PID: %d, dump:\n",
+ mCameraId,
+ getRemoteCallback()->asBinder().get(),
+ mClientPid);
+ result.append(" State: ");
+
+ // TODO: print dynamic/request section from most recent requests
+ mFrameProcessor->dump(fd, args);
+
+ return dumpDevice(fd, args);
+}
+
+// IProCameraUser interface
+
+void ProCamera2Client::detachDevice() {
+ if (mDevice == 0) return;
+
+ ALOGV("Camera %d: Stopping processors", mCameraId);
+
+ mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
+ FRAME_PROCESSOR_LISTENER_MAX_ID,
+ /*listener*/this);
+ mFrameProcessor->requestExit();
+ ALOGV("Camera %d: Waiting for threads", mCameraId);
+ mFrameProcessor->join();
+ ALOGV("Camera %d: Disconnecting device", mCameraId);
+
+ // WORKAROUND: HAL refuses to disconnect while there's streams in flight
+ {
+ mDevice->clearStreamingRequest();
+
+ status_t code;
+ if ((code = mDevice->waitUntilDrained()) != OK) {
+ ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
+ code);
+ }
+ }
+
+ Camera2ClientBase::detachDevice();
+}
+
+/** Device-related methods */
+void ProCamera2Client::onFrameAvailable(int32_t requestId,
+ const CameraMetadata& frame) {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+ SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+
+ if (mRemoteCallback != NULL) {
+ CameraMetadata tmp(frame);
+ camera_metadata_t* meta = tmp.release();
+ ALOGV("%s: meta = %p ", __FUNCTION__, meta);
+ mRemoteCallback->onResultReceived(requestId, meta);
+ tmp.acquire(meta);
+ }
+
+}
+
+bool ProCamera2Client::enforceRequestPermissions(CameraMetadata& metadata) {
+
+ const int pid = IPCThreadState::self()->getCallingPid();
+ const int selfPid = getpid();
+ camera_metadata_entry_t entry;
+
+ /**
+ * Mixin default important security values
+ * - android.led.transmit = defaulted ON
+ */
+ CameraMetadata staticInfo = mDevice->info();
+ entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS);
+ for(size_t i = 0; i < entry.count; ++i) {
+ uint8_t led = entry.data.u8[i];
+
+ switch(led) {
+ case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
+ uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
+ if (!metadata.exists(ANDROID_LED_TRANSMIT)) {
+ metadata.update(ANDROID_LED_TRANSMIT,
+ &transmitDefault, 1);
+ }
+ break;
+ }
+ }
+ }
+
+ // We can do anything!
+ if (pid == selfPid) {
+ return true;
+ }
+
+ /**
+ * Permission check special fields in the request
+ * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT
+ */
+ entry = metadata.find(ANDROID_LED_TRANSMIT);
+ if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) {
+ String16 permissionString =
+ String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED");
+ if (!checkCallingPermission(permissionString)) {
+ const int uid = IPCThreadState::self()->getCallingUid();
+ ALOGE("Permission Denial: "
+ "can't disable transmit LED pid=%d, uid=%d", pid, uid);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
new file mode 100644
index 0000000..8a0f547
--- /dev/null
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
+#define ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
+
+#include "CameraService.h"
+#include "common/FrameProcessorBase.h"
+#include "common/Camera2ClientBase.h"
+#include "device2/Camera2Device.h"
+
+namespace android {
+
+class IMemory;
+/**
+ * Implements the binder IProCameraUser API,
+ * meant for HAL2-level private API access.
+ */
+class ProCamera2Client :
+ public Camera2ClientBase<CameraService::ProClient>,
+ public camera2::FrameProcessorBase::FilteredListener
+{
+public:
+ /**
+ * IProCameraUser interface (see IProCameraUser for details)
+ */
+ virtual status_t exclusiveTryLock();
+ virtual status_t exclusiveLock();
+ virtual status_t exclusiveUnlock();
+
+ virtual bool hasExclusiveLock();
+
+ // Note that the callee gets a copy of the metadata.
+ virtual int submitRequest(camera_metadata_t* metadata,
+ bool streaming = false);
+ virtual status_t cancelRequest(int requestId);
+
+ virtual status_t deleteStream(int streamId);
+
+ virtual status_t createStream(
+ int width,
+ int height,
+ int format,
+ const sp<IGraphicBufferProducer>& bufferProducer,
+ /*out*/
+ int* streamId);
+
+ // Create a request object from a template.
+ // -- Caller owns the newly allocated metadata
+ virtual status_t createDefaultRequest(int templateId,
+ /*out*/
+ camera_metadata** request);
+
+ // Get the static metadata for the camera
+ // -- Caller owns the newly allocated metadata
+ virtual status_t getCameraInfo(int cameraId,
+ /*out*/
+ camera_metadata** info);
+
+ /**
+ * Interface used by CameraService
+ */
+
+ ProCamera2Client(const sp<CameraService>& cameraService,
+ const sp<IProCameraCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid);
+ virtual ~ProCamera2Client();
+
+ virtual status_t initialize(camera_module_t *module);
+
+ virtual status_t dump(int fd, const Vector<String16>& args);
+
+ // Callbacks from camera service
+ virtual void onExclusiveLockStolen();
+
+ /**
+ * Interface used by independent components of ProCamera2Client.
+ */
+
+protected:
+ /** FilteredListener implementation **/
+ virtual void onFrameAvailable(int32_t requestId,
+ const CameraMetadata& frame);
+ virtual void detachDevice();
+
+private:
+ /** IProCameraUser interface-related private members */
+
+ /** Preview callback related members */
+ sp<camera2::FrameProcessorBase> mFrameProcessor;
+ static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
+ static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
+
+ /** Utility members */
+ bool enforceRequestPermissions(CameraMetadata& metadata);
+
+ // Whether or not we have an exclusive lock on the device
+ // - if no we can't modify the request queue.
+ // note that creating/deleting streams we own is still OK
+ bool mExclusiveLock;
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
deleted file mode 100644
index 3e9c255..0000000
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-CallbackProcessor"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include "CallbackProcessor.h"
-#include <gui/SurfaceTextureClient.h>
-#include "../Camera2Device.h"
-#include "../Camera2Client.h"
-
-
-namespace android {
-namespace camera2 {
-
-CallbackProcessor::CallbackProcessor(wp<Camera2Client> client):
- Thread(false),
- mClient(client),
- mCallbackAvailable(false),
- mCallbackStreamId(NO_STREAM) {
-}
-
-CallbackProcessor::~CallbackProcessor() {
- ALOGV("%s: Exit", __FUNCTION__);
- deleteStream();
-}
-
-void CallbackProcessor::onFrameAvailable() {
- Mutex::Autolock l(mInputMutex);
- if (!mCallbackAvailable) {
- mCallbackAvailable = true;
- mCallbackAvailableSignal.signal();
- }
-}
-
-status_t CallbackProcessor::updateStream(const Parameters &params) {
- ATRACE_CALL();
- status_t res;
-
- Mutex::Autolock l(mInputMutex);
-
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return OK;
- sp<Camera2Device> device = client->getCameraDevice();
-
- if (mCallbackConsumer == 0) {
- // Create CPU buffer queue endpoint
- mCallbackConsumer = new CpuConsumer(kCallbackHeapCount);
- mCallbackConsumer->setFrameAvailableListener(this);
- mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
- mCallbackWindow = new SurfaceTextureClient(
- mCallbackConsumer->getProducerInterface());
- }
-
- if (mCallbackStreamId != NO_STREAM) {
- // Check if stream parameters have to change
- uint32_t currentWidth, currentHeight, currentFormat;
- res = device->getStreamInfo(mCallbackStreamId,
- &currentWidth, &currentHeight, &currentFormat);
- if (res != OK) {
- ALOGE("%s: Camera %d: Error querying callback output stream info: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
- strerror(-res), res);
- return res;
- }
- if (currentWidth != (uint32_t)params.previewWidth ||
- currentHeight != (uint32_t)params.previewHeight ||
- currentFormat != (uint32_t)params.previewFormat) {
- // Since size should only change while preview is not running,
- // assuming that all existing use of old callback stream is
- // completed.
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
- __FUNCTION__, client->getCameraId(), mCallbackStreamId);
- res = device->deleteStream(mCallbackStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for callbacks: %s (%d)", __FUNCTION__, client->getCameraId(),
- strerror(-res), res);
- return res;
- }
- mCallbackStreamId = NO_STREAM;
- }
- }
-
- if (mCallbackStreamId == NO_STREAM) {
- ALOGV("Creating callback stream: %d %d format 0x%x",
- params.previewWidth, params.previewHeight,
- params.previewFormat);
- res = device->createStream(mCallbackWindow,
- params.previewWidth, params.previewHeight,
- params.previewFormat, 0, &mCallbackStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
- strerror(-res), res);
- return res;
- }
- }
-
- return OK;
-}
-
-status_t CallbackProcessor::deleteStream() {
- ATRACE_CALL();
- status_t res;
-
- Mutex::Autolock l(mInputMutex);
-
- if (mCallbackStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return OK;
- sp<Camera2Device> device = client->getCameraDevice();
-
- device->deleteStream(mCallbackStreamId);
-
- mCallbackHeap.clear();
- mCallbackWindow.clear();
- mCallbackConsumer.clear();
-
- mCallbackStreamId = NO_STREAM;
- }
- return OK;
-}
-
-int CallbackProcessor::getStreamId() const {
- Mutex::Autolock l(mInputMutex);
- return mCallbackStreamId;
-}
-
-void CallbackProcessor::dump(int fd, const Vector<String16>& args) const {
-}
-
-bool CallbackProcessor::threadLoop() {
- status_t res;
-
- {
- Mutex::Autolock l(mInputMutex);
- while (!mCallbackAvailable) {
- res = mCallbackAvailableSignal.waitRelative(mInputMutex,
- kWaitDuration);
- if (res == TIMED_OUT) return true;
- }
- mCallbackAvailable = false;
- }
-
- do {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return false;
- res = processNewCallback(client);
- } while (res == OK);
-
- return true;
-}
-
-status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) {
- ATRACE_CALL();
- status_t res;
-
- int callbackHeapId;
- sp<Camera2Heap> callbackHeap;
- size_t heapIdx;
-
- CpuConsumer::LockedBuffer imgBuffer;
- ALOGV("%s: Getting buffer", __FUNCTION__);
- res = mCallbackConsumer->lockNextBuffer(&imgBuffer);
- if (res != OK) {
- if (res != BAD_VALUE) {
- ALOGE("%s: Camera %d: Error receiving next callback buffer: "
- "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
- }
- return res;
- }
- ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__,
- client->getCameraId());
-
- {
- SharedParameters::Lock l(client->getParameters());
-
- if ( l.mParameters.state != Parameters::PREVIEW
- && l.mParameters.state != Parameters::RECORD
- && l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
- ALOGV("%s: Camera %d: No longer streaming",
- __FUNCTION__, client->getCameraId());
- mCallbackConsumer->unlockBuffer(imgBuffer);
- return OK;
- }
-
- if (! (l.mParameters.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ) {
- ALOGV("%s: No longer enabled, dropping", __FUNCTION__);
- mCallbackConsumer->unlockBuffer(imgBuffer);
- return OK;
- }
- if ((l.mParameters.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) &&
- !l.mParameters.previewCallbackOneShot) {
- ALOGV("%s: One shot mode, already sent, dropping", __FUNCTION__);
- mCallbackConsumer->unlockBuffer(imgBuffer);
- return OK;
- }
-
- if (imgBuffer.format != l.mParameters.previewFormat) {
- ALOGE("%s: Camera %d: Unexpected format for callback: "
- "%x, expected %x", __FUNCTION__, client->getCameraId(),
- imgBuffer.format, l.mParameters.previewFormat);
- mCallbackConsumer->unlockBuffer(imgBuffer);
- return INVALID_OPERATION;
- }
-
- // In one-shot mode, stop sending callbacks after the first one
- if (l.mParameters.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) {
- ALOGV("%s: clearing oneshot", __FUNCTION__);
- l.mParameters.previewCallbackOneShot = false;
- }
- }
-
- size_t bufferSize = Camera2Client::calculateBufferSize(
- imgBuffer.width, imgBuffer.height,
- imgBuffer.format, imgBuffer.stride);
- size_t currentBufferSize = (mCallbackHeap == 0) ?
- 0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount);
- if (bufferSize != currentBufferSize) {
- mCallbackHeap.clear();
- mCallbackHeap = new Camera2Heap(bufferSize, kCallbackHeapCount,
- "Camera2Client::CallbackHeap");
- if (mCallbackHeap->mHeap->getSize() == 0) {
- ALOGE("%s: Camera %d: Unable to allocate memory for callbacks",
- __FUNCTION__, client->getCameraId());
- mCallbackConsumer->unlockBuffer(imgBuffer);
- return INVALID_OPERATION;
- }
-
- mCallbackHeapHead = 0;
- mCallbackHeapFree = kCallbackHeapCount;
- }
-
- if (mCallbackHeapFree == 0) {
- ALOGE("%s: Camera %d: No free callback buffers, dropping frame",
- __FUNCTION__, client->getCameraId());
- mCallbackConsumer->unlockBuffer(imgBuffer);
- return OK;
- }
-
- heapIdx = mCallbackHeapHead;
-
- mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
- mCallbackHeapFree--;
-
- // TODO: Get rid of this memcpy by passing the gralloc queue all the way
- // to app
-
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap =
- mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset,
- &size);
- uint8_t *data = (uint8_t*)heap->getBase() + offset;
- memcpy(data, imgBuffer.data, bufferSize);
-
- ALOGV("%s: Freeing buffer", __FUNCTION__);
- mCallbackConsumer->unlockBuffer(imgBuffer);
-
- // Call outside parameter lock to allow re-entrancy from notification
- {
- Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
- if (l.mCameraClient != 0) {
- ALOGV("%s: Camera %d: Invoking client data callback",
- __FUNCTION__, client->getCameraId());
- l.mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_FRAME,
- mCallbackHeap->mBuffers[heapIdx], NULL);
- }
- }
-
- // Only increment free if we're still using the same heap
- mCallbackHeapFree++;
-
- ALOGV("%s: exit", __FUNCTION__);
-
- return OK;
-}
-
-}; // namespace camera2
-}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.cpp b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
deleted file mode 100644
index 835587d..0000000
--- a/services/camera/libcameraservice/camera2/CameraMetadata.cpp
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-Metadata"
-#include <utils/Log.h>
-#include <utils/Errors.h>
-
-#include "CameraMetadata.h"
-
-namespace android {
-
-namespace camera2 {
-CameraMetadata::CameraMetadata() :
- mBuffer(NULL) {
-}
-
-CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity)
-{
- mBuffer = allocate_camera_metadata(entryCapacity, dataCapacity);
-}
-
-CameraMetadata::CameraMetadata(const CameraMetadata &other) {
- mBuffer = clone_camera_metadata(other.mBuffer);
-}
-
-CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other) {
- return operator=(other.mBuffer);
-}
-
-CameraMetadata &CameraMetadata::operator=(const camera_metadata_t *buffer) {
- if (CC_LIKELY(buffer != mBuffer)) {
- camera_metadata_t *newBuffer = clone_camera_metadata(buffer);
- clear();
- mBuffer = newBuffer;
- }
- return *this;
-}
-
-CameraMetadata::~CameraMetadata() {
- clear();
-}
-
-camera_metadata_t* CameraMetadata::release() {
- camera_metadata_t *released = mBuffer;
- mBuffer = NULL;
- return released;
-}
-
-void CameraMetadata::clear() {
- if (mBuffer) {
- free_camera_metadata(mBuffer);
- mBuffer = NULL;
- }
-}
-
-void CameraMetadata::acquire(camera_metadata_t *buffer) {
- clear();
- mBuffer = buffer;
-}
-
-void CameraMetadata::acquire(CameraMetadata &other) {
- acquire(other.release());
-}
-
-status_t CameraMetadata::append(const CameraMetadata &other) {
- return append_camera_metadata(mBuffer, other.mBuffer);
-}
-
-size_t CameraMetadata::entryCount() const {
- return (mBuffer == NULL) ? 0 :
- get_camera_metadata_entry_count(mBuffer);
-}
-
-bool CameraMetadata::isEmpty() const {
- return entryCount() == 0;
-}
-
-status_t CameraMetadata::sort() {
- return sort_camera_metadata(mBuffer);
-}
-
-status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
- int tagType = get_camera_metadata_tag_type(tag);
- if ( CC_UNLIKELY(tagType == -1)) {
- ALOGE("Update metadata entry: Unknown tag %d", tag);
- return INVALID_OPERATION;
- }
- if ( CC_UNLIKELY(tagType != expectedType) ) {
- ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
- "got type %s data instead ",
- get_camera_metadata_tag_name(tag), tag,
- camera_metadata_type_names[tagType],
- camera_metadata_type_names[expectedType]);
- return INVALID_OPERATION;
- }
- return OK;
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const int32_t *data, size_t data_count) {
- status_t res;
- if ( (res = checkType(tag, TYPE_INT32)) != OK) {
- return res;
- }
- return update(tag, (const void*)data, data_count);
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const uint8_t *data, size_t data_count) {
- status_t res;
- if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
- return res;
- }
- return update(tag, (const void*)data, data_count);
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const float *data, size_t data_count) {
- status_t res;
- if ( (res = checkType(tag, TYPE_FLOAT)) != OK) {
- return res;
- }
- return update(tag, (const void*)data, data_count);
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const int64_t *data, size_t data_count) {
- status_t res;
- if ( (res = checkType(tag, TYPE_INT64)) != OK) {
- return res;
- }
- return update(tag, (const void*)data, data_count);
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const double *data, size_t data_count) {
- status_t res;
- if ( (res = checkType(tag, TYPE_DOUBLE)) != OK) {
- return res;
- }
- return update(tag, (const void*)data, data_count);
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const camera_metadata_rational_t *data, size_t data_count) {
- status_t res;
- if ( (res = checkType(tag, TYPE_RATIONAL)) != OK) {
- return res;
- }
- return update(tag, (const void*)data, data_count);
-}
-
-status_t CameraMetadata::update(uint32_t tag,
- const String8 &string) {
- status_t res;
- if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
- return res;
- }
- return update(tag, (const void*)string.string(), string.size());
-}
-
-status_t CameraMetadata::update(uint32_t tag, const void *data,
- size_t data_count) {
- status_t res;
- int type = get_camera_metadata_tag_type(tag);
- if (type == -1) {
- ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
- return BAD_VALUE;
- }
- size_t data_size = calculate_camera_metadata_entry_data_size(type,
- data_count);
-
- res = resizeIfNeeded(1, data_size);
-
- if (res == OK) {
- camera_metadata_entry_t entry;
- res = find_camera_metadata_entry(mBuffer, tag, &entry);
- if (res == NAME_NOT_FOUND) {
- res = add_camera_metadata_entry(mBuffer,
- tag, data, data_count);
- } else if (res == OK) {
- res = update_camera_metadata_entry(mBuffer,
- entry.index, data, data_count, NULL);
- }
- }
-
- if (res != OK) {
- ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)",
- __FUNCTION__, get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
- }
- return res;
-}
-
-camera_metadata_entry_t CameraMetadata::find(uint32_t tag) {
- status_t res;
- camera_metadata_entry entry;
- res = find_camera_metadata_entry(mBuffer, tag, &entry);
- if (CC_UNLIKELY( res != OK )) {
- entry.count = 0;
- entry.data.u8 = NULL;
- }
- return entry;
-}
-
-camera_metadata_ro_entry_t CameraMetadata::find(uint32_t tag) const {
- status_t res;
- camera_metadata_ro_entry entry;
- res = find_camera_metadata_ro_entry(mBuffer, tag, &entry);
- if (CC_UNLIKELY( res != OK )) {
- entry.count = 0;
- entry.data.u8 = NULL;
- }
- return entry;
-}
-
-status_t CameraMetadata::erase(uint32_t tag) {
- camera_metadata_entry_t entry;
- status_t res;
- res = find_camera_metadata_entry(mBuffer, tag, &entry);
- if (res == NAME_NOT_FOUND) {
- return OK;
- } else if (res != OK) {
- ALOGE("%s: Error looking for entry %s.%s (%x): %s %d",
- __FUNCTION__,
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
- return res;
- }
- res = delete_camera_metadata_entry(mBuffer, entry.index);
- if (res != OK) {
- ALOGE("%s: Error deleting entry %s.%s (%x): %s %d",
- __FUNCTION__,
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
- }
- return res;
-}
-
-void CameraMetadata::dump(int fd, int verbosity, int indentation) const {
- dump_indented_camera_metadata(mBuffer, fd, verbosity, indentation);
-}
-
-status_t CameraMetadata::resizeIfNeeded(size_t extraEntries, size_t extraData) {
- if (mBuffer == NULL) {
- mBuffer = allocate_camera_metadata(extraEntries * 2, extraData * 2);
- if (mBuffer == NULL) {
- ALOGE("%s: Can't allocate larger metadata buffer", __FUNCTION__);
- return NO_MEMORY;
- }
- } else {
- size_t currentEntryCount = get_camera_metadata_entry_count(mBuffer);
- size_t currentEntryCap = get_camera_metadata_entry_capacity(mBuffer);
- size_t newEntryCount = currentEntryCount +
- extraEntries;
- newEntryCount = (newEntryCount > currentEntryCap) ?
- newEntryCount * 2 : currentEntryCap;
-
- size_t currentDataCount = get_camera_metadata_data_count(mBuffer);
- size_t currentDataCap = get_camera_metadata_data_capacity(mBuffer);
- size_t newDataCount = currentDataCount +
- extraData;
- newDataCount = (newDataCount > currentDataCap) ?
- newDataCount * 2 : currentDataCap;
-
- if (newEntryCount > currentEntryCap ||
- newDataCount > currentDataCap) {
- camera_metadata_t *oldBuffer = mBuffer;
- mBuffer = allocate_camera_metadata(newEntryCount,
- newDataCount);
- if (mBuffer == NULL) {
- ALOGE("%s: Can't allocate larger metadata buffer", __FUNCTION__);
- return NO_MEMORY;
- }
- append_camera_metadata(mBuffer, oldBuffer);
- free_camera_metadata(oldBuffer);
- }
- }
- return OK;
-}
-
-}; // namespace camera2
-}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.h b/services/camera/libcameraservice/camera2/CameraMetadata.h
deleted file mode 100644
index aee6cd7..0000000
--- a/services/camera/libcameraservice/camera2/CameraMetadata.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2METADATA_CPP
-#define ANDROID_SERVERS_CAMERA_CAMERA2METADATA_CPP
-
-#include "system/camera_metadata.h"
-#include <utils/String8.h>
-#include <utils/Vector.h>
-
-namespace android {
-namespace camera2 {
-
-/**
- * A convenience wrapper around the C-based camera_metadata_t library.
- */
-class CameraMetadata {
- public:
- /** Creates an empty object; best used when expecting to acquire contents
- * from elsewhere */
- CameraMetadata();
- /** Creates an object with space for entryCapacity entries, with
- * dataCapacity extra storage */
- CameraMetadata(size_t entryCapacity, size_t dataCapacity = 10);
-
- ~CameraMetadata();
-
- /** Takes ownership of passed-in buffer */
- CameraMetadata(camera_metadata_t *buffer);
- /** Clones the metadata */
- CameraMetadata(const CameraMetadata &other);
-
- /**
- * Assignment clones metadata buffer.
- */
- CameraMetadata &operator=(const CameraMetadata &other);
- CameraMetadata &operator=(const camera_metadata_t *buffer);
-
- /**
- * Release a raw metadata buffer to the caller. After this call,
- * CameraMetadata no longer references the buffer, and the caller takes
- * responsibility for freeing the raw metadata buffer (using
- * free_camera_metadata()), or for handing it to another CameraMetadata
- * instance.
- */
- camera_metadata_t* release();
-
- /**
- * Clear the metadata buffer and free all storage used by it
- */
- void clear();
-
- /**
- * Acquire a raw metadata buffer from the caller. After this call,
- * the caller no longer owns the raw buffer, and must not free or manipulate it.
- * If CameraMetadata already contains metadata, it is freed.
- */
- void acquire(camera_metadata_t* buffer);
-
- /**
- * Acquires raw buffer from other CameraMetadata object. After the call, the argument
- * object no longer has any metadata.
- */
- void acquire(CameraMetadata &other);
-
- /**
- * Append metadata from another CameraMetadata object.
- */
- status_t append(const CameraMetadata &other);
-
- /**
- * Number of metadata entries.
- */
- size_t entryCount() const;
-
- /**
- * Is the buffer empty (no entires)
- */
- bool isEmpty() const;
-
- /**
- * Sort metadata buffer for faster find
- */
- status_t sort();
-
- /**
- * Update metadata entry. Will create entry if it doesn't exist already, and
- * will reallocate the buffer if insufficient space exists. Overloaded for
- * the various types of valid data.
- */
- status_t update(uint32_t tag,
- const uint8_t *data, size_t data_count);
- status_t update(uint32_t tag,
- const int32_t *data, size_t data_count);
- status_t update(uint32_t tag,
- const float *data, size_t data_count);
- status_t update(uint32_t tag,
- const int64_t *data, size_t data_count);
- status_t update(uint32_t tag,
- const double *data, size_t data_count);
- status_t update(uint32_t tag,
- const camera_metadata_rational_t *data, size_t data_count);
- status_t update(uint32_t tag,
- const String8 &string);
-
- template<typename T>
- status_t update(uint32_t tag, Vector<T> data) {
- return update(tag, data.array(), data.size());
- }
-
- /**
- * Get metadata entry by tag id
- */
- camera_metadata_entry find(uint32_t tag);
-
- /**
- * Get metadata entry by tag id, with no editing
- */
- camera_metadata_ro_entry find(uint32_t tag) const;
-
- /**
- * Delete metadata entry by tag
- */
- status_t erase(uint32_t tag);
-
- /**
- * Dump contents into FD for debugging. The verbosity levels are
- * 0: Tag entry information only, no data values
- * 1: Level 0 plus at most 16 data values per entry
- * 2: All information
- *
- * The indentation parameter sets the number of spaces to add to the start
- * each line of output.
- */
- void dump(int fd, int verbosity = 1, int indentation = 0) const;
-
- private:
- camera_metadata_t *mBuffer;
-
- /**
- * Check if tag has a given type
- */
- status_t checkType(uint32_t tag, uint8_t expectedType);
-
- /**
- * Base update entry method
- */
- status_t update(uint32_t tag, const void *data, size_t data_count);
-
- /**
- * Resize metadata buffer if needed by reallocating it and copying it over.
- */
- status_t resizeIfNeeded(size_t extraEntries, size_t extraData);
-
-};
-
-}; // namespace camera2
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
deleted file mode 100644
index 064607c..0000000
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-FrameProcessor"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include "FrameProcessor.h"
-#include "../Camera2Device.h"
-#include "../Camera2Client.h"
-
-namespace android {
-namespace camera2 {
-
-FrameProcessor::FrameProcessor(wp<Camera2Client> client):
- Thread(false), mClient(client), mLastFrameNumberOfFaces(0) {
-}
-
-FrameProcessor::~FrameProcessor() {
- ALOGV("%s: Exit", __FUNCTION__);
-}
-
-status_t FrameProcessor::registerListener(int32_t minId,
- int32_t maxId, wp<FilteredListener> listener) {
- Mutex::Autolock l(mInputMutex);
- ALOGV("%s: Registering listener for frame id range %d - %d",
- __FUNCTION__, minId, maxId);
- RangeListener rListener = { minId, maxId, listener };
- mRangeListeners.push_back(rListener);
- return OK;
-}
-
-status_t FrameProcessor::removeListener(int32_t minId,
- int32_t maxId, wp<FilteredListener> listener) {
- Mutex::Autolock l(mInputMutex);
- List<RangeListener>::iterator item = mRangeListeners.begin();
- while (item != mRangeListeners.end()) {
- if (item->minId == minId &&
- item->maxId == maxId &&
- item->listener == listener) {
- item = mRangeListeners.erase(item);
- } else {
- item++;
- }
- }
- return OK;
-}
-
-void FrameProcessor::dump(int fd, const Vector<String16>& args) {
- String8 result(" Latest received frame:\n");
- write(fd, result.string(), result.size());
- mLastFrame.dump(fd, 2, 6);
-}
-
-bool FrameProcessor::threadLoop() {
- status_t res;
-
- sp<Camera2Device> device;
- {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return false;
- device = client->getCameraDevice();
- if (device == 0) return false;
- }
-
- res = device->waitForNextFrame(kWaitDuration);
- if (res == OK) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) return false;
- processNewFrames(client);
- } else if (res != TIMED_OUT) {
- ALOGE("Camera2Client::FrameProcessor: Error waiting for new "
- "frames: %s (%d)", strerror(-res), res);
- }
-
- return true;
-}
-
-void FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
- status_t res;
- ATRACE_CALL();
- CameraMetadata frame;
- while ( (res = client->getCameraDevice()->getNextFrame(&frame)) == OK) {
- camera_metadata_entry_t entry;
-
- entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: Error reading frame number",
- __FUNCTION__, client->getCameraId());
- break;
- }
- ATRACE_INT("cam2_frame", entry.data.i32[0]);
-
- res = processFaceDetect(frame, client);
- if (res != OK) break;
-
- res = processListeners(frame, client);
- if (res != OK) break;
-
- if (!frame.isEmpty()) {
- mLastFrame.acquire(frame);
- }
- }
- if (res != NOT_ENOUGH_DATA) {
- ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
- return;
- }
-
- return;
-}
-
-status_t FrameProcessor::processListeners(const CameraMetadata &frame,
- sp<Camera2Client> &client) {
- status_t res;
- ATRACE_CALL();
- camera_metadata_ro_entry_t entry;
-
- entry = frame.find(ANDROID_REQUEST_ID);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: Error reading frame id",
- __FUNCTION__, client->getCameraId());
- return BAD_VALUE;
- }
- int32_t frameId = entry.data.i32[0];
-
- List<sp<FilteredListener> > listeners;
- {
- Mutex::Autolock l(mInputMutex);
-
- List<RangeListener>::iterator item = mRangeListeners.begin();
- while (item != mRangeListeners.end()) {
- if (frameId >= item->minId &&
- frameId < item->maxId) {
- sp<FilteredListener> listener = item->listener.promote();
- if (listener == 0) {
- item = mRangeListeners.erase(item);
- continue;
- } else {
- listeners.push_back(listener);
- }
- }
- item++;
- }
- }
- ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size());
- List<sp<FilteredListener> >::iterator item = listeners.begin();
- for (; item != listeners.end(); item++) {
- (*item)->onFrameAvailable(frameId, frame);
- }
- return OK;
-}
-
-status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
- sp<Camera2Client> &client) {
- status_t res = BAD_VALUE;
- ATRACE_CALL();
- camera_metadata_ro_entry_t entry;
- bool enableFaceDetect;
- int maxFaces;
- {
- SharedParameters::Lock l(client->getParameters());
- enableFaceDetect = l.mParameters.enableFaceDetect;
- }
- entry = frame.find(ANDROID_STATS_FACE_DETECT_MODE);
-
- // TODO: This should be an error once implementations are compliant
- if (entry.count == 0) {
- return OK;
- }
-
- uint8_t faceDetectMode = entry.data.u8[0];
-
- camera_frame_metadata metadata;
- Vector<camera_face_t> faces;
- metadata.number_of_faces = 0;
-
- if (enableFaceDetect && faceDetectMode != ANDROID_STATS_FACE_DETECTION_OFF) {
- SharedParameters::Lock l(client->getParameters());
- entry = frame.find(ANDROID_STATS_FACE_RECTANGLES);
- if (entry.count == 0) {
- // No faces this frame
- /* warning: locks SharedCameraClient */
- callbackFaceDetection(client, metadata);
- return OK;
- }
- metadata.number_of_faces = entry.count / 4;
- if (metadata.number_of_faces >
- l.mParameters.fastInfo.maxFaces) {
- ALOGE("%s: Camera %d: More faces than expected! (Got %d, max %d)",
- __FUNCTION__, client->getCameraId(),
- metadata.number_of_faces, l.mParameters.fastInfo.maxFaces);
- return res;
- }
- const int32_t *faceRects = entry.data.i32;
-
- entry = frame.find(ANDROID_STATS_FACE_SCORES);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: Unable to read face scores",
- __FUNCTION__, client->getCameraId());
- return res;
- }
- const uint8_t *faceScores = entry.data.u8;
-
- const int32_t *faceLandmarks = NULL;
- const int32_t *faceIds = NULL;
-
- if (faceDetectMode == ANDROID_STATS_FACE_DETECTION_FULL) {
- entry = frame.find(ANDROID_STATS_FACE_LANDMARKS);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: Unable to read face landmarks",
- __FUNCTION__, client->getCameraId());
- return res;
- }
- faceLandmarks = entry.data.i32;
-
- entry = frame.find(ANDROID_STATS_FACE_IDS);
-
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: Unable to read face IDs",
- __FUNCTION__, client->getCameraId());
- return res;
- }
- faceIds = entry.data.i32;
- }
-
- faces.setCapacity(metadata.number_of_faces);
-
- size_t maxFaces = metadata.number_of_faces;
- for (size_t i = 0; i < maxFaces; i++) {
- if (faceScores[i] == 0) {
- metadata.number_of_faces--;
- continue;
- }
-
- camera_face_t face;
-
- face.rect[0] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 0]);
- face.rect[1] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 1]);
- face.rect[2] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 2]);
- face.rect[3] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 3]);
-
- face.score = faceScores[i];
- if (faceDetectMode == ANDROID_STATS_FACE_DETECTION_FULL) {
- face.id = faceIds[i];
- face.left_eye[0] =
- l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]);
- face.left_eye[1] =
- l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]);
- face.right_eye[0] =
- l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]);
- face.right_eye[1] =
- l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]);
- face.mouth[0] =
- l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]);
- face.mouth[1] =
- l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]);
- } else {
- face.id = 0;
- face.left_eye[0] = face.left_eye[1] = -2000;
- face.right_eye[0] = face.right_eye[1] = -2000;
- face.mouth[0] = face.mouth[1] = -2000;
- }
- faces.push_back(face);
- }
-
- metadata.faces = faces.editArray();
- }
-
- /* warning: locks SharedCameraClient */
- callbackFaceDetection(client, metadata);
-
- return OK;
-}
-
-void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
- /*in*/camera_frame_metadata &metadata) {
-
- /* Filter out repeated 0-face callbacks, but not when the last frame was >0 */
- if (metadata.number_of_faces != 0 || mLastFrameNumberOfFaces != metadata.number_of_faces) {
- Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
- if (l.mCameraClient != NULL) {
- l.mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
- NULL, &metadata);
- }
- }
-
- mLastFrameNumberOfFaces = metadata.number_of_faces;
-}
-
-}; // namespace camera2
-}; // namespace android
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
new file mode 100644
index 0000000..2d1253f
--- /dev/null
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2ClientBase"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <cutils/properties.h>
+#include <gui/Surface.h>
+#include <gui/Surface.h>
+
+#include "common/Camera2ClientBase.h"
+
+#include "api2/CameraDeviceClient.h"
+
+#include "CameraDeviceFactory.h"
+
+namespace android {
+using namespace camera2;
+
+static int getCallingPid() {
+ return IPCThreadState::self()->getCallingPid();
+}
+
+// Interface used by CameraService
+
+template <typename TClientBase>
+Camera2ClientBase<TClientBase>::Camera2ClientBase(
+ const sp<CameraService>& cameraService,
+ const sp<TCamCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid):
+ TClientBase(cameraService, remoteCallback, clientPackageName,
+ cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ mSharedCameraCallbacks(remoteCallback)
+{
+ ALOGI("Camera %d: Opened", cameraId);
+
+ mDevice = CameraDeviceFactory::createDevice(cameraId);
+ LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::checkPid(const char* checkLocation)
+ const {
+
+ int callingPid = getCallingPid();
+ if (callingPid == TClientBase::mClientPid) return NO_ERROR;
+
+ ALOGE("%s: attempt to use a locked camera from a different process"
+ " (old pid %d, new pid %d)", checkLocation, TClientBase::mClientPid, callingPid);
+ return PERMISSION_DENIED;
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::initialize(camera_module_t *module) {
+ ATRACE_CALL();
+ ALOGV("%s: Initializing client for camera %d", __FUNCTION__,
+ TClientBase::mCameraId);
+ status_t res;
+
+ // Verify ops permissions
+ res = TClientBase::startCameraOps();
+ if (res != OK) {
+ return res;
+ }
+
+ if (mDevice == NULL) {
+ ALOGE("%s: Camera %d: No device connected",
+ __FUNCTION__, TClientBase::mCameraId);
+ return NO_INIT;
+ }
+
+ res = mDevice->initialize(module);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
+ __FUNCTION__, TClientBase::mCameraId, strerror(-res), res);
+ return res;
+ }
+
+ res = mDevice->setNotifyCallback(this);
+
+ return OK;
+}
+
+template <typename TClientBase>
+Camera2ClientBase<TClientBase>::~Camera2ClientBase() {
+ ATRACE_CALL();
+
+ TClientBase::mDestructionStarted = true;
+
+ TClientBase::finishCameraOps();
+
+ disconnect();
+
+ ALOGI("Closed Camera %d", TClientBase::mCameraId);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::dump(int fd,
+ const Vector<String16>& args) {
+ String8 result;
+ result.appendFormat("Camera2ClientBase[%d] (%p) PID: %d, dump:\n",
+ TClientBase::mCameraId,
+ TClientBase::getRemoteCallback()->asBinder().get(),
+ TClientBase::mClientPid);
+ result.append(" State: ");
+
+ write(fd, result.string(), result.size());
+ // TODO: print dynamic/request section from most recent requests
+
+ return dumpDevice(fd, args);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::dumpDevice(
+ int fd,
+ const Vector<String16>& args) {
+ String8 result;
+
+ result = " Device dump:\n";
+ write(fd, result.string(), result.size());
+
+ if (!mDevice.get()) {
+ result = " *** Device is detached\n";
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+ }
+
+ status_t res = mDevice->dump(fd, args);
+ if (res != OK) {
+ result = String8::format(" Error dumping device: %s (%d)",
+ strerror(-res), res);
+ write(fd, result.string(), result.size());
+ }
+
+ return NO_ERROR;
+}
+
+// ICameraClient2BaseUser interface
+
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::disconnect() {
+ ATRACE_CALL();
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ // Allow both client and the media server to disconnect at all times
+ int callingPid = getCallingPid();
+ if (callingPid != TClientBase::mClientPid &&
+ callingPid != TClientBase::mServicePid) return;
+
+ ALOGV("Camera %d: Shutting down", TClientBase::mCameraId);
+
+ detachDevice();
+
+ CameraService::BasicClient::disconnect();
+
+ ALOGV("Camera %d: Shut down complete complete", TClientBase::mCameraId);
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::detachDevice() {
+ if (mDevice == 0) return;
+ mDevice->disconnect();
+
+ mDevice.clear();
+
+ ALOGV("Camera %d: Detach complete", TClientBase::mCameraId);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::connect(
+ const sp<TCamCallbacks>& client) {
+ ATRACE_CALL();
+ ALOGV("%s: E", __FUNCTION__);
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (TClientBase::mClientPid != 0 &&
+ getCallingPid() != TClientBase::mClientPid) {
+
+ ALOGE("%s: Camera %d: Connection attempt from pid %d; "
+ "current locked to pid %d",
+ __FUNCTION__,
+ TClientBase::mCameraId,
+ getCallingPid(),
+ TClientBase::mClientPid);
+ return BAD_VALUE;
+ }
+
+ TClientBase::mClientPid = getCallingPid();
+
+ TClientBase::mRemoteCallback = client;
+ mSharedCameraCallbacks = client;
+
+ return OK;
+}
+
+/** Device-related methods */
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyError(int errorCode, int arg1,
+ int arg2) {
+ ALOGE("Error condition %d reported by HAL, arguments %d, %d", errorCode,
+ arg1, arg2);
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyIdle() {
+ ALOGV("Camera device is now idle");
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyShutter(int requestId,
+ nsecs_t timestamp) {
+ (void)requestId;
+ (void)timestamp;
+
+ ALOGV("%s: Shutter notification for request id %d at time %lld",
+ __FUNCTION__, requestId, timestamp);
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyAutoFocus(uint8_t newState,
+ int triggerId) {
+ (void)newState;
+ (void)triggerId;
+
+ ALOGV("%s: Autofocus state now %d, last trigger %d",
+ __FUNCTION__, newState, triggerId);
+
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyAutoExposure(uint8_t newState,
+ int triggerId) {
+ (void)newState;
+ (void)triggerId;
+
+ ALOGV("%s: Autoexposure state now %d, last trigger %d",
+ __FUNCTION__, newState, triggerId);
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyAutoWhitebalance(uint8_t newState,
+ int triggerId) {
+ (void)newState;
+ (void)triggerId;
+
+ ALOGV("%s: Auto-whitebalance state now %d, last trigger %d",
+ __FUNCTION__, newState, triggerId);
+}
+
+template <typename TClientBase>
+int Camera2ClientBase<TClientBase>::getCameraId() const {
+ return TClientBase::mCameraId;
+}
+
+template <typename TClientBase>
+const sp<CameraDeviceBase>& Camera2ClientBase<TClientBase>::getCameraDevice() {
+ return mDevice;
+}
+
+template <typename TClientBase>
+const sp<CameraService>& Camera2ClientBase<TClientBase>::getCameraService() {
+ return TClientBase::mCameraService;
+}
+
+template <typename TClientBase>
+Camera2ClientBase<TClientBase>::SharedCameraCallbacks::Lock::Lock(
+ SharedCameraCallbacks &client) :
+
+ mRemoteCallback(client.mRemoteCallback),
+ mSharedClient(client) {
+
+ mSharedClient.mRemoteCallbackLock.lock();
+}
+
+template <typename TClientBase>
+Camera2ClientBase<TClientBase>::SharedCameraCallbacks::Lock::~Lock() {
+ mSharedClient.mRemoteCallbackLock.unlock();
+}
+
+template <typename TClientBase>
+Camera2ClientBase<TClientBase>::SharedCameraCallbacks::SharedCameraCallbacks(
+ const sp<TCamCallbacks>&client) :
+
+ mRemoteCallback(client) {
+}
+
+template <typename TClientBase>
+typename Camera2ClientBase<TClientBase>::SharedCameraCallbacks&
+Camera2ClientBase<TClientBase>::SharedCameraCallbacks::operator=(
+ const sp<TCamCallbacks>&client) {
+
+ Mutex::Autolock l(mRemoteCallbackLock);
+ mRemoteCallback = client;
+ return *this;
+}
+
+template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::SharedCameraCallbacks::clear() {
+ Mutex::Autolock l(mRemoteCallbackLock);
+ mRemoteCallback.clear();
+}
+
+template class Camera2ClientBase<CameraService::ProClient>;
+template class Camera2ClientBase<CameraService::Client>;
+template class Camera2ClientBase<CameraDeviceClientBase>;
+
+} // namespace android
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
new file mode 100644
index 0000000..61e44f0
--- /dev/null
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
+
+#include "common/CameraDeviceBase.h"
+
+namespace android {
+
+class IMemory;
+
+class CameraService;
+
+template <typename TClientBase>
+class Camera2ClientBase :
+ public TClientBase,
+ public CameraDeviceBase::NotificationListener
+{
+public:
+ typedef typename TClientBase::TCamCallbacks TCamCallbacks;
+
+ /**
+ * Base binder interface (see ICamera/IProCameraUser for details)
+ */
+ virtual status_t connect(const sp<TCamCallbacks>& callbacks);
+ virtual void disconnect();
+
+ /**
+ * Interface used by CameraService
+ */
+
+ // TODO: too many params, move into a ClientArgs<T>
+ Camera2ClientBase(const sp<CameraService>& cameraService,
+ const sp<TCamCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid);
+ virtual ~Camera2ClientBase();
+
+ virtual status_t initialize(camera_module_t *module);
+ virtual status_t dump(int fd, const Vector<String16>& args);
+
+ /**
+ * CameraDeviceBase::NotificationListener implementation
+ */
+
+ virtual void notifyError(int errorCode, int arg1, int arg2);
+ virtual void notifyIdle();
+ virtual void notifyShutter(int requestId, nsecs_t timestamp);
+ virtual void notifyAutoFocus(uint8_t newState, int triggerId);
+ virtual void notifyAutoExposure(uint8_t newState, int triggerId);
+ virtual void notifyAutoWhitebalance(uint8_t newState,
+ int triggerId);
+
+
+ int getCameraId() const;
+ const sp<CameraDeviceBase>&
+ getCameraDevice();
+ const sp<CameraService>&
+ getCameraService();
+
+ /**
+ * Interface used by independent components of CameraClient2Base.
+ */
+
+ // Simple class to ensure that access to TCamCallbacks is serialized
+ // by requiring mRemoteCallbackLock to be locked before access to
+ // mRemoteCallback is possible.
+ class SharedCameraCallbacks {
+ public:
+ class Lock {
+ public:
+ Lock(SharedCameraCallbacks &client);
+ ~Lock();
+ sp<TCamCallbacks> &mRemoteCallback;
+ private:
+ SharedCameraCallbacks &mSharedClient;
+ };
+ SharedCameraCallbacks(const sp<TCamCallbacks>& client);
+ SharedCameraCallbacks& operator=(const sp<TCamCallbacks>& client);
+ void clear();
+ private:
+ sp<TCamCallbacks> mRemoteCallback;
+ mutable Mutex mRemoteCallbackLock;
+ } mSharedCameraCallbacks;
+
+protected:
+
+ virtual sp<IBinder> asBinderWrapper() {
+ return IInterface::asBinder();
+ }
+
+ virtual status_t dumpDevice(int fd, const Vector<String16>& args);
+
+ /** Binder client interface-related private members */
+
+ // Mutex that must be locked by methods implementing the binder client
+ // interface. Ensures serialization between incoming client calls.
+ // All methods in this class hierarchy that append 'L' to the name assume
+ // that mBinderSerializationLock is locked when they're called
+ mutable Mutex mBinderSerializationLock;
+
+ /** CameraDeviceBase instance wrapping HAL2+ entry */
+
+ sp<CameraDeviceBase> mDevice;
+
+ /** Utility members */
+
+ // Verify that caller is the owner of the camera
+ status_t checkPid(const char *checkLocation) const;
+
+ virtual void detachDevice();
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.cpp b/services/camera/libcameraservice/common/CameraDeviceBase.cpp
new file mode 100644
index 0000000..6c4e87f
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CameraDeviceBase.h"
+
+namespace android {
+
+/**
+ * Base class destructors
+ */
+CameraDeviceBase::~CameraDeviceBase() {
+}
+
+CameraDeviceBase::NotificationListener::~NotificationListener() {
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
new file mode 100644
index 0000000..e80abf1
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
+#define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
+
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Timers.h>
+
+#include "hardware/camera2.h"
+#include "camera/CameraMetadata.h"
+
+namespace android {
+
+/**
+ * Base interface for version >= 2 camera device classes, which interface to
+ * camera HAL device versions >= 2.
+ */
+class CameraDeviceBase : public virtual RefBase {
+ public:
+ virtual ~CameraDeviceBase();
+
+ /**
+ * The device's camera ID
+ */
+ virtual int getId() const = 0;
+
+ virtual status_t initialize(camera_module_t *module) = 0;
+ virtual status_t disconnect() = 0;
+
+ virtual status_t dump(int fd, const Vector<String16>& args) = 0;
+
+ /**
+ * The device's static characteristics metadata buffer
+ */
+ virtual const CameraMetadata& info() const = 0;
+
+ /**
+ * Submit request for capture. The CameraDevice takes ownership of the
+ * passed-in buffer.
+ */
+ virtual status_t capture(CameraMetadata &request) = 0;
+
+ /**
+ * Submit request for streaming. The CameraDevice makes a copy of the
+ * passed-in buffer and the caller retains ownership.
+ */
+ virtual status_t setStreamingRequest(const CameraMetadata &request) = 0;
+
+ /**
+ * Clear the streaming request slot.
+ */
+ virtual status_t clearStreamingRequest() = 0;
+
+ /**
+ * Wait until a request with the given ID has been dequeued by the
+ * HAL. Returns TIMED_OUT if the timeout duration is reached. Returns
+ * immediately if the latest request received by the HAL has this id.
+ */
+ virtual status_t waitUntilRequestReceived(int32_t requestId,
+ nsecs_t timeout) = 0;
+
+ /**
+ * Create an output stream of the requested size and format.
+ *
+ * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device selects
+ * an appropriate format; it can be queried with getStreamInfo.
+ *
+ * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must be
+ * equal to the size in bytes of the buffers to allocate for the stream. For
+ * other formats, the size parameter is ignored.
+ */
+ virtual status_t createStream(sp<ANativeWindow> consumer,
+ uint32_t width, uint32_t height, int format, size_t size,
+ int *id) = 0;
+
+ /**
+ * Create an input reprocess stream that uses buffers from an existing
+ * output stream.
+ */
+ virtual status_t createReprocessStreamFromStream(int outputId, int *id) = 0;
+
+ /**
+ * Get information about a given stream.
+ */
+ virtual status_t getStreamInfo(int id,
+ uint32_t *width, uint32_t *height, uint32_t *format) = 0;
+
+ /**
+ * Set stream gralloc buffer transform
+ */
+ virtual status_t setStreamTransform(int id, int transform) = 0;
+
+ /**
+ * Delete stream. Must not be called if there are requests in flight which
+ * reference that stream.
+ */
+ virtual status_t deleteStream(int id) = 0;
+
+ /**
+ * Delete reprocess stream. Must not be called if there are requests in
+ * flight which reference that stream.
+ */
+ virtual status_t deleteReprocessStream(int id) = 0;
+
+ /**
+ * Create a metadata buffer with fields that the HAL device believes are
+ * best for the given use case
+ */
+ virtual status_t createDefaultRequest(int templateId,
+ CameraMetadata *request) = 0;
+
+ /**
+ * Wait until all requests have been processed. Returns INVALID_OPERATION if
+ * the streaming slot is not empty, or TIMED_OUT if the requests haven't
+ * finished processing in 10 seconds.
+ */
+ virtual status_t waitUntilDrained() = 0;
+
+ /**
+ * Abstract class for HAL notification listeners
+ */
+ class NotificationListener {
+ public:
+ // The set of notifications is a merge of the notifications required for
+ // API1 and API2.
+
+ // Required for API 1 and 2
+ virtual void notifyError(int errorCode, int arg1, int arg2) = 0;
+
+ // Required only for API2
+ virtual void notifyIdle() = 0;
+ virtual void notifyShutter(int requestId,
+ nsecs_t timestamp) = 0;
+
+ // Required only for API1
+ virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0;
+ virtual void notifyAutoExposure(uint8_t newState, int triggerId) = 0;
+ virtual void notifyAutoWhitebalance(uint8_t newState,
+ int triggerId) = 0;
+ protected:
+ virtual ~NotificationListener();
+ };
+
+ /**
+ * Connect HAL notifications to a listener. Overwrites previous
+ * listener. Set to NULL to stop receiving notifications.
+ */
+ virtual status_t setNotifyCallback(NotificationListener *listener) = 0;
+
+ /**
+ * Whether the device supports calling notifyAutofocus, notifyAutoExposure,
+ * and notifyAutoWhitebalance; if this returns false, the client must
+ * synthesize these notifications from received frame metadata.
+ */
+ virtual bool willNotify3A() = 0;
+
+ /**
+ * Wait for a new frame to be produced, with timeout in nanoseconds.
+ * Returns TIMED_OUT when no frame produced within the specified duration
+ * May be called concurrently to most methods, except for getNextFrame
+ */
+ virtual status_t waitForNextFrame(nsecs_t timeout) = 0;
+
+ /**
+ * Get next metadata frame from the frame queue. Returns NULL if the queue
+ * is empty; caller takes ownership of the metadata buffer.
+ * May be called concurrently to most methods, except for waitForNextFrame
+ */
+ virtual status_t getNextFrame(CameraMetadata *frame) = 0;
+
+ /**
+ * Trigger auto-focus. The latest ID used in a trigger autofocus or cancel
+ * autofocus call will be returned by the HAL in all subsequent AF
+ * notifications.
+ */
+ virtual status_t triggerAutofocus(uint32_t id) = 0;
+
+ /**
+ * Cancel auto-focus. The latest ID used in a trigger autofocus/cancel
+ * autofocus call will be returned by the HAL in all subsequent AF
+ * notifications.
+ */
+ virtual status_t triggerCancelAutofocus(uint32_t id) = 0;
+
+ /**
+ * Trigger pre-capture metering. The latest ID used in a trigger pre-capture
+ * call will be returned by the HAL in all subsequent AE and AWB
+ * notifications.
+ */
+ virtual status_t triggerPrecaptureMetering(uint32_t id) = 0;
+
+ /**
+ * Abstract interface for clients that want to listen to reprocess buffer
+ * release events
+ */
+ struct BufferReleasedListener : public virtual RefBase {
+ virtual void onBufferReleased(buffer_handle_t *handle) = 0;
+ };
+
+ /**
+ * Push a buffer to be reprocessed into a reprocessing stream, and
+ * provide a listener to call once the buffer is returned by the HAL
+ */
+ virtual status_t pushReprocessBuffer(int reprocessStreamId,
+ buffer_handle_t *buffer, wp<BufferReleasedListener> listener) = 0;
+
+ /**
+ * Flush all pending and in-flight requests. Blocks until flush is
+ * complete.
+ */
+ virtual status_t flush() = 0;
+
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
new file mode 100644
index 0000000..f2064fb
--- /dev/null
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-FrameProcessorBase"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/FrameProcessorBase.h"
+#include "common/CameraDeviceBase.h"
+
+namespace android {
+namespace camera2 {
+
+FrameProcessorBase::FrameProcessorBase(wp<CameraDeviceBase> device) :
+ Thread(/*canCallJava*/false),
+ mDevice(device) {
+}
+
+FrameProcessorBase::~FrameProcessorBase() {
+ ALOGV("%s: Exit", __FUNCTION__);
+}
+
+status_t FrameProcessorBase::registerListener(int32_t minId,
+ int32_t maxId, wp<FilteredListener> listener, bool quirkSendPartials) {
+ Mutex::Autolock l(mInputMutex);
+ ALOGV("%s: Registering listener for frame id range %d - %d",
+ __FUNCTION__, minId, maxId);
+ RangeListener rListener = { minId, maxId, listener, quirkSendPartials };
+ mRangeListeners.push_back(rListener);
+ return OK;
+}
+
+status_t FrameProcessorBase::removeListener(int32_t minId,
+ int32_t maxId,
+ wp<FilteredListener> listener) {
+ Mutex::Autolock l(mInputMutex);
+ List<RangeListener>::iterator item = mRangeListeners.begin();
+ while (item != mRangeListeners.end()) {
+ if (item->minId == minId &&
+ item->maxId == maxId &&
+ item->listener == listener) {
+ item = mRangeListeners.erase(item);
+ } else {
+ item++;
+ }
+ }
+ return OK;
+}
+
+void FrameProcessorBase::dump(int fd, const Vector<String16>& /*args*/) {
+ String8 result(" Latest received frame:\n");
+ write(fd, result.string(), result.size());
+
+ CameraMetadata lastFrame;
+ {
+ // Don't race while dumping metadata
+ Mutex::Autolock al(mLastFrameMutex);
+ lastFrame = CameraMetadata(mLastFrame);
+ }
+ lastFrame.dump(fd, 2, 6);
+}
+
+bool FrameProcessorBase::threadLoop() {
+ status_t res;
+
+ sp<CameraDeviceBase> device;
+ {
+ device = mDevice.promote();
+ if (device == 0) return false;
+ }
+
+ res = device->waitForNextFrame(kWaitDuration);
+ if (res == OK) {
+ processNewFrames(device);
+ } else if (res != TIMED_OUT) {
+ ALOGE("FrameProcessorBase: Error waiting for new "
+ "frames: %s (%d)", strerror(-res), res);
+ }
+
+ return true;
+}
+
+void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) {
+ status_t res;
+ ATRACE_CALL();
+ CameraMetadata frame;
+
+ ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());
+
+ while ( (res = device->getNextFrame(&frame)) == OK) {
+
+ camera_metadata_entry_t entry;
+
+ entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: Error reading frame number",
+ __FUNCTION__, device->getId());
+ break;
+ }
+ ATRACE_INT("cam2_frame", entry.data.i32[0]);
+
+ if (!processSingleFrame(frame, device)) {
+ break;
+ }
+
+ if (!frame.isEmpty()) {
+ Mutex::Autolock al(mLastFrameMutex);
+ mLastFrame.acquire(frame);
+ }
+ }
+ if (res != NOT_ENOUGH_DATA) {
+ ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
+ __FUNCTION__, device->getId(), strerror(-res), res);
+ return;
+ }
+
+ return;
+}
+
+bool FrameProcessorBase::processSingleFrame(CameraMetadata &frame,
+ const sp<CameraDeviceBase> &device) {
+ ALOGV("%s: Camera %d: Process single frame (is empty? %d)",
+ __FUNCTION__, device->getId(), frame.isEmpty());
+ return processListeners(frame, device) == OK;
+}
+
+status_t FrameProcessorBase::processListeners(const CameraMetadata &frame,
+ const sp<CameraDeviceBase> &device) {
+ ATRACE_CALL();
+ camera_metadata_ro_entry_t entry;
+
+ // Quirks: Don't deliver partial results to listeners that don't want them
+ bool quirkIsPartial = false;
+ entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+ if (entry.count != 0 &&
+ entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
+ __FUNCTION__, device->getId());
+ quirkIsPartial = true;
+ }
+
+ entry = frame.find(ANDROID_REQUEST_ID);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: Error reading frame id",
+ __FUNCTION__, device->getId());
+ return BAD_VALUE;
+ }
+ int32_t requestId = entry.data.i32[0];
+
+ List<sp<FilteredListener> > listeners;
+ {
+ Mutex::Autolock l(mInputMutex);
+
+ List<RangeListener>::iterator item = mRangeListeners.begin();
+ while (item != mRangeListeners.end()) {
+ if (requestId >= item->minId &&
+ requestId < item->maxId &&
+ (!quirkIsPartial || item->quirkSendPartials) ) {
+ sp<FilteredListener> listener = item->listener.promote();
+ if (listener == 0) {
+ item = mRangeListeners.erase(item);
+ continue;
+ } else {
+ listeners.push_back(listener);
+ }
+ }
+ item++;
+ }
+ }
+ ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size());
+ List<sp<FilteredListener> >::iterator item = listeners.begin();
+ for (; item != listeners.end(); item++) {
+ (*item)->onFrameAvailable(requestId, frame);
+ }
+ return OK;
+}
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 3bd4e25..89b608a 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,72 +14,71 @@
* limitations under the License.
*/
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_PROFRAMEPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_PROFRAMEPROCESSOR_H
#include <utils/Thread.h>
#include <utils/String16.h>
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
#include <utils/List.h>
-#include "CameraMetadata.h"
-
-struct camera_frame_metadata;
+#include <camera/CameraMetadata.h>
namespace android {
-class Camera2Client;
+class CameraDeviceBase;
namespace camera2 {
/* Output frame metadata processing thread. This thread waits for new
* frames from the device, and analyzes them as necessary.
*/
-class FrameProcessor: public Thread {
+class FrameProcessorBase: public Thread {
public:
- FrameProcessor(wp<Camera2Client> client);
- ~FrameProcessor();
+ FrameProcessorBase(wp<CameraDeviceBase> device);
+ virtual ~FrameProcessorBase();
struct FilteredListener: virtual public RefBase {
- virtual void onFrameAvailable(int32_t frameId,
- const CameraMetadata &frame) = 0;
+ virtual void onFrameAvailable(int32_t requestId,
+ const CameraMetadata &frame) = 0;
};
// Register a listener for a range of IDs [minId, maxId). Multiple listeners
- // can be listening to the same range
- status_t registerListener(int32_t minId, int32_t maxId, wp<FilteredListener> listener);
- status_t removeListener(int32_t minId, int32_t maxId, wp<FilteredListener> listener);
+ // can be listening to the same range.
+ // QUIRK: sendPartials controls whether partial results will be sent.
+ status_t registerListener(int32_t minId, int32_t maxId,
+ wp<FilteredListener> listener,
+ bool quirkSendPartials = true);
+ status_t removeListener(int32_t minId, int32_t maxId,
+ wp<FilteredListener> listener);
void dump(int fd, const Vector<String16>& args);
- private:
+ protected:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
- wp<Camera2Client> mClient;
+ wp<CameraDeviceBase> mDevice;
virtual bool threadLoop();
Mutex mInputMutex;
+ Mutex mLastFrameMutex;
struct RangeListener {
int32_t minId;
int32_t maxId;
wp<FilteredListener> listener;
+ bool quirkSendPartials;
};
List<RangeListener> mRangeListeners;
- void processNewFrames(sp<Camera2Client> &client);
+ void processNewFrames(const sp<CameraDeviceBase> &device);
- status_t processFaceDetect(const CameraMetadata &frame,
- sp<Camera2Client> &client);
+ virtual bool processSingleFrame(CameraMetadata &frame,
+ const sp<CameraDeviceBase> &device);
status_t processListeners(const CameraMetadata &frame,
- sp<Camera2Client> &client);
+ const sp<CameraDeviceBase> &device);
CameraMetadata mLastFrame;
- int mLastFrameNumberOfFaces;
-
- // Emit FaceDetection event to java if faces changed
- void callbackFaceDetection(sp<Camera2Client> client,
- camera_frame_metadata &metadata);
};
diff --git a/services/camera/libcameraservice/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 05ac9fa..87b2807 100644
--- a/services/camera/libcameraservice/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -47,7 +47,8 @@ typedef void (*data_callback_timestamp)(nsecs_t timestamp,
/**
* CameraHardwareInterface.h defines the interface to the
* camera hardware abstraction layer, used for setting and getting
- * parameters, live previewing, and taking pictures.
+ * parameters, live previewing, and taking pictures. It is used for
+ * HAL devices with version CAMERA_DEVICE_API_VERSION_1_0 only.
*
* It is a referenced counted interface with RefBase as its base class.
* CameraService calls openCameraHardware() to retrieve a strong pointer to the
@@ -56,24 +57,18 @@ typedef void (*data_callback_timestamp)(nsecs_t timestamp,
*
* -# After CameraService calls openCameraHardware(), getParameters() and
* setParameters() are used to initialize the camera instance.
- * CameraService calls getPreviewHeap() to establish access to the
- * preview heap so it can be registered with SurfaceFlinger for
- * efficient display updating while in preview mode.
- * -# startPreview() is called. The camera instance then periodically
- * sends the message CAMERA_MSG_PREVIEW_FRAME (if enabled) each time
- * a new preview frame is available. If data callback code needs to use
- * this memory after returning, it must copy the data.
+ * -# startPreview() is called.
*
- * Prior to taking a picture, CameraService calls autofocus(). When auto
+ * Prior to taking a picture, CameraService often calls autofocus(). When auto
* focusing has completed, the camera instance sends a CAMERA_MSG_FOCUS notification,
* which informs the application whether focusing was successful. The camera instance
* only sends this message once and it is up to the application to call autoFocus()
* again if refocusing is desired.
*
* CameraService calls takePicture() to request the camera instance take a
- * picture. At this point, if a shutter, postview, raw, and/or compressed callback
- * is desired, the corresponding message must be enabled. As with CAMERA_MSG_PREVIEW_FRAME,
- * any memory provided in a data callback must be copied if it's needed after returning.
+ * picture. At this point, if a shutter, postview, raw, and/or compressed
+ * callback is desired, the corresponding message must be enabled. Any memory
+ * provided in a data callback must be copied if it's needed after returning.
*/
class CameraHardwareInterface : public virtual RefBase {
@@ -427,7 +422,7 @@ public:
/**
* Dump state of the camera hardware
*/
- status_t dump(int fd, const Vector<String16>& args) const
+ status_t dump(int fd, const Vector<String16>& /*args*/) const
{
ALOGV("%s(%s)", __FUNCTION__, mName.string());
if (mDevice->ops->dump)
@@ -584,9 +579,10 @@ private:
#endif
static int __lock_buffer(struct preview_stream_ops* w,
- buffer_handle_t* buffer)
+ buffer_handle_t* /*buffer*/)
{
ANativeWindow *a = anw(w);
+ (void)a;
return 0;
}
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index d6445c1..dc97c47 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -25,6 +25,7 @@
#define ALOGVV(...) ((void)0)
#endif
+#include <inttypes.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <utils/Timers.h>
@@ -34,7 +35,7 @@ namespace android {
Camera2Device::Camera2Device(int id):
mId(id),
- mDevice(NULL)
+ mHal2Device(NULL)
{
ATRACE_CALL();
ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
@@ -47,11 +48,15 @@ Camera2Device::~Camera2Device()
disconnect();
}
+int Camera2Device::getId() const {
+ return mId;
+}
+
status_t Camera2Device::initialize(camera_module_t *module)
{
ATRACE_CALL();
ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
- if (mDevice != NULL) {
+ if (mHal2Device != NULL) {
ALOGE("%s: Already initialized!", __FUNCTION__);
return INVALID_OPERATION;
}
@@ -131,7 +136,7 @@ status_t Camera2Device::initialize(camera_module_t *module)
}
mDeviceInfo = info.static_camera_characteristics;
- mDevice = device;
+ mHal2Device = device;
return OK;
}
@@ -139,23 +144,23 @@ status_t Camera2Device::initialize(camera_module_t *module)
status_t Camera2Device::disconnect() {
ATRACE_CALL();
status_t res = OK;
- if (mDevice) {
+ if (mHal2Device) {
ALOGV("%s: Closing device for camera %d", __FUNCTION__, mId);
- int inProgressCount = mDevice->ops->get_in_progress_count(mDevice);
+ int inProgressCount = mHal2Device->ops->get_in_progress_count(mHal2Device);
if (inProgressCount > 0) {
ALOGW("%s: Closing camera device %d with %d requests in flight!",
__FUNCTION__, mId, inProgressCount);
}
mReprocessStreams.clear();
mStreams.clear();
- res = mDevice->common.close(&mDevice->common);
+ res = mHal2Device->common.close(&mHal2Device->common);
if (res != OK) {
ALOGE("%s: Could not close camera %d: %s (%d)",
__FUNCTION__,
mId, strerror(-res), res);
}
- mDevice = NULL;
+ mHal2Device = NULL;
ALOGV("%s: Shutdown complete", __FUNCTION__);
}
return res;
@@ -197,12 +202,12 @@ status_t Camera2Device::dump(int fd, const Vector<String16>& args) {
write(fd, result.string(), result.size());
status_t res;
- res = mDevice->ops->dump(mDevice, fd);
+ res = mHal2Device->ops->dump(mHal2Device, fd);
return res;
}
-const camera2::CameraMetadata& Camera2Device::info() const {
+const CameraMetadata& Camera2Device::info() const {
ALOGVV("%s: E", __FUNCTION__);
return mDeviceInfo;
@@ -240,7 +245,7 @@ status_t Camera2Device::createStream(sp<ANativeWindow> consumer,
status_t res;
ALOGV("%s: E", __FUNCTION__);
- sp<StreamAdapter> stream = new StreamAdapter(mDevice);
+ sp<StreamAdapter> stream = new StreamAdapter(mHal2Device);
res = stream->connectToDevice(consumer, width, height, format, size);
if (res != OK) {
@@ -276,7 +281,7 @@ status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
return BAD_VALUE;
}
- sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mDevice);
+ sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mHal2Device);
res = stream->connectToDevice((*streamI));
if (res != OK) {
@@ -401,8 +406,8 @@ status_t Camera2Device::createDefaultRequest(int templateId,
status_t err;
ALOGV("%s: E", __FUNCTION__);
camera_metadata_t *rawRequest;
- err = mDevice->ops->construct_default_request(
- mDevice, templateId, &rawRequest);
+ err = mHal2Device->ops->construct_default_request(
+ mHal2Device, templateId, &rawRequest);
request->acquire(rawRequest);
return err;
}
@@ -417,12 +422,12 @@ status_t Camera2Device::waitUntilDrained() {
// TODO: Set up notifications from HAL, instead of sleeping here
uint32_t totalTime = 0;
- while (mDevice->ops->get_in_progress_count(mDevice) > 0) {
+ while (mHal2Device->ops->get_in_progress_count(mHal2Device) > 0) {
usleep(kSleepTime);
totalTime += kSleepTime;
if (totalTime > kMaxSleepTime) {
ALOGE("%s: Waited %d us, %d requests still in flight", __FUNCTION__,
- mDevice->ops->get_in_progress_count(mDevice), totalTime);
+ totalTime, mHal2Device->ops->get_in_progress_count(mHal2Device));
return TIMED_OUT;
}
}
@@ -433,7 +438,7 @@ status_t Camera2Device::waitUntilDrained() {
status_t Camera2Device::setNotifyCallback(NotificationListener *listener) {
ATRACE_CALL();
status_t res;
- res = mDevice->ops->set_notify_callback(mDevice, notificationCallback,
+ res = mHal2Device->ops->set_notify_callback(mHal2Device, notificationCallback,
reinterpret_cast<void*>(listener) );
if (res != OK) {
ALOGE("%s: Unable to set notification callback!", __FUNCTION__);
@@ -441,6 +446,10 @@ status_t Camera2Device::setNotifyCallback(NotificationListener *listener) {
return res;
}
+bool Camera2Device::willNotify3A() {
+ return true;
+}
+
void Camera2Device::notificationCallback(int32_t msg_type,
int32_t ext1,
int32_t ext2,
@@ -456,8 +465,10 @@ void Camera2Device::notificationCallback(int32_t msg_type,
listener->notifyError(ext1, ext2, ext3);
break;
case CAMERA2_MSG_SHUTTER: {
- nsecs_t timestamp = (nsecs_t)ext2 | ((nsecs_t)(ext3) << 32 );
- listener->notifyShutter(ext1, timestamp);
+ // TODO: Only needed for camera2 API, which is unsupported
+ // by HAL2 directly.
+ // nsecs_t timestamp = (nsecs_t)ext2 | ((nsecs_t)(ext3) << 32 );
+ // listener->notifyShutter(requestId, timestamp);
break;
}
case CAMERA2_MSG_AUTOFOCUS:
@@ -497,7 +508,7 @@ status_t Camera2Device::triggerAutofocus(uint32_t id) {
ATRACE_CALL();
status_t res;
ALOGV("%s: Triggering autofocus, id %d", __FUNCTION__, id);
- res = mDevice->ops->trigger_action(mDevice,
+ res = mHal2Device->ops->trigger_action(mHal2Device,
CAMERA2_TRIGGER_AUTOFOCUS, id, 0);
if (res != OK) {
ALOGE("%s: Error triggering autofocus (id %d)",
@@ -510,7 +521,7 @@ status_t Camera2Device::triggerCancelAutofocus(uint32_t id) {
ATRACE_CALL();
status_t res;
ALOGV("%s: Canceling autofocus, id %d", __FUNCTION__, id);
- res = mDevice->ops->trigger_action(mDevice,
+ res = mHal2Device->ops->trigger_action(mHal2Device,
CAMERA2_TRIGGER_CANCEL_AUTOFOCUS, id, 0);
if (res != OK) {
ALOGE("%s: Error canceling autofocus (id %d)",
@@ -523,7 +534,7 @@ status_t Camera2Device::triggerPrecaptureMetering(uint32_t id) {
ATRACE_CALL();
status_t res;
ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id);
- res = mDevice->ops->trigger_action(mDevice,
+ res = mHal2Device->ops->trigger_action(mHal2Device,
CAMERA2_TRIGGER_PRECAPTURE_METERING, id, 0);
if (res != OK) {
ALOGE("%s: Error triggering precapture metering (id %d)",
@@ -559,11 +570,11 @@ status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId,
return res;
}
-/**
- * Camera2Device::NotificationListener
- */
+status_t Camera2Device::flush() {
+ ATRACE_CALL();
-Camera2Device::NotificationListener::~NotificationListener() {
+ mRequestQueue.clear();
+ return waitUntilDrained();
}
/**
@@ -571,7 +582,7 @@ Camera2Device::NotificationListener::~NotificationListener() {
*/
Camera2Device::MetadataQueue::MetadataQueue():
- mDevice(NULL),
+ mHal2Device(NULL),
mFrameCount(0),
mLatestRequestId(0),
mCount(0),
@@ -590,9 +601,7 @@ Camera2Device::MetadataQueue::MetadataQueue():
Camera2Device::MetadataQueue::~MetadataQueue() {
ATRACE_CALL();
- Mutex::Autolock l(mMutex);
- freeBuffers(mEntries.begin(), mEntries.end());
- freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
+ clear();
}
// Connect to camera2 HAL as consumer (input requests/reprocessing)
@@ -602,7 +611,7 @@ status_t Camera2Device::MetadataQueue::setConsumerDevice(camera2_device_t *d) {
res = d->ops->set_request_queue_src_ops(d,
this);
if (res != OK) return res;
- mDevice = d;
+ mHal2Device = d;
return OK;
}
@@ -765,7 +774,6 @@ status_t Camera2Device::MetadataQueue::setStreamSlot(
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
Mutex::Autolock l(mMutex);
- status_t res;
if (mStreamSlotCount > 0) {
freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
@@ -784,8 +792,25 @@ status_t Camera2Device::MetadataQueue::setStreamSlot(
return signalConsumerLocked();
}
+status_t Camera2Device::MetadataQueue::clear()
+{
+ ATRACE_CALL();
+ ALOGV("%s: E", __FUNCTION__);
+
+ Mutex::Autolock l(mMutex);
+
+ // Clear streaming slot
+ freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
+ mStreamSlotCount = 0;
+
+ // Clear request queue
+ freeBuffers(mEntries.begin(), mEntries.end());
+ mCount = 0;
+ return OK;
+}
+
status_t Camera2Device::MetadataQueue::dump(int fd,
- const Vector<String16>& args) {
+ const Vector<String16>& /*args*/) {
ATRACE_CALL();
String8 result;
status_t notLocked;
@@ -798,7 +823,7 @@ status_t Camera2Device::MetadataQueue::dump(int fd,
result.append(" Stream slot: Empty\n");
write(fd, result.string(), result.size());
} else {
- result.appendFormat(" Stream slot: %d entries\n",
+ result.appendFormat(" Stream slot: %zu entries\n",
mStreamSlot.size());
int i = 0;
for (List<camera_metadata_t*>::iterator r = mStreamSlot.begin();
@@ -813,7 +838,7 @@ status_t Camera2Device::MetadataQueue::dump(int fd,
result = " Main queue is empty\n";
write(fd, result.string(), result.size());
} else {
- result = String8::format(" Main queue has %d entries:\n",
+ result = String8::format(" Main queue has %zu entries:\n",
mEntries.size());
int i = 0;
for (List<camera_metadata_t*>::iterator r = mEntries.begin();
@@ -836,12 +861,12 @@ status_t Camera2Device::MetadataQueue::signalConsumerLocked() {
ATRACE_CALL();
status_t res = OK;
notEmpty.signal();
- if (mSignalConsumer && mDevice != NULL) {
+ if (mSignalConsumer && mHal2Device != NULL) {
mSignalConsumer = false;
mMutex.unlock();
ALOGV("%s: Signaling consumer", __FUNCTION__);
- res = mDevice->ops->notify_request_queue_not_empty(mDevice);
+ res = mHal2Device->ops->notify_request_queue_not_empty(mHal2Device);
mMutex.lock();
}
return res;
@@ -894,12 +919,13 @@ int Camera2Device::MetadataQueue::consumer_free(
{
ATRACE_CALL();
MetadataQueue *queue = getInstance(q);
+ (void)queue;
free_camera_metadata(old_buffer);
return OK;
}
int Camera2Device::MetadataQueue::producer_dequeue(
- const camera2_frame_queue_dst_ops_t *q,
+ const camera2_frame_queue_dst_ops_t * /*q*/,
size_t entries, size_t bytes,
camera_metadata_t **buffer)
{
@@ -912,7 +938,7 @@ int Camera2Device::MetadataQueue::producer_dequeue(
}
int Camera2Device::MetadataQueue::producer_cancel(
- const camera2_frame_queue_dst_ops_t *q,
+ const camera2_frame_queue_dst_ops_t * /*q*/,
camera_metadata_t *old_buffer)
{
ATRACE_CALL();
@@ -939,7 +965,7 @@ int Camera2Device::MetadataQueue::producer_enqueue(
Camera2Device::StreamAdapter::StreamAdapter(camera2_device_t *d):
mState(RELEASED),
- mDevice(d),
+ mHal2Device(d),
mId(-1),
mWidth(0), mHeight(0), mFormat(0), mSize(0), mUsage(0),
mMaxProducerBuffers(0), mMaxConsumerBuffers(0),
@@ -990,7 +1016,7 @@ status_t Camera2Device::StreamAdapter::connectToDevice(
uint32_t formatActual;
uint32_t usage;
uint32_t maxBuffers = 2;
- res = mDevice->ops->allocate_stream(mDevice,
+ res = mHal2Device->ops->allocate_stream(mHal2Device,
mWidth, mHeight, mFormatRequested, getStreamOps(),
&id, &formatActual, &usage, &maxBuffers);
if (res != OK) {
@@ -1106,7 +1132,7 @@ status_t Camera2Device::StreamAdapter::connectToDevice(
}
ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers);
- res = mDevice->ops->register_stream_buffers(mDevice,
+ res = mHal2Device->ops->register_stream_buffers(mHal2Device,
mId,
mTotalBuffers,
buffers);
@@ -1136,9 +1162,10 @@ cleanUpBuffers:
status_t Camera2Device::StreamAdapter::release() {
ATRACE_CALL();
status_t res;
- ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
+ ALOGV("%s: Releasing stream %d (%d x %d, format %d)", __FUNCTION__, mId,
+ mWidth, mHeight, mFormat);
if (mState >= ALLOCATED) {
- res = mDevice->ops->release_stream(mDevice, mId);
+ res = mHal2Device->ops->release_stream(mHal2Device, mId);
if (res != OK) {
ALOGE("%s: Unable to release stream %d",
__FUNCTION__, mId);
@@ -1184,15 +1211,15 @@ status_t Camera2Device::StreamAdapter::setTransform(int transform) {
}
status_t Camera2Device::StreamAdapter::dump(int fd,
- const Vector<String16>& args) {
+ const Vector<String16>& /*args*/) {
ATRACE_CALL();
String8 result = String8::format(" Stream %d: %d x %d, format 0x%x\n",
mId, mWidth, mHeight, mFormat);
- result.appendFormat(" size %d, usage 0x%x, requested format 0x%x\n",
+ result.appendFormat(" size %zu, usage 0x%x, requested format 0x%x\n",
mSize, mUsage, mFormatRequested);
result.appendFormat(" total buffers: %d, dequeued buffers: %d\n",
mTotalBuffers, mActiveBuffers);
- result.appendFormat(" frame count: %d, last timestamp %lld\n",
+ result.appendFormat(" frame count: %d, last timestamp %" PRId64 "\n",
mFrameCount, mLastTimestamp);
write(fd, result.string(), result.size());
return OK;
@@ -1319,7 +1346,7 @@ int Camera2Device::StreamAdapter::set_crop(const camera2_stream_ops_t* w,
Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d):
mState(RELEASED),
- mDevice(d),
+ mHal2Device(d),
mId(-1),
mWidth(0), mHeight(0), mFormat(0),
mActiveBuffers(0),
@@ -1361,7 +1388,7 @@ status_t Camera2Device::ReprocessStreamAdapter::connectToDevice(
// Allocate device-side stream interface
uint32_t id;
- res = mDevice->ops->allocate_reprocess_stream_from_stream(mDevice,
+ res = mHal2Device->ops->allocate_reprocess_stream_from_stream(mHal2Device,
outputStream->getId(), getStreamOps(),
&id);
if (res != OK) {
@@ -1385,7 +1412,7 @@ status_t Camera2Device::ReprocessStreamAdapter::release() {
status_t res;
ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
if (mState >= ACTIVE) {
- res = mDevice->ops->release_reprocess_stream(mDevice, mId);
+ res = mHal2Device->ops->release_reprocess_stream(mHal2Device, mId);
if (res != OK) {
ALOGE("%s: Unable to release stream %d",
__FUNCTION__, mId);
@@ -1423,7 +1450,7 @@ status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
}
status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
- const Vector<String16>& args) {
+ const Vector<String16>& /*args*/) {
ATRACE_CALL();
String8 result =
String8::format(" Reprocess stream %d: %d x %d, fmt 0x%x\n",
@@ -1444,7 +1471,7 @@ int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
const camera2_stream_in_ops_t *w,
buffer_handle_t** buffer) {
ATRACE_CALL();
- int res;
+
ReprocessStreamAdapter* stream =
const_cast<ReprocessStreamAdapter*>(
static_cast<const ReprocessStreamAdapter*>(w));
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 41df2e4..1f53c56 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -21,186 +21,61 @@
#include <utils/Errors.h>
#include <utils/List.h>
#include <utils/Mutex.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-#include "hardware/camera2.h"
-#include "camera2/CameraMetadata.h"
+#include "common/CameraDeviceBase.h"
namespace android {
-class Camera2Device : public virtual RefBase {
+/**
+ * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_2_0
+ *
+ * TODO for camera2 API implementation:
+ * Does not produce notifyShutter / notifyIdle callbacks to NotificationListener
+ * Use waitUntilDrained for idle.
+ */
+class Camera2Device: public CameraDeviceBase {
public:
- typedef camera2::CameraMetadata CameraMetadata;
-
Camera2Device(int id);
- ~Camera2Device();
-
- status_t initialize(camera_module_t *module);
- status_t disconnect();
-
- status_t dump(int fd, const Vector<String16>& args);
-
- /**
- * The device's static characteristics metadata buffer
- */
- const CameraMetadata& info() const;
-
- /**
- * Submit request for capture. The Camera2Device takes ownership of the
- * passed-in buffer.
- */
- status_t capture(CameraMetadata &request);
-
- /**
- * Submit request for streaming. The Camera2Device makes a copy of the
- * passed-in buffer and the caller retains ownership.
- */
- status_t setStreamingRequest(const CameraMetadata &request);
-
- /**
- * Clear the streaming request slot.
- */
- status_t clearStreamingRequest();
-
- /**
- * Wait until a request with the given ID has been dequeued by the
- * HAL. Returns TIMED_OUT if the timeout duration is reached. Returns
- * immediately if the latest request received by the HAL has this id.
- */
- status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
+ virtual ~Camera2Device();
/**
- * Create an output stream of the requested size and format.
- *
- * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device selects
- * an appropriate format; it can be queried with getStreamInfo.
- *
- * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must be
- * equal to the size in bytes of the buffers to allocate for the stream. For
- * other formats, the size parameter is ignored.
+ * CameraDevice interface
*/
- status_t createStream(sp<ANativeWindow> consumer,
+ virtual int getId() const;
+ virtual status_t initialize(camera_module_t *module);
+ virtual status_t disconnect();
+ virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual const CameraMetadata& info() const;
+ virtual status_t capture(CameraMetadata &request);
+ virtual status_t setStreamingRequest(const CameraMetadata &request);
+ virtual status_t clearStreamingRequest();
+ virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
+ virtual status_t createStream(sp<ANativeWindow> consumer,
uint32_t width, uint32_t height, int format, size_t size,
int *id);
-
- /**
- * Create an input reprocess stream that uses buffers from an existing
- * output stream.
- */
- status_t createReprocessStreamFromStream(int outputId, int *id);
-
- /**
- * Get information about a given stream.
- */
- status_t getStreamInfo(int id,
+ virtual status_t createReprocessStreamFromStream(int outputId, int *id);
+ virtual status_t getStreamInfo(int id,
uint32_t *width, uint32_t *height, uint32_t *format);
-
- /**
- * Set stream gralloc buffer transform
- */
- status_t setStreamTransform(int id, int transform);
-
- /**
- * Delete stream. Must not be called if there are requests in flight which
- * reference that stream.
- */
- status_t deleteStream(int id);
-
- /**
- * Delete reprocess stream. Must not be called if there are requests in
- * flight which reference that stream.
- */
- status_t deleteReprocessStream(int id);
-
- /**
- * Create a metadata buffer with fields that the HAL device believes are
- * best for the given use case
- */
- status_t createDefaultRequest(int templateId, CameraMetadata *request);
-
- /**
- * Wait until all requests have been processed. Returns INVALID_OPERATION if
- * the streaming slot is not empty, or TIMED_OUT if the requests haven't
- * finished processing in 10 seconds.
- */
- status_t waitUntilDrained();
-
- /**
- * Abstract class for HAL notification listeners
- */
- class NotificationListener {
- public:
- // Refer to the Camera2 HAL definition for notification definitions
- virtual void notifyError(int errorCode, int arg1, int arg2) = 0;
- virtual void notifyShutter(int frameNumber, nsecs_t timestamp) = 0;
- virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0;
- virtual void notifyAutoExposure(uint8_t newState, int triggerId) = 0;
- virtual void notifyAutoWhitebalance(uint8_t newState, int triggerId) = 0;
- protected:
- virtual ~NotificationListener();
- };
-
- /**
- * Connect HAL notifications to a listener. Overwrites previous
- * listener. Set to NULL to stop receiving notifications.
- */
- status_t setNotifyCallback(NotificationListener *listener);
-
- /**
- * Wait for a new frame to be produced, with timeout in nanoseconds.
- * Returns TIMED_OUT when no frame produced within the specified duration
- */
- status_t waitForNextFrame(nsecs_t timeout);
-
- /**
- * Get next metadata frame from the frame queue. Returns NULL if the queue
- * is empty; caller takes ownership of the metadata buffer.
- */
- status_t getNextFrame(CameraMetadata *frame);
-
- /**
- * Trigger auto-focus. The latest ID used in a trigger autofocus or cancel
- * autofocus call will be returned by the HAL in all subsequent AF
- * notifications.
- */
- status_t triggerAutofocus(uint32_t id);
-
- /**
- * Cancel auto-focus. The latest ID used in a trigger autofocus/cancel
- * autofocus call will be returned by the HAL in all subsequent AF
- * notifications.
- */
- status_t triggerCancelAutofocus(uint32_t id);
-
- /**
- * Trigger pre-capture metering. The latest ID used in a trigger pre-capture
- * call will be returned by the HAL in all subsequent AE and AWB
- * notifications.
- */
- status_t triggerPrecaptureMetering(uint32_t id);
-
- /**
- * Abstract interface for clients that want to listen to reprocess buffer
- * release events
- */
- struct BufferReleasedListener: public virtual RefBase {
- virtual void onBufferReleased(buffer_handle_t *handle) = 0;
- };
-
- /**
- * Push a buffer to be reprocessed into a reprocessing stream, and
- * provide a listener to call once the buffer is returned by the HAL
- */
- status_t pushReprocessBuffer(int reprocessStreamId,
+ virtual status_t setStreamTransform(int id, int transform);
+ virtual status_t deleteStream(int id);
+ virtual status_t deleteReprocessStream(int id);
+ virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
+ virtual status_t waitUntilDrained();
+ virtual status_t setNotifyCallback(NotificationListener *listener);
+ virtual bool willNotify3A();
+ virtual status_t waitForNextFrame(nsecs_t timeout);
+ virtual status_t getNextFrame(CameraMetadata *frame);
+ virtual status_t triggerAutofocus(uint32_t id);
+ virtual status_t triggerCancelAutofocus(uint32_t id);
+ virtual status_t triggerPrecaptureMetering(uint32_t id);
+ virtual status_t pushReprocessBuffer(int reprocessStreamId,
buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
-
+ // Flush implemented as just a wait
+ virtual status_t flush();
private:
const int mId;
- camera2_device_t *mDevice;
+ camera2_device_t *mHal2Device;
CameraMetadata mDeviceInfo;
vendor_tag_query_ops_t *mVendorTagOps;
@@ -244,6 +119,9 @@ class Camera2Device : public virtual RefBase {
status_t setStreamSlot(camera_metadata_t *buf);
status_t setStreamSlot(const List<camera_metadata_t*> &bufs);
+ // Clear the request queue and the streaming slot
+ status_t clear();
+
status_t dump(int fd, const Vector<String16>& args);
private:
@@ -251,7 +129,7 @@ class Camera2Device : public virtual RefBase {
status_t freeBuffers(List<camera_metadata_t*>::iterator start,
List<camera_metadata_t*>::iterator end);
- camera2_device_t *mDevice;
+ camera2_device_t *mHal2Device;
Mutex mMutex;
Condition notEmpty;
@@ -343,7 +221,7 @@ class Camera2Device : public virtual RefBase {
} mState;
sp<ANativeWindow> mConsumerInterface;
- camera2_device_t *mDevice;
+ camera2_device_t *mHal2Device;
uint32_t mId;
uint32_t mWidth;
@@ -437,7 +315,7 @@ class Camera2Device : public virtual RefBase {
List<QueueEntry> mInFlightQueue;
- camera2_device_t *mDevice;
+ camera2_device_t *mHal2Device;
uint32_t mId;
uint32_t mWidth;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
new file mode 100644
index 0000000..3dbc1b0
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -0,0 +1,2557 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-Device"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0 // Per-frame verbose logging
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+// Convenience macro for transient errors
+#define CLOGE(fmt, ...) ALOGE("Camera %d: %s: " fmt, mId, __FUNCTION__, \
+ ##__VA_ARGS__)
+
+// Convenience macros for transitioning to the error state
+#define SET_ERR(fmt, ...) setErrorState( \
+ "%s: " fmt, __FUNCTION__, \
+ ##__VA_ARGS__)
+#define SET_ERR_L(fmt, ...) setErrorStateLocked( \
+ "%s: " fmt, __FUNCTION__, \
+ ##__VA_ARGS__)
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Timers.h>
+
+#include "utils/CameraTraces.h"
+#include "device3/Camera3Device.h"
+#include "device3/Camera3OutputStream.h"
+#include "device3/Camera3InputStream.h"
+#include "device3/Camera3ZslStream.h"
+
+using namespace android::camera3;
+
+namespace android {
+
+Camera3Device::Camera3Device(int id):
+ mId(id),
+ mHal3Device(NULL),
+ mStatus(STATUS_UNINITIALIZED),
+ mUsePartialResultQuirk(false),
+ mNextResultFrameNumber(0),
+ mNextShutterFrameNumber(0),
+ mListener(NULL)
+{
+ ATRACE_CALL();
+ camera3_callback_ops::notify = &sNotify;
+ camera3_callback_ops::process_capture_result = &sProcessCaptureResult;
+ ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
+}
+
+Camera3Device::~Camera3Device()
+{
+ ATRACE_CALL();
+ ALOGV("%s: Tearing down for camera id %d", __FUNCTION__, mId);
+ disconnect();
+}
+
+int Camera3Device::getId() const {
+ return mId;
+}
+
+/**
+ * CameraDeviceBase interface
+ */
+
+status_t Camera3Device::initialize(camera_module_t *module)
+{
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
+ if (mStatus != STATUS_UNINITIALIZED) {
+ CLOGE("Already initialized!");
+ return INVALID_OPERATION;
+ }
+
+ /** Open HAL device */
+
+ status_t res;
+ String8 deviceName = String8::format("%d", mId);
+
+ camera3_device_t *device;
+
+ res = module->common.methods->open(&module->common, deviceName.string(),
+ reinterpret_cast<hw_device_t**>(&device));
+
+ if (res != OK) {
+ SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ /** Cross-check device version */
+
+ if (device->common.version != CAMERA_DEVICE_API_VERSION_3_0) {
+ SET_ERR_L("Could not open camera: "
+ "Camera device is not version %x, reports %x instead",
+ CAMERA_DEVICE_API_VERSION_3_0,
+ device->common.version);
+ device->common.close(&device->common);
+ return BAD_VALUE;
+ }
+
+ camera_info info;
+ res = module->get_camera_info(mId, &info);
+ if (res != OK) return res;
+
+ if (info.device_version != device->common.version) {
+ SET_ERR_L("HAL reporting mismatched camera_info version (%x)"
+ " and device version (%x).",
+ device->common.version, info.device_version);
+ device->common.close(&device->common);
+ return BAD_VALUE;
+ }
+
+ /** Initialize device with callback functions */
+
+ ATRACE_BEGIN("camera3->initialize");
+ res = device->ops->initialize(device, this);
+ ATRACE_END();
+
+ if (res != OK) {
+ SET_ERR_L("Unable to initialize HAL device: %s (%d)",
+ strerror(-res), res);
+ device->common.close(&device->common);
+ return BAD_VALUE;
+ }
+
+ /** Get vendor metadata tags */
+
+ mVendorTagOps.get_camera_vendor_section_name = NULL;
+
+ ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
+ device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps);
+ ATRACE_END();
+
+ if (mVendorTagOps.get_camera_vendor_section_name != NULL) {
+ res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps);
+ if (res != OK) {
+ SET_ERR_L("Unable to set tag ops: %s (%d)",
+ strerror(-res), res);
+ device->common.close(&device->common);
+ return res;
+ }
+ }
+
+ /** Start up status tracker thread */
+ mStatusTracker = new StatusTracker(this);
+ res = mStatusTracker->run(String8::format("C3Dev-%d-Status", mId).string());
+ if (res != OK) {
+ SET_ERR_L("Unable to start status tracking thread: %s (%d)",
+ strerror(-res), res);
+ device->common.close(&device->common);
+ mStatusTracker.clear();
+ return res;
+ }
+
+ /** Start up request queue thread */
+
+ mRequestThread = new RequestThread(this, mStatusTracker, device);
+ res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string());
+ if (res != OK) {
+ SET_ERR_L("Unable to start request queue thread: %s (%d)",
+ strerror(-res), res);
+ device->common.close(&device->common);
+ mRequestThread.clear();
+ return res;
+ }
+
+ /** Everything is good to go */
+
+ mDeviceInfo = info.static_camera_characteristics;
+ mHal3Device = device;
+ mStatus = STATUS_UNCONFIGURED;
+ mNextStreamId = 0;
+ mNeedConfig = true;
+ mPauseStateNotify = false;
+
+ /** Check for quirks */
+
+ // Will the HAL be sending in early partial result metadata?
+ camera_metadata_entry partialResultsQuirk =
+ mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+ if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
+ mUsePartialResultQuirk = true;
+ }
+
+ return OK;
+}
+
+status_t Camera3Device::disconnect() {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+
+ ALOGV("%s: E", __FUNCTION__);
+
+ status_t res = OK;
+
+ {
+ Mutex::Autolock l(mLock);
+ if (mStatus == STATUS_UNINITIALIZED) return res;
+
+ if (mStatus == STATUS_ACTIVE ||
+ (mStatus == STATUS_ERROR && mRequestThread != NULL)) {
+ res = mRequestThread->clearRepeatingRequests();
+ if (res != OK) {
+ SET_ERR_L("Can't stop streaming");
+ // Continue to close device even in case of error
+ } else {
+ res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
+ if (res != OK) {
+ SET_ERR_L("Timeout waiting for HAL to drain");
+ // Continue to close device even in case of error
+ }
+ }
+ }
+
+ if (mStatus == STATUS_ERROR) {
+ CLOGE("Shutting down in an error state");
+ }
+
+ if (mStatusTracker != NULL) {
+ mStatusTracker->requestExit();
+ }
+
+ if (mRequestThread != NULL) {
+ mRequestThread->requestExit();
+ }
+
+ mOutputStreams.clear();
+ mInputStream.clear();
+ }
+
+ // Joining done without holding mLock, otherwise deadlocks may ensue
+ // as the threads try to access parent state
+ if (mRequestThread != NULL && mStatus != STATUS_ERROR) {
+ // HAL may be in a bad state, so waiting for request thread
+ // (which may be stuck in the HAL processCaptureRequest call)
+ // could be dangerous.
+ mRequestThread->join();
+ }
+
+ if (mStatusTracker != NULL) {
+ mStatusTracker->join();
+ }
+
+ {
+ Mutex::Autolock l(mLock);
+
+ mRequestThread.clear();
+ mStatusTracker.clear();
+
+ if (mHal3Device != NULL) {
+ mHal3Device->common.close(&mHal3Device->common);
+ mHal3Device = NULL;
+ }
+
+ mStatus = STATUS_UNINITIALIZED;
+ }
+
+ ALOGV("%s: X", __FUNCTION__);
+ return res;
+}
+
+// For dumping/debugging only -
+// try to acquire a lock a few times, eventually give up to proceed with
+// debug/dump operations
+bool Camera3Device::tryLockSpinRightRound(Mutex& lock) {
+ bool gotLock = false;
+ for (size_t i = 0; i < kDumpLockAttempts; ++i) {
+ if (lock.tryLock() == NO_ERROR) {
+ gotLock = true;
+ break;
+ } else {
+ usleep(kDumpSleepDuration);
+ }
+ }
+ return gotLock;
+}
+
+status_t Camera3Device::dump(int fd, const Vector<String16> &args) {
+ ATRACE_CALL();
+ (void)args;
+
+ // Try to lock, but continue in case of failure (to avoid blocking in
+ // deadlocks)
+ bool gotInterfaceLock = tryLockSpinRightRound(mInterfaceLock);
+ bool gotLock = tryLockSpinRightRound(mLock);
+
+ ALOGW_IF(!gotInterfaceLock,
+ "Camera %d: %s: Unable to lock interface lock, proceeding anyway",
+ mId, __FUNCTION__);
+ ALOGW_IF(!gotLock,
+ "Camera %d: %s: Unable to lock main lock, proceeding anyway",
+ mId, __FUNCTION__);
+
+ String8 lines;
+
+ const char *status =
+ mStatus == STATUS_ERROR ? "ERROR" :
+ mStatus == STATUS_UNINITIALIZED ? "UNINITIALIZED" :
+ mStatus == STATUS_UNCONFIGURED ? "UNCONFIGURED" :
+ mStatus == STATUS_CONFIGURED ? "CONFIGURED" :
+ mStatus == STATUS_ACTIVE ? "ACTIVE" :
+ "Unknown";
+
+ lines.appendFormat(" Device status: %s\n", status);
+ if (mStatus == STATUS_ERROR) {
+ lines.appendFormat(" Error cause: %s\n", mErrorCause.string());
+ }
+ lines.appendFormat(" Stream configuration:\n");
+
+ if (mInputStream != NULL) {
+ write(fd, lines.string(), lines.size());
+ mInputStream->dump(fd, args);
+ } else {
+ lines.appendFormat(" No input stream.\n");
+ write(fd, lines.string(), lines.size());
+ }
+ for (size_t i = 0; i < mOutputStreams.size(); i++) {
+ mOutputStreams[i]->dump(fd,args);
+ }
+
+ lines = String8(" In-flight requests:\n");
+ if (mInFlightMap.size() == 0) {
+ lines.append(" None\n");
+ } else {
+ for (size_t i = 0; i < mInFlightMap.size(); i++) {
+ InFlightRequest r = mInFlightMap.valueAt(i);
+ lines.appendFormat(" Frame %d | Timestamp: %lld, metadata"
+ " arrived: %s, buffers left: %d\n", mInFlightMap.keyAt(i),
+ r.captureTimestamp, r.haveResultMetadata ? "true" : "false",
+ r.numBuffersLeft);
+ }
+ }
+ write(fd, lines.string(), lines.size());
+
+ {
+ lines = String8(" Last request sent:\n");
+ write(fd, lines.string(), lines.size());
+
+ CameraMetadata lastRequest = getLatestRequestLocked();
+ lastRequest.dump(fd, /*verbosity*/2, /*indentation*/6);
+ }
+
+ if (mHal3Device != NULL) {
+ lines = String8(" HAL device dump:\n");
+ write(fd, lines.string(), lines.size());
+ mHal3Device->ops->dump(mHal3Device, fd);
+ }
+
+ if (gotLock) mLock.unlock();
+ if (gotInterfaceLock) mInterfaceLock.unlock();
+
+ return OK;
+}
+
+const CameraMetadata& Camera3Device::info() const {
+ ALOGVV("%s: E", __FUNCTION__);
+ if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
+ mStatus == STATUS_ERROR)) {
+ ALOGW("%s: Access to static info %s!", __FUNCTION__,
+ mStatus == STATUS_ERROR ?
+ "when in error state" : "before init");
+ }
+ return mDeviceInfo;
+}
+
+status_t Camera3Device::capture(CameraMetadata &request) {
+ ATRACE_CALL();
+ status_t res;
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ // TODO: take ownership of the request
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ CLOGE("Device has encountered a serious error");
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ CLOGE("Device not initialized");
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ // May be lazily configuring streams, will check during setup
+ case STATUS_CONFIGURED:
+ case STATUS_ACTIVE:
+ // OK
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+
+ sp<CaptureRequest> newRequest = setUpRequestLocked(request);
+ if (newRequest == NULL) {
+ CLOGE("Can't create capture request");
+ return BAD_VALUE;
+ }
+
+ res = mRequestThread->queueRequest(newRequest);
+ if (res == OK) {
+ res = waitUntilStateThenRelock(/*active*/ true, kActiveTimeout);
+ if (res != OK) {
+ SET_ERR_L("Can't transition to active in %f seconds!",
+ kActiveTimeout/1e9);
+ }
+ ALOGV("Camera %d: Capture request enqueued", mId);
+ }
+ return res;
+}
+
+
+status_t Camera3Device::setStreamingRequest(const CameraMetadata &request) {
+ ATRACE_CALL();
+ status_t res;
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ CLOGE("Device has encountered a serious error");
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ CLOGE("Device not initialized");
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ // May be lazily configuring streams, will check during setup
+ case STATUS_CONFIGURED:
+ case STATUS_ACTIVE:
+ // OK
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+
+ sp<CaptureRequest> newRepeatingRequest = setUpRequestLocked(request);
+ if (newRepeatingRequest == NULL) {
+ CLOGE("Can't create repeating request");
+ return BAD_VALUE;
+ }
+
+ RequestList newRepeatingRequests;
+ newRepeatingRequests.push_back(newRepeatingRequest);
+
+ res = mRequestThread->setRepeatingRequests(newRepeatingRequests);
+ if (res == OK) {
+ res = waitUntilStateThenRelock(/*active*/ true, kActiveTimeout);
+ if (res != OK) {
+ SET_ERR_L("Can't transition to active in %f seconds!",
+ kActiveTimeout/1e9);
+ }
+ ALOGV("Camera %d: Repeating request set", mId);
+ }
+ return res;
+}
+
+
+sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
+ const CameraMetadata &request) {
+ status_t res;
+
+ if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
+ res = configureStreamsLocked();
+ if (res != OK) {
+ SET_ERR_L("Can't set up streams: %s (%d)", strerror(-res), res);
+ return NULL;
+ }
+ if (mStatus == STATUS_UNCONFIGURED) {
+ CLOGE("No streams configured");
+ return NULL;
+ }
+ }
+
+ sp<CaptureRequest> newRequest = createCaptureRequest(request);
+ return newRequest;
+}
+
+status_t Camera3Device::clearStreamingRequest() {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ CLOGE("Device has encountered a serious error");
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ CLOGE("Device not initialized");
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ case STATUS_CONFIGURED:
+ case STATUS_ACTIVE:
+ // OK
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+ ALOGV("Camera %d: Clearing repeating request", mId);
+ return mRequestThread->clearRepeatingRequests();
+}
+
+status_t Camera3Device::waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+
+ return mRequestThread->waitUntilRequestProcessed(requestId, timeout);
+}
+
+status_t Camera3Device::createInputStream(
+ uint32_t width, uint32_t height, int format, int *id) {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+ ALOGV("Camera %d: Creating new input stream %d: %d x %d, format %d",
+ mId, mNextStreamId, width, height, format);
+
+ status_t res;
+ bool wasActive = false;
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ ALOGE("%s: Device has encountered a serious error", __FUNCTION__);
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ ALOGE("%s: Device not initialized", __FUNCTION__);
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ case STATUS_CONFIGURED:
+ // OK
+ break;
+ case STATUS_ACTIVE:
+ ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
+ res = internalPauseAndWaitLocked();
+ if (res != OK) {
+ SET_ERR_L("Can't pause captures to reconfigure streams!");
+ return res;
+ }
+ wasActive = true;
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+ assert(mStatus != STATUS_ACTIVE);
+
+ if (mInputStream != 0) {
+ ALOGE("%s: Cannot create more than 1 input stream", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ sp<Camera3InputStream> newStream = new Camera3InputStream(mNextStreamId,
+ width, height, format);
+ newStream->setStatusTracker(mStatusTracker);
+
+ mInputStream = newStream;
+
+ *id = mNextStreamId++;
+
+ // Continue captures if active at start
+ if (wasActive) {
+ ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
+ res = configureStreamsLocked();
+ if (res != OK) {
+ ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)",
+ __FUNCTION__, mNextStreamId, strerror(-res), res);
+ return res;
+ }
+ internalResumeLocked();
+ }
+
+ ALOGV("Camera %d: Created input stream", mId);
+ return OK;
+}
+
+
+status_t Camera3Device::createZslStream(
+ uint32_t width, uint32_t height,
+ int depth,
+ /*out*/
+ int *id,
+ sp<Camera3ZslStream>* zslStream) {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+ ALOGV("Camera %d: Creating ZSL stream %d: %d x %d, depth %d",
+ mId, mNextStreamId, width, height, depth);
+
+ status_t res;
+ bool wasActive = false;
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ ALOGE("%s: Device has encountered a serious error", __FUNCTION__);
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ ALOGE("%s: Device not initialized", __FUNCTION__);
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ case STATUS_CONFIGURED:
+ // OK
+ break;
+ case STATUS_ACTIVE:
+ ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
+ res = internalPauseAndWaitLocked();
+ if (res != OK) {
+ SET_ERR_L("Can't pause captures to reconfigure streams!");
+ return res;
+ }
+ wasActive = true;
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+ assert(mStatus != STATUS_ACTIVE);
+
+ if (mInputStream != 0) {
+ ALOGE("%s: Cannot create more than 1 input stream", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ sp<Camera3ZslStream> newStream = new Camera3ZslStream(mNextStreamId,
+ width, height, depth);
+ newStream->setStatusTracker(mStatusTracker);
+
+ res = mOutputStreams.add(mNextStreamId, newStream);
+ if (res < 0) {
+ ALOGE("%s: Can't add new stream to set: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ mInputStream = newStream;
+
+ *id = mNextStreamId++;
+ *zslStream = newStream;
+
+ // Continue captures if active at start
+ if (wasActive) {
+ ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
+ res = configureStreamsLocked();
+ if (res != OK) {
+ ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)",
+ __FUNCTION__, mNextStreamId, strerror(-res), res);
+ return res;
+ }
+ internalResumeLocked();
+ }
+
+ ALOGV("Camera %d: Created ZSL stream", mId);
+ return OK;
+}
+
+status_t Camera3Device::createStream(sp<ANativeWindow> consumer,
+ uint32_t width, uint32_t height, int format, size_t size, int *id) {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+ ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, size %zu",
+ mId, mNextStreamId, width, height, format, size);
+
+ status_t res;
+ bool wasActive = false;
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ CLOGE("Device has encountered a serious error");
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ CLOGE("Device not initialized");
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ case STATUS_CONFIGURED:
+ // OK
+ break;
+ case STATUS_ACTIVE:
+ ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
+ res = internalPauseAndWaitLocked();
+ if (res != OK) {
+ SET_ERR_L("Can't pause captures to reconfigure streams!");
+ return res;
+ }
+ wasActive = true;
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+ assert(mStatus != STATUS_ACTIVE);
+
+ sp<Camera3OutputStream> newStream;
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ width, height, size, format);
+ } else {
+ newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ width, height, format);
+ }
+ newStream->setStatusTracker(mStatusTracker);
+
+ res = mOutputStreams.add(mNextStreamId, newStream);
+ if (res < 0) {
+ SET_ERR_L("Can't add new stream to set: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ *id = mNextStreamId++;
+ mNeedConfig = true;
+
+ // Continue captures if active at start
+ if (wasActive) {
+ ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
+ res = configureStreamsLocked();
+ if (res != OK) {
+ CLOGE("Can't reconfigure device for new stream %d: %s (%d)",
+ mNextStreamId, strerror(-res), res);
+ return res;
+ }
+ internalResumeLocked();
+ }
+ ALOGV("Camera %d: Created new stream", mId);
+ return OK;
+}
+
+status_t Camera3Device::createReprocessStreamFromStream(int outputId, int *id) {
+ ATRACE_CALL();
+ (void)outputId; (void)id;
+
+ CLOGE("Unimplemented");
+ return INVALID_OPERATION;
+}
+
+
+status_t Camera3Device::getStreamInfo(int id,
+ uint32_t *width, uint32_t *height, uint32_t *format) {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ switch (mStatus) {
+ case STATUS_ERROR:
+ CLOGE("Device has encountered a serious error");
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ CLOGE("Device not initialized!");
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ case STATUS_CONFIGURED:
+ case STATUS_ACTIVE:
+ // OK
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
+
+ ssize_t idx = mOutputStreams.indexOfKey(id);
+ if (idx == NAME_NOT_FOUND) {
+ CLOGE("Stream %d is unknown", id);
+ return idx;
+ }
+
+ if (width) *width = mOutputStreams[idx]->getWidth();
+ if (height) *height = mOutputStreams[idx]->getHeight();
+ if (format) *format = mOutputStreams[idx]->getFormat();
+
+ return OK;
+}
+
+// Set the display transform (rotation/flip flags) on output stream `id`.
+status_t Camera3Device::setStreamTransform(int id,
+        int transform) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    switch (mStatus) {
+        case STATUS_ERROR:
+            CLOGE("Device has encountered a serious error");
+            return INVALID_OPERATION;
+        case STATUS_UNINITIALIZED:
+            CLOGE("Device not initialized");
+            return INVALID_OPERATION;
+        case STATUS_UNCONFIGURED:
+        case STATUS_CONFIGURED:
+        case STATUS_ACTIVE:
+            // OK
+            break;
+        default:
+            SET_ERR_L("Unexpected status: %d", mStatus);
+            return INVALID_OPERATION;
+    }
+
+    ssize_t idx = mOutputStreams.indexOfKey(id);
+    if (idx == NAME_NOT_FOUND) {
+        CLOGE("Stream %d does not exist",
+                id);
+        return BAD_VALUE;
+    }
+
+    // Delegate to the stream; it decides how/when the transform is applied.
+    return mOutputStreams.editValueAt(idx)->setTransform(transform);
+}
+
+// Remove stream `id` (input or output) from the device. The stream's
+// endpoint is disconnected immediately, but the stream object itself is
+// parked in mDeletedStreams until the next reconfiguration.
+status_t Camera3Device::deleteStream(int id) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+    status_t res;
+
+    ALOGV("%s: Camera %d: Deleting stream %d", __FUNCTION__, mId, id);
+
+    // CameraDevice semantics require device to already be idle before
+    // deleteStream is called, unlike for createStream.
+    if (mStatus == STATUS_ACTIVE) {
+        ALOGV("%s: Camera %d: Device not idle", __FUNCTION__, mId);
+        return -EBUSY;
+    }
+
+    sp<Camera3StreamInterface> deletedStream;
+    // The input stream is tracked separately from the output stream map.
+    if (mInputStream != NULL && id == mInputStream->getId()) {
+        deletedStream = mInputStream;
+        mInputStream.clear();
+    } else {
+        ssize_t idx = mOutputStreams.indexOfKey(id);
+        if (idx == NAME_NOT_FOUND) {
+            CLOGE("Stream %d does not exist", id);
+            return BAD_VALUE;
+        }
+        deletedStream = mOutputStreams.editValueAt(idx);
+        mOutputStreams.removeItem(id);
+    }
+
+    // Free up the stream endpoint so that it can be used by some other stream
+    res = deletedStream->disconnect();
+    if (res != OK) {
+        SET_ERR_L("Can't disconnect deleted stream %d", id);
+        // fall through since we want to still list the stream as deleted.
+    }
+    mDeletedStreams.add(deletedStream);
+    // Force a configure_streams() call before the next capture.
+    mNeedConfig = true;
+
+    return res;
+}
+
+// Reprocess streams are not supported on the HAL3 device path; always fails.
+status_t Camera3Device::deleteReprocessStream(int id) {
+    ATRACE_CALL();
+    (void)id;
+
+    CLOGE("Unimplemented");
+    return INVALID_OPERATION;
+}
+
+
+// Ask the HAL for its default request settings for `templateId` (preview,
+// still capture, etc.) and copy them into *request.
+status_t Camera3Device::createDefaultRequest(int templateId,
+        CameraMetadata *request) {
+    ATRACE_CALL();
+    ALOGV("%s: for template %d", __FUNCTION__, templateId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    switch (mStatus) {
+        case STATUS_ERROR:
+            CLOGE("Device has encountered a serious error");
+            return INVALID_OPERATION;
+        case STATUS_UNINITIALIZED:
+            CLOGE("Device is not initialized!");
+            return INVALID_OPERATION;
+        case STATUS_UNCONFIGURED:
+        case STATUS_CONFIGURED:
+        case STATUS_ACTIVE:
+            // OK
+            break;
+        default:
+            SET_ERR_L("Unexpected status: %d", mStatus);
+            return INVALID_OPERATION;
+    }
+
+    // The returned metadata is owned by the HAL; the assignment below copies
+    // it into the caller's CameraMetadata.
+    const camera_metadata_t *rawRequest;
+    ATRACE_BEGIN("camera3->construct_default_request_settings");
+    rawRequest = mHal3Device->ops->construct_default_request_settings(
+        mHal3Device, templateId);
+    ATRACE_END();
+    if (rawRequest == NULL) {
+        SET_ERR_L("HAL is unable to construct default settings for template %d",
+                templateId);
+        return DEAD_OBJECT;
+    }
+    *request = rawRequest;
+
+    return OK;
+}
+
+// Block until the device has no in-flight work, up to kShutdownTimeout.
+status_t Camera3Device::waitUntilDrained() {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    switch (mStatus) {
+        case STATUS_UNINITIALIZED:
+        case STATUS_UNCONFIGURED:
+            ALOGV("%s: Already idle", __FUNCTION__);
+            return OK;
+        case STATUS_CONFIGURED:
+            // To avoid race conditions, check with tracker to be sure
+        case STATUS_ERROR:
+        case STATUS_ACTIVE:
+            // Need to verify shut down
+            break;
+        default:
+            SET_ERR_L("Unexpected status: %d",mStatus);
+            return INVALID_OPERATION;
+    }
+
+    ALOGV("%s: Camera %d: Waiting until idle", __FUNCTION__, mId);
+    status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
+    return res;
+}
+
+// Pause to reconfigure
+status_t Camera3Device::internalPauseAndWaitLocked() {
+ mRequestThread->setPaused(true);
+ mPauseStateNotify = true;
+
+ ALOGV("%s: Camera %d: Internal wait until idle", __FUNCTION__, mId);
+ status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
+ if (res != OK) {
+ SET_ERR_L("Can't idle device in %f seconds!",
+ kShutdownTimeout/1e9);
+ }
+
+ return res;
+}
+
+// Resume after internalPauseAndWaitLocked
+status_t Camera3Device::internalResumeLocked() {
+ status_t res;
+
+ mRequestThread->setPaused(false);
+
+ res = waitUntilStateThenRelock(/*active*/ true, kActiveTimeout);
+ if (res != OK) {
+ SET_ERR_L("Can't transition to active in %f seconds!",
+ kActiveTimeout/1e9);
+ }
+ mPauseStateNotify = false;
+ return OK;
+}
+
+// Wait (holding mLock; the condition wait releases it) until the device
+// reaches the desired activity state: active==true waits for STATUS_ACTIVE,
+// active==false waits for any non-ACTIVE state. Transitions that happen
+// during the wait are observed via mRecentStatusUpdates so a quick
+// transition through the target state is not missed.
+status_t Camera3Device::waitUntilStateThenRelock(bool active,
+        nsecs_t timeout) {
+    status_t res = OK;
+    if (active == (mStatus == STATUS_ACTIVE)) {
+        // Desired state already reached
+        return res;
+    }
+
+    bool stateSeen = false;
+    do {
+        // NOTE(review): the history is cleared before each wait; a transition
+        // recorded between iterations could in principle be discarded here —
+        // confirm notifyStatus() ordering makes this safe.
+        mRecentStatusUpdates.clear();
+
+        res = mStatusChanged.waitRelative(mLock, timeout);
+        if (res != OK) break;
+
+        // Check state change history during wait
+        for (size_t i = 0; i < mRecentStatusUpdates.size(); i++) {
+            if (active == (mRecentStatusUpdates[i] == STATUS_ACTIVE) ) {
+                stateSeen = true;
+                break;
+            }
+        }
+    } while (!stateSeen);
+
+    return res;
+}
+
+
+// Install (or clear, with NULL) the listener that receives device
+// notifications. Guarded by mOutputLock since callbacks read mListener there.
+status_t Camera3Device::setNotifyCallback(NotificationListener *listener) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mOutputLock);
+
+    if (listener != NULL && mListener != NULL) {
+        ALOGW("%s: Replacing old callback listener", __FUNCTION__);
+    }
+    mListener = listener;
+
+    return OK;
+}
+
+// HAL3 devices deliver 3A state in result metadata, not via notify callbacks.
+bool Camera3Device::willNotify3A() {
+    return false;
+}
+
+// Block until at least one result is available in mResultQueue, or `timeout`
+// elapses (returns TIMED_OUT in that case).
+status_t Camera3Device::waitForNextFrame(nsecs_t timeout) {
+    status_t res;
+    Mutex::Autolock l(mOutputLock);
+
+    // NOTE(review): the full timeout is re-armed on every wakeup, so the
+    // total wait can exceed `timeout` if the queue is drained by another
+    // consumer between wakeups — confirm whether that is acceptable here.
+    while (mResultQueue.empty()) {
+        res = mResultSignal.waitRelative(mOutputLock, timeout);
+        if (res == TIMED_OUT) {
+            return res;
+        } else if (res != OK) {
+            ALOGW("%s: Camera %d: No frame in %lld ns: %s (%d)",
+                    __FUNCTION__, mId, timeout, strerror(-res), res);
+            return res;
+        }
+    }
+    return OK;
+}
+
+// Pop the oldest completed result into *frame (ownership transferred via
+// acquire, so no metadata copy). NOT_ENOUGH_DATA when the queue is empty.
+status_t Camera3Device::getNextFrame(CameraMetadata *frame) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mOutputLock);
+
+    if (mResultQueue.empty()) {
+        return NOT_ENOUGH_DATA;
+    }
+
+    CameraMetadata &result = *(mResultQueue.begin());
+    frame->acquire(result);
+    mResultQueue.erase(mResultQueue.begin());
+
+    return OK;
+}
+
+// Queue an AF-start trigger (plus its trigger ID) to be mixed into the next
+// capture request only.
+status_t Camera3Device::triggerAutofocus(uint32_t id) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+
+    ALOGV("%s: Triggering autofocus, id %d", __FUNCTION__, id);
+    // Mix-in this trigger into the next request and only the next request.
+    RequestTrigger trigger[] = {
+        {
+            ANDROID_CONTROL_AF_TRIGGER,
+            ANDROID_CONTROL_AF_TRIGGER_START
+        },
+        {
+            ANDROID_CONTROL_AF_TRIGGER_ID,
+            static_cast<int32_t>(id)
+        },
+    };
+
+    return mRequestThread->queueTrigger(trigger,
+                                        sizeof(trigger)/sizeof(trigger[0]));
+}
+
+// Queue an AF-cancel trigger (plus its trigger ID) to be mixed into the next
+// capture request only.
+status_t Camera3Device::triggerCancelAutofocus(uint32_t id) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+
+    ALOGV("%s: Triggering cancel autofocus, id %d", __FUNCTION__, id);
+    // Mix-in this trigger into the next request and only the next request.
+    RequestTrigger trigger[] = {
+        {
+            ANDROID_CONTROL_AF_TRIGGER,
+            ANDROID_CONTROL_AF_TRIGGER_CANCEL
+        },
+        {
+            ANDROID_CONTROL_AF_TRIGGER_ID,
+            static_cast<int32_t>(id)
+        },
+    };
+
+    return mRequestThread->queueTrigger(trigger,
+                                        sizeof(trigger)/sizeof(trigger[0]));
+}
+
+// Queue an AE precapture-metering trigger (plus its trigger ID) to be mixed
+// into the next capture request only.
+status_t Camera3Device::triggerPrecaptureMetering(uint32_t id) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+
+    ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id);
+    // Mix-in this trigger into the next request and only the next request.
+    RequestTrigger trigger[] = {
+        {
+            ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+            ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START
+        },
+        {
+            ANDROID_CONTROL_AE_PRECAPTURE_ID,
+            static_cast<int32_t>(id)
+        },
+    };
+
+    return mRequestThread->queueTrigger(trigger,
+                                        sizeof(trigger)/sizeof(trigger[0]));
+}
+
+// Reprocess buffers are not supported on the HAL3 device path; always fails.
+status_t Camera3Device::pushReprocessBuffer(int reprocessStreamId,
+        buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
+    ATRACE_CALL();
+    (void)reprocessStreamId; (void)buffer; (void)listener;
+
+    CLOGE("Unimplemented");
+    return INVALID_OPERATION;
+}
+
+// Discard all pending requests: clear the framework-side queue first, then
+// ask the HAL to abort whatever it already accepted.
+status_t Camera3Device::flush() {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    mRequestThread->clear();
+    return mHal3Device->ops->flush(mHal3Device);
+}
+
+/**
+ * Methods called by subclasses
+ */
+
+// Status-tracker callback: record the device's idle/active transition,
+// wake any waitUntilStateThenRelock() waiters, and forward idle events to
+// the listener (unless the transition was caused by an internal pause).
+void Camera3Device::notifyStatus(bool idle) {
+    {
+        // Need mLock to safely update state and synchronize to current
+        // state of methods in flight.
+        Mutex::Autolock l(mLock);
+        // We can get various system-idle notices from the status tracker
+        // while starting up. Only care about them if we've actually sent
+        // in some requests recently.
+        if (mStatus != STATUS_ACTIVE && mStatus != STATUS_CONFIGURED) {
+            return;
+        }
+        ALOGV("%s: Camera %d: Now %s", __FUNCTION__, mId,
+                idle ? "idle" : "active");
+        mStatus = idle ? STATUS_CONFIGURED : STATUS_ACTIVE;
+        mRecentStatusUpdates.add(mStatus);
+        mStatusChanged.signal();
+
+        // Skip notifying listener if we're doing some user-transparent
+        // state changes
+        if (mPauseStateNotify) return;
+    }
+    // Read the listener under mOutputLock, but invoke it with no locks held
+    // to avoid lock-order issues with the callback's own locking.
+    NotificationListener *listener;
+    {
+        Mutex::Autolock l(mOutputLock);
+        listener = mListener;
+    }
+    if (idle && listener != NULL) {
+        listener->notifyIdle();
+    }
+}
+
+/**
+ * Camera3Device private methods
+ */
+
+// Translate framework request metadata into a CaptureRequest: resolve the
+// referenced input/output stream IDs against the device's stream sets,
+// lazily finish configuration of any stream on first use, and strip the
+// stream-ID tags from the settings handed to the HAL. Returns NULL on any
+// unknown stream or configuration failure.
+sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
+        const CameraMetadata &request) {
+    ATRACE_CALL();
+    status_t res;
+
+    sp<CaptureRequest> newRequest = new CaptureRequest;
+    newRequest->mSettings = request;
+
+    camera_metadata_entry_t inputStreams =
+            newRequest->mSettings.find(ANDROID_REQUEST_INPUT_STREAMS);
+    if (inputStreams.count > 0) {
+        if (mInputStream == NULL ||
+                mInputStream->getId() != inputStreams.data.i32[0]) {
+            // Fix: the ID is an int32 tag; log data.i32[0], not data.u8[0]
+            // (the latter printed only the first byte of the stream ID).
+            CLOGE("Request references unknown input stream %d",
+                    inputStreams.data.i32[0]);
+            return NULL;
+        }
+        // Lazy completion of stream configuration (allocation/registration)
+        // on first use
+        if (mInputStream->isConfiguring()) {
+            res = mInputStream->finishConfiguration(mHal3Device);
+            if (res != OK) {
+                SET_ERR_L("Unable to finish configuring input stream %d:"
+                        " %s (%d)",
+                        mInputStream->getId(), strerror(-res), res);
+                return NULL;
+            }
+        }
+
+        newRequest->mInputStream = mInputStream;
+        newRequest->mSettings.erase(ANDROID_REQUEST_INPUT_STREAMS);
+    }
+
+    camera_metadata_entry_t streams =
+            newRequest->mSettings.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+    if (streams.count == 0) {
+        CLOGE("Zero output streams specified!");
+        return NULL;
+    }
+
+    for (size_t i = 0; i < streams.count; i++) {
+        int idx = mOutputStreams.indexOfKey(streams.data.i32[i]);
+        if (idx == NAME_NOT_FOUND) {
+            // Fix: log the int32 stream ID that was actually looked up.
+            CLOGE("Request references unknown stream %d",
+                    streams.data.i32[i]);
+            return NULL;
+        }
+        sp<Camera3OutputStreamInterface> stream =
+                mOutputStreams.editValueAt(idx);
+
+        // Lazy completion of stream configuration (allocation/registration)
+        // on first use
+        if (stream->isConfiguring()) {
+            res = stream->finishConfiguration(mHal3Device);
+            if (res != OK) {
+                SET_ERR_L("Unable to finish configuring stream %d: %s (%d)",
+                        stream->getId(), strerror(-res), res);
+                return NULL;
+            }
+        }
+
+        newRequest->mOutputStreams.push(stream);
+    }
+    newRequest->mSettings.erase(ANDROID_REQUEST_OUTPUT_STREAMS);
+
+    return newRequest;
+}
+
+// Run the HAL3 stream-configuration handshake for the current stream set:
+// gather camera3_stream_t pointers from the input stream and every output
+// stream, call configure_streams(), then immediately finish configuration of
+// each stream. Caller must hold mLock and the device must be idle.
+status_t Camera3Device::configureStreamsLocked() {
+    ATRACE_CALL();
+    status_t res;
+
+    if (mStatus != STATUS_UNCONFIGURED && mStatus != STATUS_CONFIGURED) {
+        CLOGE("Not idle");
+        return INVALID_OPERATION;
+    }
+
+    if (!mNeedConfig) {
+        ALOGV("%s: Skipping config, no stream changes", __FUNCTION__);
+        return OK;
+    }
+
+    // Start configuring the streams
+    ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
+
+    camera3_stream_configuration config;
+
+    // +1 for the input stream if present; bidirectional streams that appear
+    // in both sets are deducted again in the loop below.
+    config.num_streams = (mInputStream != NULL) + mOutputStreams.size();
+
+    Vector<camera3_stream_t*> streams;
+    streams.setCapacity(config.num_streams);
+
+    if (mInputStream != NULL) {
+        camera3_stream_t *inputStream;
+        inputStream = mInputStream->startConfiguration();
+        if (inputStream == NULL) {
+            SET_ERR_L("Can't start input stream configuration");
+            return INVALID_OPERATION;
+        }
+        streams.add(inputStream);
+    }
+
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+
+        // Don't configure bidi streams twice, nor add them twice to the list
+        if (mOutputStreams[i].get() ==
+            static_cast<Camera3StreamInterface*>(mInputStream.get())) {
+
+            config.num_streams--;
+            continue;
+        }
+
+        camera3_stream_t *outputStream;
+        outputStream = mOutputStreams.editValueAt(i)->startConfiguration();
+        if (outputStream == NULL) {
+            SET_ERR_L("Can't start output stream configuration");
+            return INVALID_OPERATION;
+        }
+        streams.add(outputStream);
+    }
+
+    config.streams = streams.editArray();
+
+    // Do the HAL configuration; will potentially touch stream
+    // max_buffers, usage, priv fields.
+    ATRACE_BEGIN("camera3->configure_streams");
+    res = mHal3Device->ops->configure_streams(mHal3Device, &config);
+    ATRACE_END();
+
+    if (res != OK) {
+        SET_ERR_L("Unable to configure streams with HAL: %s (%d)",
+                strerror(-res), res);
+        return res;
+    }
+
+    // Finish all stream configuration immediately.
+    // TODO: Try to relax this later back to lazy completion, which should be
+    // faster
+
+    if (mInputStream != NULL && mInputStream->isConfiguring()) {
+        res = mInputStream->finishConfiguration(mHal3Device);
+        if (res != OK) {
+            SET_ERR_L("Can't finish configuring input stream %d: %s (%d)",
+                    mInputStream->getId(), strerror(-res), res);
+            return res;
+        }
+    }
+
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+        sp<Camera3OutputStreamInterface> outputStream =
+            mOutputStreams.editValueAt(i);
+        if (outputStream->isConfiguring()) {
+            res = outputStream->finishConfiguration(mHal3Device);
+            if (res != OK) {
+                SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                        outputStream->getId(), strerror(-res), res);
+                return res;
+            }
+        }
+    }
+
+    // Request thread needs to know to avoid using repeat-last-settings protocol
+    // across configure_streams() calls
+    mRequestThread->configurationComplete();
+
+    // Update device state
+
+    mNeedConfig = false;
+
+    if (config.num_streams > 0) {
+        mStatus = STATUS_CONFIGURED;
+    } else {
+        mStatus = STATUS_UNCONFIGURED;
+    }
+
+    ALOGV("%s: Camera %d: Stream configuration complete", __FUNCTION__, mId);
+
+    return OK;
+}
+
+// Public error-state entry point: acquires mLock, then delegates to the
+// locked varargs implementation.
+void Camera3Device::setErrorState(const char *fmt, ...) {
+    Mutex::Autolock l(mLock);
+    va_list args;
+    va_start(args, fmt);
+
+    setErrorStateLockedV(fmt, args);
+
+    va_end(args);
+}
+
+// va_list variant of setErrorState; acquires mLock for the caller.
+void Camera3Device::setErrorStateV(const char *fmt, va_list args) {
+    Mutex::Autolock l(mLock);
+    setErrorStateLockedV(fmt, args);
+}
+
+// Varargs wrapper for callers that already hold mLock.
+void Camera3Device::setErrorStateLocked(const char *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+
+    setErrorStateLockedV(fmt, args);
+
+    va_end(args);
+}
+
+// Core error-state transition (mLock must be held): log the cause, and on
+// the FIRST error only, save a stack trace, remember the cause, pause the
+// request thread, and move the device to STATUS_ERROR.
+void Camera3Device::setErrorStateLockedV(const char *fmt, va_list args) {
+    // Print out all error messages to log
+    String8 errorCause = String8::formatV(fmt, args);
+    ALOGE("Camera %d: %s", mId, errorCause.string());
+
+    // But only do error state transition steps for the first error
+    if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
+
+    // Save stack trace. View by dumping it later.
+    CameraTraces::saveTrace();
+    // TODO: consider adding errorCause and client pid/procname
+
+    mErrorCause = errorCause;
+
+    mRequestThread->setPaused(true);
+    mStatus = STATUS_ERROR;
+}
+
+/**
+ * In-flight request management
+ */
+
+// Record a newly-submitted capture in the in-flight map, keyed by frame
+// number, so shutter notifications and results can be matched up later.
+status_t Camera3Device::registerInFlight(int32_t frameNumber,
+        int32_t requestId, int32_t numBuffers) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInFlightLock);
+
+    ssize_t res;
+    res = mInFlightMap.add(frameNumber, InFlightRequest(requestId, numBuffers));
+    if (res < 0) return res;
+
+    return OK;
+}
+
+/**
+ * QUIRK(partial results)
+ * Check if all 3A fields are ready, and send off a partial 3A-only result
+ * to the output frame queue
+ */
+// Once all seven 3A fields are present in the accumulated partial metadata,
+// build a minimal 3A-only result, append it to mResultQueue, and wake
+// waiters. Returns false (without side effects beyond a possibly-erased
+// queue tail on insert failure) if any field is still missing.
+bool Camera3Device::processPartial3AQuirk(
+        int32_t frameNumber, int32_t requestId,
+        const CameraMetadata& partial) {
+
+    // Check if all 3A states are present
+    // The full list of fields is
+    //   android.control.afMode
+    //   android.control.awbMode
+    //   android.control.aeState
+    //   android.control.awbState
+    //   android.control.afState
+    //   android.control.afTriggerID
+    //   android.control.aePrecaptureID
+    // TODO: Add android.control.aeMode
+
+    bool gotAllStates = true;
+
+    uint8_t afMode;
+    uint8_t awbMode;
+    uint8_t aeState;
+    uint8_t afState;
+    uint8_t awbState;
+    int32_t afTriggerId;
+    int32_t aeTriggerId;
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
+        &afMode, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_MODE,
+        &awbMode, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_STATE,
+        &aeState, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_STATE,
+        &afState, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
+        &awbState, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_TRIGGER_ID,
+        &afTriggerId, frameNumber);
+
+    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+        &aeTriggerId, frameNumber);
+
+    if (!gotAllStates) return false;
+
+    ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
+        "AF state %d, AE state %d, AWB state %d, "
+        "AF trigger %d, AE precapture trigger %d",
+        __FUNCTION__, mId, frameNumber, requestId,
+        afMode, awbMode,
+        afState, aeState, awbState,
+        afTriggerId, aeTriggerId);
+
+    // Got all states, so construct a minimal result to send
+    // In addition to the above fields, this means adding in
+    //   android.request.frameCount
+    //   android.request.requestId
+    //   android.quirks.partialResult
+
+    const size_t kMinimal3AResultEntries = 10;
+
+    Mutex::Autolock l(mOutputLock);
+
+    // Insert the (still-empty) result into the queue first; insert3AResult
+    // removes it again if any update fails.
+    CameraMetadata& min3AResult =
+            *mResultQueue.insert(
+                mResultQueue.end(),
+                CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0));
+
+    if (!insert3AResult(min3AResult, ANDROID_REQUEST_FRAME_COUNT,
+            &frameNumber, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_REQUEST_ID,
+            &requestId, frameNumber)) {
+        return false;
+    }
+
+    static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
+    if (!insert3AResult(min3AResult, ANDROID_QUIRKS_PARTIAL_RESULT,
+            &partialResult, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_MODE,
+            &afMode, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_MODE,
+            &awbMode, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_STATE,
+            &aeState, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_STATE,
+            &afState, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_STATE,
+            &awbState, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_TRIGGER_ID,
+            &afTriggerId, frameNumber)) {
+        return false;
+    }
+
+    if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+            &aeTriggerId, frameNumber)) {
+        return false;
+    }
+
+    mResultSignal.signal();
+
+    return true;
+}
+
+// Fetch a single-value 3A entry from `result` into *value. Only uint8_t and
+// int32_t destinations are supported (dispatched by sizeof). Returns false
+// when the tag is absent or T has an unexpected size.
+template<typename T>
+bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
+        T* value, int32_t frameNumber) {
+    (void) frameNumber;
+
+    camera_metadata_ro_entry_t entry;
+
+    entry = result.find(tag);
+    if (entry.count == 0) {
+        // frameNumber is only referenced here, inside a verbose log that may
+        // be compiled out — hence the (void) cast above.
+        ALOGVV("%s: Camera %d: Frame %d: No %s provided by HAL!", __FUNCTION__,
+            mId, frameNumber, get_camera_metadata_tag_name(tag));
+        return false;
+    }
+
+    if (sizeof(T) == sizeof(uint8_t)) {
+        *value = entry.data.u8[0];
+    } else if (sizeof(T) == sizeof(int32_t)) {
+        *value = entry.data.i32[0];
+    } else {
+        ALOGE("%s: Unexpected type", __FUNCTION__);
+        return false;
+    }
+    return true;
+}
+
+// Write one value into the partial 3A result being built at the tail of
+// mResultQueue. On failure, removes that queue tail and puts the device into
+// the error state; caller must hold mOutputLock.
+template<typename T>
+bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
+        const T* value, int32_t frameNumber) {
+    if (result.update(tag, value, 1) != NO_ERROR) {
+        mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
+        SET_ERR("Frame %d: Failed to set %s in partial metadata",
+                frameNumber, get_camera_metadata_tag_name(tag));
+        return false;
+    }
+    return true;
+}
+
+/**
+ * Camera HAL device callback methods
+ */
+
+// HAL callback for each capture result (possibly partial, possibly
+// buffers-only). Updates the in-flight entry under mInFlightLock, then
+// queues final result metadata under mOutputLock, then returns output
+// buffers with no locks held — the lock order matters throughout.
+void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
+    ATRACE_CALL();
+
+    status_t res;
+
+    uint32_t frameNumber = result->frame_number;
+    if (result->result == NULL && result->num_output_buffers == 0) {
+        SET_ERR("No result data provided by HAL for frame %d",
+                frameNumber);
+        return;
+    }
+    bool partialResultQuirk = false;
+    CameraMetadata collectedQuirkResult;
+
+    // Get capture timestamp from list of in-flight requests, where it was added
+    // by the shutter notification for this frame. Then update the in-flight
+    // status and remove the in-flight entry if all result data has been
+    // received.
+    nsecs_t timestamp = 0;
+    {
+        Mutex::Autolock l(mInFlightLock);
+        ssize_t idx = mInFlightMap.indexOfKey(frameNumber);
+        if (idx == NAME_NOT_FOUND) {
+            SET_ERR("Unknown frame number for capture result: %d",
+                    frameNumber);
+            return;
+        }
+        InFlightRequest &request = mInFlightMap.editValueAt(idx);
+
+        // Check if this result carries only partial metadata
+        if (mUsePartialResultQuirk && result->result != NULL) {
+            camera_metadata_ro_entry_t partialResultEntry;
+            res = find_camera_metadata_ro_entry(result->result,
+                    ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
+            if (res != NAME_NOT_FOUND &&
+                    partialResultEntry.count > 0 &&
+                    partialResultEntry.data.u8[0] ==
+                    ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+                // A partial result. Flag this as such, and collect this
+                // set of metadata into the in-flight entry.
+                partialResultQuirk = true;
+                request.partialResultQuirk.collectedResult.append(
+                    result->result);
+                request.partialResultQuirk.collectedResult.erase(
+                    ANDROID_QUIRKS_PARTIAL_RESULT);
+                // Fire off a 3A-only result if possible
+                if (!request.partialResultQuirk.haveSent3A) {
+                    request.partialResultQuirk.haveSent3A =
+                            processPartial3AQuirk(frameNumber,
+                                    request.requestId,
+                                    request.partialResultQuirk.collectedResult);
+                }
+            }
+        }
+
+        timestamp = request.captureTimestamp;
+        /**
+         * One of the following must happen before it's legal to call process_capture_result,
+         * unless partial metadata is being provided:
+         * - CAMERA3_MSG_SHUTTER (expected during normal operation)
+         * - CAMERA3_MSG_ERROR (expected during flush)
+         */
+        if (request.requestStatus == OK && timestamp == 0 && !partialResultQuirk) {
+            SET_ERR("Called before shutter notify for frame %d",
+                    frameNumber);
+            return;
+        }
+
+        // Did we get the (final) result metadata for this capture?
+        if (result->result != NULL && !partialResultQuirk) {
+            if (request.haveResultMetadata) {
+                SET_ERR("Called multiple times with metadata for frame %d",
+                        frameNumber);
+                return;
+            }
+            if (mUsePartialResultQuirk &&
+                    !request.partialResultQuirk.collectedResult.isEmpty()) {
+                collectedQuirkResult.acquire(
+                    request.partialResultQuirk.collectedResult);
+            }
+            request.haveResultMetadata = true;
+        }
+
+        request.numBuffersLeft -= result->num_output_buffers;
+
+        if (request.numBuffersLeft < 0) {
+            SET_ERR("Too many buffers returned for frame %d",
+                    frameNumber);
+            return;
+        }
+
+        // Check if everything has arrived for this result (buffers and metadata)
+        if (request.haveResultMetadata && request.numBuffersLeft == 0) {
+            ATRACE_ASYNC_END("frame capture", frameNumber);
+            mInFlightMap.removeItemsAt(idx, 1);
+        }
+
+        // Sanity check - if we have too many in-flight frames, something has
+        // likely gone wrong
+        if (mInFlightMap.size() > kInFlightWarnLimit) {
+            CLOGE("In-flight list too large: %d", mInFlightMap.size());
+        }
+
+    }
+
+    // Process the result metadata, if provided
+    bool gotResult = false;
+    if (result->result != NULL && !partialResultQuirk) {
+        Mutex::Autolock l(mOutputLock);
+
+        gotResult = true;
+
+        // Final results must arrive strictly in frame-number order.
+        if (frameNumber != mNextResultFrameNumber) {
+            SET_ERR("Out-of-order capture result metadata submitted! "
+                    "(got frame number %d, expecting %d)",
+                    frameNumber, mNextResultFrameNumber);
+            return;
+        }
+        mNextResultFrameNumber++;
+
+        CameraMetadata captureResult;
+        captureResult = result->result;
+
+        if (captureResult.update(ANDROID_REQUEST_FRAME_COUNT,
+                        (int32_t*)&frameNumber, 1) != OK) {
+            SET_ERR("Failed to set frame# in metadata (%d)",
+                    frameNumber);
+            gotResult = false;
+        } else {
+            ALOGVV("%s: Camera %d: Set frame# in metadata (%d)",
+                    __FUNCTION__, mId, frameNumber);
+        }
+
+        // Append any previous partials to form a complete result
+        if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
+            captureResult.append(collectedQuirkResult);
+        }
+
+        captureResult.sort();
+
+        // Check that there's a timestamp in the result metadata
+
+        camera_metadata_entry entry =
+                captureResult.find(ANDROID_SENSOR_TIMESTAMP);
+        if (entry.count == 0) {
+            SET_ERR("No timestamp provided by HAL for frame %d!",
+                    frameNumber);
+            gotResult = false;
+        } else if (timestamp != entry.data.i64[0]) {
+            SET_ERR("Timestamp mismatch between shutter notify and result"
+                    " metadata for frame %d (%lld vs %lld respectively)",
+                    frameNumber, timestamp, entry.data.i64[0]);
+            gotResult = false;
+        }
+
+        if (gotResult) {
+            // Valid result, insert into queue
+            CameraMetadata& queuedResult =
+                *mResultQueue.insert(mResultQueue.end(), CameraMetadata());
+            queuedResult.swap(captureResult);
+        }
+    } // scope for mOutputLock
+
+    // Return completed buffers to their streams with the timestamp
+
+    for (size_t i = 0; i < result->num_output_buffers; i++) {
+        Camera3Stream *stream =
+                Camera3Stream::cast(result->output_buffers[i].stream);
+        res = stream->returnBuffer(result->output_buffers[i], timestamp);
+        // Note: stream may be deallocated at this point, if this buffer was the
+        // last reference to it.
+        if (res != OK) {
+            ALOGE("Can't return buffer %d for frame %d to its stream: "
+                    " %s (%d)", i, frameNumber, strerror(-res), res);
+        }
+    }
+
+    // Finally, signal any waiters for new frames
+
+    if (gotResult) {
+        mResultSignal.signal();
+    }
+
+}
+
+
+
+// HAL notification callback: dispatches error and shutter messages. Errors
+// are recorded on the in-flight entry and forwarded to the listener; shutter
+// events stamp the capture timestamp into the in-flight entry and must
+// arrive strictly in frame-number order.
+void Camera3Device::notify(const camera3_notify_msg *msg) {
+    ATRACE_CALL();
+    // Snapshot the listener under mOutputLock; invoke it lock-free later.
+    NotificationListener *listener;
+    {
+        Mutex::Autolock l(mOutputLock);
+        listener = mListener;
+    }
+
+    if (msg == NULL) {
+        SET_ERR("HAL sent NULL notify message!");
+        return;
+    }
+
+    switch (msg->type) {
+        case CAMERA3_MSG_ERROR: {
+            int streamId = 0;
+            if (msg->message.error.error_stream != NULL) {
+                Camera3Stream *stream =
+                        Camera3Stream::cast(
+                                msg->message.error.error_stream);
+                streamId = stream->getId();
+            }
+            ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
+                    mId, __FUNCTION__, msg->message.error.frame_number,
+                    streamId, msg->message.error.error_code);
+
+            // Set request error status for the request in the in-flight tracking
+            {
+                Mutex::Autolock l(mInFlightLock);
+                ssize_t idx = mInFlightMap.indexOfKey(msg->message.error.frame_number);
+                if (idx >= 0) {
+                    mInFlightMap.editValueAt(idx).requestStatus = msg->message.error.error_code;
+                }
+            }
+
+            if (listener != NULL) {
+                listener->notifyError(msg->message.error.error_code,
+                        msg->message.error.frame_number, streamId);
+            }
+            break;
+        }
+        case CAMERA3_MSG_SHUTTER: {
+            ssize_t idx;
+            uint32_t frameNumber = msg->message.shutter.frame_number;
+            nsecs_t timestamp = msg->message.shutter.timestamp;
+            // Verify ordering of shutter notifications
+            {
+                Mutex::Autolock l(mOutputLock);
+                if (frameNumber != mNextShutterFrameNumber) {
+                    SET_ERR("Shutter notification out-of-order. Expected "
+                            "notification for frame %d, got frame %d",
+                            mNextShutterFrameNumber, frameNumber);
+                    break;
+                }
+                mNextShutterFrameNumber++;
+            }
+
+            int32_t requestId = -1;
+
+            // Set timestamp for the request in the in-flight tracking
+            // and get the request ID to send upstream
+            {
+                Mutex::Autolock l(mInFlightLock);
+                idx = mInFlightMap.indexOfKey(frameNumber);
+                if (idx >= 0) {
+                    InFlightRequest &r = mInFlightMap.editValueAt(idx);
+                    r.captureTimestamp = timestamp;
+                    requestId = r.requestId;
+                }
+            }
+            if (idx < 0) {
+                SET_ERR("Shutter notification for non-existent frame number %d",
+                        frameNumber);
+                break;
+            }
+            ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %lld",
+                    mId, __FUNCTION__, frameNumber, requestId, timestamp);
+            // Call listener, if any
+            if (listener != NULL) {
+                listener->notifyShutter(requestId, timestamp);
+            }
+            break;
+        }
+        default:
+            SET_ERR("Unknown notify message from HAL: %d",
+                    msg->type);
+    }
+}
+
+// Return a copy of the most recent request settings sent to the HAL, or an
+// empty CameraMetadata if the request thread does not exist.
+CameraMetadata Camera3Device::getLatestRequestLocked() {
+    ALOGV("%s", __FUNCTION__);
+
+    CameraMetadata retVal;
+
+    if (mRequestThread != NULL) {
+        retVal = mRequestThread->getLatestRequest();
+    }
+
+    return retVal;
+}
+
+/**
+ * RequestThread inner class methods
+ */
+
+// RequestThread: owns the queue of capture requests and submits them to the
+// HAL. Starts logically paused (mPaused=true) until a request arrives, and
+// registers itself as a component with the status tracker.
+Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent,
+        sp<StatusTracker> statusTracker,
+        camera3_device_t *hal3Device) :
+        Thread(false),
+        mParent(parent),
+        mStatusTracker(statusTracker),
+        mHal3Device(hal3Device),
+        mId(getId(parent)),
+        mReconfigured(false),
+        mDoPause(false),
+        mPaused(true),
+        mFrameNumber(0),
+        mLatestRequestId(NAME_NOT_FOUND) {
+    mStatusId = statusTracker->addComponent();
+}
+
+// Mark that configure_streams() just ran, so the thread won't use the
+// repeat-last-settings protocol across the configuration boundary.
+void Camera3Device::RequestThread::configurationComplete() {
+    Mutex::Autolock l(mRequestLock);
+    mReconfigured = true;
+}
+
+// Append a single capture request to the queue and wake the thread if it is
+// parked in the paused state.
+status_t Camera3Device::RequestThread::queueRequest(
+         sp<CaptureRequest> request) {
+    Mutex::Autolock l(mRequestLock);
+    mRequestQueue.push_back(request);
+
+    unpauseForNewRequests();
+
+    return OK;
+}
+
+
+// Queue an array of triggers; stops at (and returns) the first failure.
+status_t Camera3Device::RequestThread::queueTrigger(
+        RequestTrigger trigger[],
+        size_t count) {
+
+    Mutex::Autolock l(mTriggerMutex);
+    status_t ret;
+
+    for (size_t i = 0; i < count; ++i) {
+        ret = queueTriggerLocked(trigger[i]);
+
+        if (ret != OK) {
+            return ret;
+        }
+    }
+
+    return OK;
+}
+
+// Camera ID of the owning device, or 0 if the device has already been
+// destroyed (weak pointer failed to promote).
+int Camera3Device::RequestThread::getId(const wp<Camera3Device> &device) {
+    sp<Camera3Device> d = device.promote();
+    if (d != NULL) return d->mId;
+    return 0;
+}
+
+// Stage one trigger keyed by its metadata tag (mTriggerMutex must be held).
+// Only byte and int32 tags are supported.
+status_t Camera3Device::RequestThread::queueTriggerLocked(
+        RequestTrigger trigger) {
+
+    uint32_t tag = trigger.metadataTag;
+    ssize_t index = mTriggerMap.indexOfKey(tag);
+
+    switch (trigger.getTagType()) {
+        case TYPE_BYTE:
+        // fall-through
+        case TYPE_INT32:
+            break;
+        default:
+            ALOGE("%s: Type not supported: 0x%x", __FUNCTION__,
+                    trigger.getTagType());
+            return INVALID_OPERATION;
+    }
+
+    /**
+     * Collect only the latest trigger, since we only have 1 field
+     * in the request settings per trigger tag, and can't send more than 1
+     * trigger per request.
+     */
+    if (index != NAME_NOT_FOUND) {
+        mTriggerMap.editValueAt(index) = trigger;
+    } else {
+        mTriggerMap.add(tag, trigger);
+    }
+
+    return OK;
+}
+
+// Replace the repeating-request burst with a copy of `requests` and wake the
+// thread if it is parked.
+status_t Camera3Device::RequestThread::setRepeatingRequests(
+        const RequestList &requests) {
+    Mutex::Autolock l(mRequestLock);
+    mRepeatingRequests.clear();
+    mRepeatingRequests.insert(mRepeatingRequests.begin(),
+            requests.begin(), requests.end());
+
+    unpauseForNewRequests();
+
+    return OK;
+}
+
+// Stop repeating captures; one-shot requests already queued are unaffected.
+status_t Camera3Device::RequestThread::clearRepeatingRequests() {
+    Mutex::Autolock l(mRequestLock);
+    mRepeatingRequests.clear();
+    return OK;
+}
+
+// Drop everything: repeating requests, one-shot queue, and staged triggers.
+status_t Camera3Device::RequestThread::clear() {
+    Mutex::Autolock l(mRequestLock);
+    mRepeatingRequests.clear();
+    mRequestQueue.clear();
+    mTriggerMap.clear();
+    return OK;
+}
+
+// Request the thread to pause/unpause; the thread honors mDoPause at its
+// next loop iteration, woken here via the signal.
+void Camera3Device::RequestThread::setPaused(bool paused) {
+    Mutex::Autolock l(mPauseLock);
+    mDoPause = paused;
+    mDoPauseSignal.signal();
+}
+
+// Block until a request with `requestId` has been sent to the HAL, shrinking
+// the remaining timeout across spurious wakeups so the total wait is bounded.
+status_t Camera3Device::RequestThread::waitUntilRequestProcessed(
+        int32_t requestId, nsecs_t timeout) {
+    Mutex::Autolock l(mLatestRequestMutex);
+    status_t res;
+    while (mLatestRequestId != requestId) {
+        nsecs_t startTime = systemTime();
+
+        res = mLatestRequestSignal.waitRelative(mLatestRequestMutex, timeout);
+        if (res != OK) return res;
+
+        timeout -= (systemTime() - startTime);
+    }
+
+    return OK;
+}
+
+// Begin thread shutdown: set the exit flag via the base class, then signal
+// both condition variables so a blocked threadLoop() observes the flag.
+void Camera3Device::RequestThread::requestExit() {
+    // Call parent to set up shutdown
+    Thread::requestExit();
+    // The exit from any possible waits
+    mDoPauseSignal.signal();
+    mRequestSignal.signal();
+}
+
+/**
+ * One iteration of the request loop: honor a pending pause, wait for the
+ * next request (repeating or queued), mix in any queued triggers, acquire
+ * input/output buffers, register the capture as in-flight, then submit it
+ * to the HAL with process_capture_request.
+ *
+ * Returns false only on fatal errors (which also put the device into the
+ * error state via SET_ERR); per-request buffer acquisition failures clean
+ * up and return true so the loop continues with the next request.
+ */
+bool Camera3Device::RequestThread::threadLoop() {
+
+    status_t res;
+
+    // Handle paused state.
+    if (waitIfPaused()) {
+        return true;
+    }
+
+    // Get work to do
+
+    sp<CaptureRequest> nextRequest = waitForNextRequest();
+    if (nextRequest == NULL) {
+        return true;
+    }
+
+    // Create request to HAL
+    camera3_capture_request_t request = camera3_capture_request_t();
+    Vector<camera3_stream_buffer_t> outputBuffers;
+
+    // Get the request ID, if any
+    int requestId;
+    camera_metadata_entry_t requestIdEntry =
+            nextRequest->mSettings.find(ANDROID_REQUEST_ID);
+    if (requestIdEntry.count > 0) {
+        requestId = requestIdEntry.data.i32[0];
+    } else {
+        ALOGW("%s: Did not have android.request.id set in the request",
+                __FUNCTION__);
+        requestId = NAME_NOT_FOUND;
+    }
+
+    // Insert any queued triggers (before metadata is locked)
+    // insertTriggers returns the number of triggers mixed in, or <0 on error.
+    int32_t triggerCount;
+    res = insertTriggers(nextRequest);
+    if (res < 0) {
+        SET_ERR("RequestThread: Unable to insert triggers "
+                "(capture request %d, HAL device: %s (%d)",
+                (mFrameNumber+1), strerror(-res), res);
+        cleanUpFailedRequest(request, nextRequest, outputBuffers);
+        return false;
+    }
+    triggerCount = res;
+
+    // Settings must also be re-sent if the PREVIOUS submission had triggers
+    // mixed in, since its settings were temporarily rewritten.
+    bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
+
+    // If the request is the same as last, or we had triggers last time
+    if (mPrevRequest != nextRequest || triggersMixedIn) {
+        /**
+         * HAL workaround:
+         * Insert a dummy trigger ID if a trigger is set but no trigger ID is
+         */
+        res = addDummyTriggerIds(nextRequest);
+        if (res != OK) {
+            SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
+                    "(capture request %d, HAL device: %s (%d)",
+                    (mFrameNumber+1), strerror(-res), res);
+            cleanUpFailedRequest(request, nextRequest, outputBuffers);
+            return false;
+        }
+
+        /**
+         * The request should be presorted so accesses in HAL
+         * are O(logn). Sidenote, sorting a sorted metadata is nop.
+         */
+        nextRequest->mSettings.sort();
+        request.settings = nextRequest->mSettings.getAndLock();
+        mPrevRequest = nextRequest;
+        ALOGVV("%s: Request settings are NEW", __FUNCTION__);
+
+        IF_ALOGV() {
+            camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
+            find_camera_metadata_ro_entry(
+                    request.settings,
+                    ANDROID_CONTROL_AF_TRIGGER,
+                    &e
+            );
+            if (e.count > 0) {
+                ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
+                        __FUNCTION__,
+                        mFrameNumber+1,
+                        e.data.u8[0]);
+            }
+        }
+    } else {
+        // leave request.settings NULL to indicate 'reuse latest given'
+        ALOGVV("%s: Request settings are REUSED",
+                __FUNCTION__);
+    }
+
+    camera3_stream_buffer_t inputBuffer;
+
+    // Fill in buffers
+
+    if (nextRequest->mInputStream != NULL) {
+        request.input_buffer = &inputBuffer;
+        res = nextRequest->mInputStream->getInputBuffer(&inputBuffer);
+        if (res != OK) {
+            // Non-fatal: skip just this request.
+            ALOGE("RequestThread: Can't get input buffer, skipping request:"
+                    " %s (%d)", strerror(-res), res);
+            cleanUpFailedRequest(request, nextRequest, outputBuffers);
+            return true;
+        }
+    } else {
+        request.input_buffer = NULL;
+    }
+
+    // Pre-size the output buffer vector so array() below stays valid while
+    // individual entries are filled in.
+    outputBuffers.insertAt(camera3_stream_buffer_t(), 0,
+            nextRequest->mOutputStreams.size());
+    request.output_buffers = outputBuffers.array();
+    for (size_t i = 0; i < nextRequest->mOutputStreams.size(); i++) {
+        res = nextRequest->mOutputStreams.editItemAt(i)->
+                getBuffer(&outputBuffers.editItemAt(i));
+        if (res != OK) {
+            // Non-fatal: skip just this request. num_output_buffers counts
+            // how many were acquired, so cleanup returns exactly those.
+            ALOGE("RequestThread: Can't get output buffer, skipping request:"
+                    " %s (%d)", strerror(-res), res);
+            cleanUpFailedRequest(request, nextRequest, outputBuffers);
+            return true;
+        }
+        request.num_output_buffers++;
+    }
+
+    request.frame_number = mFrameNumber++;
+
+    // Log request in the in-flight queue
+    sp<Camera3Device> parent = mParent.promote();
+    if (parent == NULL) {
+        CLOGE("RequestThread: Parent is gone");
+        cleanUpFailedRequest(request, nextRequest, outputBuffers);
+        return false;
+    }
+
+    res = parent->registerInFlight(request.frame_number, requestId,
+            request.num_output_buffers);
+    if (res != OK) {
+        SET_ERR("RequestThread: Unable to register new in-flight request:"
+                " %s (%d)", strerror(-res), res);
+        cleanUpFailedRequest(request, nextRequest, outputBuffers);
+        return false;
+    }
+
+    // Inform waitUntilRequestProcessed thread of a new request ID
+    {
+        Mutex::Autolock al(mLatestRequestMutex);
+
+        mLatestRequestId = requestId;
+        mLatestRequestSignal.signal();
+    }
+
+    // Submit request and block until ready for next one
+    ATRACE_ASYNC_BEGIN("frame capture", request.frame_number);
+    ATRACE_BEGIN("camera3->process_capture_request");
+    res = mHal3Device->ops->process_capture_request(mHal3Device, &request);
+    ATRACE_END();
+
+    if (res != OK) {
+        SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
+                " device: %s (%d)", request.frame_number, strerror(-res), res);
+        cleanUpFailedRequest(request, nextRequest, outputBuffers);
+        return false;
+    }
+
+    // Update the latest request sent to HAL
+    if (request.settings != NULL) { // Don't update them if they were unchanged
+        Mutex::Autolock al(mLatestRequestMutex);
+
+        camera_metadata_t* cloned = clone_camera_metadata(request.settings);
+        mLatestRequest.acquire(cloned);
+    }
+
+    if (request.settings != NULL) {
+        nextRequest->mSettings.unlock(request.settings);
+    }
+
+    // Remove any previously queued triggers (after unlock)
+    res = removeTriggers(mPrevRequest);
+    if (res != OK) {
+        SET_ERR("RequestThread: Unable to remove triggers "
+                "(capture request %d, HAL device: %s (%d)",
+                request.frame_number, strerror(-res), res);
+        return false;
+    }
+    mPrevTriggers = triggerCount;
+
+    // Return input buffer back to framework
+    if (request.input_buffer != NULL) {
+        Camera3Stream *stream =
+                Camera3Stream::cast(request.input_buffer->stream);
+        res = stream->returnInputBuffer(*(request.input_buffer));
+        // Note: stream may be deallocated at this point, if this buffer was the
+        // last reference to it.
+        if (res != OK) {
+            ALOGE("%s: RequestThread: Can't return input buffer for frame %d to"
+                    " its stream:%s (%d)", __FUNCTION__,
+                    request.frame_number, strerror(-res), res);
+            // TODO: Report error upstream
+        }
+    }
+
+    return true;
+}
+
+// Return a copy of the settings most recently handed to the HAL via
+// process_capture_request. Thread-safe; takes mLatestRequestMutex.
+CameraMetadata Camera3Device::RequestThread::getLatestRequest() const {
+    Mutex::Autolock al(mLatestRequestMutex);
+
+    ALOGV("RequestThread::%s", __FUNCTION__);
+
+    return mLatestRequest;
+}
+
+/**
+ * Release all resources held by a request that could not be submitted:
+ * unlock the settings buffer if it was locked for the HAL, and return any
+ * acquired input/output buffers to their streams in the ERROR state so
+ * the framework knows they carry no valid image data.
+ */
+void Camera3Device::RequestThread::cleanUpFailedRequest(
+        camera3_capture_request_t &request,
+        sp<CaptureRequest> &nextRequest,
+        Vector<camera3_stream_buffer_t> &outputBuffers) {
+
+    if (request.settings != NULL) {
+        nextRequest->mSettings.unlock(request.settings);
+    }
+    if (request.input_buffer != NULL) {
+        request.input_buffer->status = CAMERA3_BUFFER_STATUS_ERROR;
+        nextRequest->mInputStream->returnInputBuffer(*(request.input_buffer));
+    }
+    // Only the first num_output_buffers entries were successfully acquired
+    // in threadLoop; return exactly those.
+    for (size_t i = 0; i < request.num_output_buffers; i++) {
+        outputBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+        nextRequest->mOutputStreams.editItemAt(i)->returnBuffer(
+            outputBuffers[i], 0);
+    }
+}
+
+/**
+ * Block until a capture request is available and return it, or return NULL
+ * if the wait times out (after marking the thread idle with the
+ * StatusTracker). Repeating requests are re-enqueued as one atomic batch
+ * whenever the queue drains, so the application always observes complete
+ * in-order bursts. Also transitions the thread back to active if it had
+ * been marked paused.
+ */
+sp<Camera3Device::CaptureRequest>
+        Camera3Device::RequestThread::waitForNextRequest() {
+    status_t res;
+    sp<CaptureRequest> nextRequest;
+
+    // Optimized a bit for the simple steady-state case (single repeating
+    // request), to avoid putting that request in the queue temporarily.
+    Mutex::Autolock l(mRequestLock);
+
+    while (mRequestQueue.empty()) {
+        if (!mRepeatingRequests.empty()) {
+            // Always atomically enqueue all requests in a repeating request
+            // list. Guarantees a complete in-sequence set of captures to
+            // application.
+            const RequestList &requests = mRepeatingRequests;
+            RequestList::const_iterator firstRequest =
+                    requests.begin();
+            nextRequest = *firstRequest;
+            mRequestQueue.insert(mRequestQueue.end(),
+                    ++firstRequest,
+                    requests.end());
+            // No need to wait any longer
+            break;
+        }
+
+        // Return value intentionally unused: the queue/exit state checked
+        // below decides what to do, whether the wait timed out or not.
+        res = mRequestSignal.waitRelative(mRequestLock, kRequestTimeout);
+
+        if ((mRequestQueue.empty() && mRepeatingRequests.empty()) ||
+                exitPending()) {
+            Mutex::Autolock pl(mPauseLock);
+            if (mPaused == false) {
+                ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
+                mPaused = true;
+                // Let the tracker know
+                sp<StatusTracker> statusTracker = mStatusTracker.promote();
+                if (statusTracker != 0) {
+                    statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+                }
+            }
+            // Stop waiting for now and let thread management happen
+            return NULL;
+        }
+    }
+
+    if (nextRequest == NULL) {
+        // Don't have a repeating request already in hand, so queue
+        // must have an entry now.
+        RequestList::iterator firstRequest =
+                mRequestQueue.begin();
+        nextRequest = *firstRequest;
+        mRequestQueue.erase(firstRequest);
+    }
+
+    // In case we've been unpaused by setPaused clearing mDoPause, need to
+    // update internal pause state (capture/setRepeatingRequest unpause
+    // directly).
+    Mutex::Autolock pl(mPauseLock);
+    if (mPaused) {
+        ALOGV("%s: RequestThread: Unpaused", __FUNCTION__);
+        sp<StatusTracker> statusTracker = mStatusTracker.promote();
+        if (statusTracker != 0) {
+            statusTracker->markComponentActive(mStatusId);
+        }
+    }
+    mPaused = false;
+
+    // Check if we've reconfigured since last time, and reset the preview
+    // request if so. Can't use 'NULL request == repeat' across configure calls.
+    if (mReconfigured) {
+        mPrevRequest.clear();
+        mReconfigured = false;
+    }
+
+    return nextRequest;
+}
+
+/**
+ * If a pause was requested via setPaused, mark the thread idle with the
+ * StatusTracker and block until unpaused. Returns true when the caller
+ * (threadLoop) should abandon this iteration — the pause wait timed out
+ * or thread exit is pending — and false once the thread may proceed.
+ */
+bool Camera3Device::RequestThread::waitIfPaused() {
+    status_t res;
+    Mutex::Autolock l(mPauseLock);
+    while (mDoPause) {
+        if (mPaused == false) {
+            mPaused = true;
+            ALOGV("%s: RequestThread: Paused", __FUNCTION__);
+            // Let the tracker know
+            sp<StatusTracker> statusTracker = mStatusTracker.promote();
+            if (statusTracker != 0) {
+                statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+            }
+        }
+
+        res = mDoPauseSignal.waitRelative(mPauseLock, kRequestTimeout);
+        if (res == TIMED_OUT || exitPending()) {
+            return true;
+        }
+    }
+    // We don't set mPaused to false here, because waitForNextRequest needs
+    // to further manage the paused state in case of starvation.
+    return false;
+}
+
+/**
+ * Wake the thread because new work has arrived (capture or repeating
+ * request), and mark it active with the StatusTracker if it had been
+ * idle. Has no effect on an explicit pause requested via setPaused.
+ */
+void Camera3Device::RequestThread::unpauseForNewRequests() {
+    // With work to do, mark thread as unpaused.
+    // If paused by request (setPaused), don't resume, to avoid
+    // extra signaling/waiting overhead to waitUntilPaused
+    mRequestSignal.signal();
+    Mutex::Autolock p(mPauseLock);
+    if (!mDoPause) {
+        ALOGV("%s: RequestThread: Going active", __FUNCTION__);
+        if (mPaused) {
+            sp<StatusTracker> statusTracker = mStatusTracker.promote();
+            if (statusTracker != 0) {
+                statusTracker->markComponentActive(mStatusId);
+            }
+        }
+        mPaused = false;
+    }
+}
+
+void Camera3Device::RequestThread::setErrorState(const char *fmt, ...) {
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != NULL) {
+ va_list args;
+ va_start(args, fmt);
+
+ parent->setErrorStateV(fmt, args);
+
+ va_end(args);
+ }
+}
+
+/**
+ * Mix all queued triggers into |request|'s settings, remembering the
+ * previous value (or absence) of each rewritten tag so removeTriggers()
+ * can restore the settings after submission.
+ *
+ * Returns the number of triggers inserted (>= 0) or a negative error
+ * code. Takes mTriggerMutex; clears mTriggerMap on success.
+ */
+status_t Camera3Device::RequestThread::insertTriggers(
+        const sp<CaptureRequest> &request) {
+
+    Mutex::Autolock al(mTriggerMutex);
+
+    CameraMetadata &metadata = request->mSettings;
+    size_t count = mTriggerMap.size();
+
+    for (size_t i = 0; i < count; ++i) {
+        RequestTrigger trigger = mTriggerMap.valueAt(i);
+
+        uint32_t tag = trigger.metadataTag;
+        camera_metadata_entry entry = metadata.find(tag);
+
+        if (entry.count > 0) {
+            /**
+             * Already has an entry for this trigger in the request.
+             * Rewrite it with our requested trigger value.
+             */
+            RequestTrigger oldTrigger = trigger;
+
+            // Read the existing value through the union member matching the
+            // tag's type. Reading data.u8[0] for an INT32 tag would capture
+            // only the low byte, so removeTriggers() would restore a
+            // corrupted value.
+            switch (trigger.getTagType()) {
+                case TYPE_BYTE:
+                    oldTrigger.entryValue = entry.data.u8[0];
+                    break;
+                case TYPE_INT32:
+                    oldTrigger.entryValue = entry.data.i32[0];
+                    break;
+                default:
+                    ALOGE("%s: Type not supported: 0x%x",
+                            __FUNCTION__,
+                            trigger.getTagType());
+                    return INVALID_OPERATION;
+            }
+
+            mTriggerReplacedMap.add(tag, oldTrigger);
+        } else {
+            /**
+             * More typical, no trigger entry, so we just add it
+             */
+            mTriggerRemovedMap.add(tag, trigger);
+        }
+
+        status_t res;
+
+        // Write the trigger value into the settings, again using the union
+        // member appropriate for the tag's type.
+        switch (trigger.getTagType()) {
+            case TYPE_BYTE: {
+                uint8_t entryValue = static_cast<uint8_t>(trigger.entryValue);
+                res = metadata.update(tag,
+                        &entryValue,
+                        /*count*/1);
+                break;
+            }
+            case TYPE_INT32:
+                res = metadata.update(tag,
+                        &trigger.entryValue,
+                        /*count*/1);
+                break;
+            default:
+                ALOGE("%s: Type not supported: 0x%x",
+                        __FUNCTION__,
+                        trigger.getTagType());
+                return INVALID_OPERATION;
+        }
+
+        if (res != OK) {
+            ALOGE("%s: Failed to update request metadata with trigger tag %s"
+                    ", value %d", __FUNCTION__, trigger.getTagName(),
+                    trigger.entryValue);
+            return res;
+        }
+
+        ALOGV("%s: Mixed in trigger %s, value %d", __FUNCTION__,
+                trigger.getTagName(),
+                trigger.entryValue);
+    }
+
+    mTriggerMap.clear();
+
+    return count;
+}
+
+/**
+ * Undo the settings rewrites performed by insertTriggers() on this
+ * request: restore the previous values of tags that were overwritten and
+ * erase tags that insertTriggers() newly added. Takes mTriggerMutex;
+ * clears both bookkeeping maps on success.
+ */
+status_t Camera3Device::RequestThread::removeTriggers(
+        const sp<CaptureRequest> &request) {
+    Mutex::Autolock al(mTriggerMutex);
+
+    CameraMetadata &metadata = request->mSettings;
+
+    /**
+     * Replace all old entries with their old values.
+     */
+    for (size_t i = 0; i < mTriggerReplacedMap.size(); ++i) {
+        RequestTrigger trigger = mTriggerReplacedMap.valueAt(i);
+
+        status_t res;
+
+        uint32_t tag = trigger.metadataTag;
+        switch (trigger.getTagType()) {
+            case TYPE_BYTE: {
+                uint8_t entryValue = static_cast<uint8_t>(trigger.entryValue);
+                res = metadata.update(tag,
+                        &entryValue,
+                        /*count*/1);
+                break;
+            }
+            case TYPE_INT32:
+                res = metadata.update(tag,
+                        &trigger.entryValue,
+                        /*count*/1);
+                break;
+            default:
+                ALOGE("%s: Type not supported: 0x%x",
+                        __FUNCTION__,
+                        trigger.getTagType());
+                return INVALID_OPERATION;
+        }
+
+        if (res != OK) {
+            ALOGE("%s: Failed to restore request metadata with trigger tag %s"
+                    ", trigger value %d", __FUNCTION__,
+                    trigger.getTagName(), trigger.entryValue);
+            return res;
+        }
+    }
+    mTriggerReplacedMap.clear();
+
+    /**
+     * Remove all new entries.
+     */
+    for (size_t i = 0; i < mTriggerRemovedMap.size(); ++i) {
+        RequestTrigger trigger = mTriggerRemovedMap.valueAt(i);
+        status_t res = metadata.erase(trigger.metadataTag);
+
+        if (res != OK) {
+            ALOGE("%s: Failed to erase metadata with trigger tag %s"
+                    ", trigger value %d", __FUNCTION__,
+                    trigger.getTagName(), trigger.entryValue);
+            return res;
+        }
+    }
+    mTriggerRemovedMap.clear();
+
+    return OK;
+}
+
+/**
+ * HAL workaround: if an AF or AE-precapture trigger is active in the
+ * settings but no corresponding trigger ID is present, insert a dummy
+ * non-zero ID so the HAL always sees a trigger/ID pair. Returns OK, or
+ * the error from a failed metadata update.
+ */
+status_t Camera3Device::RequestThread::addDummyTriggerIds(
+        const sp<CaptureRequest> &request) {
+    // Trigger ID 0 has special meaning in the HAL2 spec, so avoid it here
+    static const int32_t dummyTriggerId = 1;
+    status_t res;
+
+    CameraMetadata &metadata = request->mSettings;
+
+    // If AF trigger is active, insert a dummy AF trigger ID if none already
+    // exists
+    camera_metadata_entry afTrigger = metadata.find(ANDROID_CONTROL_AF_TRIGGER);
+    camera_metadata_entry afId = metadata.find(ANDROID_CONTROL_AF_TRIGGER_ID);
+    if (afTrigger.count > 0 &&
+            afTrigger.data.u8[0] != ANDROID_CONTROL_AF_TRIGGER_IDLE &&
+            afId.count == 0) {
+        res = metadata.update(ANDROID_CONTROL_AF_TRIGGER_ID, &dummyTriggerId, 1);
+        if (res != OK) return res;
+    }
+
+    // If AE precapture trigger is active, insert a dummy precapture trigger ID
+    // if none already exists
+    camera_metadata_entry pcTrigger =
+            metadata.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
+    camera_metadata_entry pcId = metadata.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
+    if (pcTrigger.count > 0 &&
+            pcTrigger.data.u8[0] != ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE &&
+            pcId.count == 0) {
+        res = metadata.update(ANDROID_CONTROL_AE_PRECAPTURE_ID,
+                &dummyTriggerId, 1);
+        if (res != OK) return res;
+    }
+
+    return OK;
+}
+
+
+/**
+ * Static callback forwarding methods from HAL to instance
+ */
+
+// C-callable trampoline: the HAL invokes this with the camera3_callback_ops
+// pointer it was handed at initialize(); since Camera3Device privately
+// inherits camera3_callback_ops, that pointer is the instance itself.
+void Camera3Device::sProcessCaptureResult(const camera3_callback_ops *cb,
+        const camera3_capture_result *result) {
+    Camera3Device *d =
+            const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb));
+    d->processCaptureResult(result);
+}
+
+// C-callable trampoline for HAL notify() messages; see
+// sProcessCaptureResult for how |cb| maps back to the instance.
+void Camera3Device::sNotify(const camera3_callback_ops *cb,
+        const camera3_notify_msg *msg) {
+    Camera3Device *d =
+            const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb));
+    d->notify(msg);
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
new file mode 100644
index 0000000..468f641
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3DEVICE_H
+#define ANDROID_SERVERS_CAMERA3DEVICE_H
+
+#include <utils/Condition.h>
+#include <utils/Errors.h>
+#include <utils/List.h>
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include <utils/KeyedVector.h>
+#include <hardware/camera3.h>
+
+#include "common/CameraDeviceBase.h"
+#include "device3/StatusTracker.h"
+
+/**
+ * Function pointer types with C calling convention to
+ * use for HAL callback functions.
+ */
+extern "C" {
+ typedef void (callbacks_process_capture_result_t)(
+ const struct camera3_callback_ops *,
+ const camera3_capture_result_t *);
+
+ typedef void (callbacks_notify_t)(
+ const struct camera3_callback_ops *,
+ const camera3_notify_msg_t *);
+}
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3Stream;
+class Camera3ZslStream;
+class Camera3OutputStreamInterface;
+class Camera3StreamInterface;
+
+}
+
+/**
+ * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0
+ */
+class Camera3Device :
+            public CameraDeviceBase,
+            private camera3_callback_ops {
+  public:
+    Camera3Device(int id);
+
+    virtual ~Camera3Device();
+
+    /**
+     * CameraDeviceBase interface
+     */
+
+    virtual int getId() const;
+
+    // Transitions to idle state on success.
+    virtual status_t initialize(camera_module_t *module);
+    virtual status_t disconnect();
+    virtual status_t dump(int fd, const Vector<String16> &args);
+    virtual const CameraMetadata& info() const;
+
+    // Capture and setStreamingRequest will configure streams if currently in
+    // idle state
+    virtual status_t capture(CameraMetadata &request);
+    virtual status_t setStreamingRequest(const CameraMetadata &request);
+    virtual status_t clearStreamingRequest();
+
+    virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
+
+    // Actual stream creation/deletion is delayed until first request is submitted
+    // If adding streams while actively capturing, will pause device before adding
+    // stream, reconfiguring device, and unpausing.
+    virtual status_t createStream(sp<ANativeWindow> consumer,
+            uint32_t width, uint32_t height, int format, size_t size,
+            int *id);
+    virtual status_t createInputStream(
+            uint32_t width, uint32_t height, int format,
+            int *id);
+    virtual status_t createZslStream(
+            uint32_t width, uint32_t height,
+            int depth,
+            /*out*/
+            int *id,
+            sp<camera3::Camera3ZslStream>* zslStream);
+    virtual status_t createReprocessStreamFromStream(int outputId, int *id);
+
+    virtual status_t getStreamInfo(int id,
+            uint32_t *width, uint32_t *height, uint32_t *format);
+    virtual status_t setStreamTransform(int id, int transform);
+
+    virtual status_t deleteStream(int id);
+    virtual status_t deleteReprocessStream(int id);
+
+    virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
+
+    // Transitions to the idle state on success
+    virtual status_t waitUntilDrained();
+
+    virtual status_t setNotifyCallback(NotificationListener *listener);
+    virtual bool willNotify3A();
+    virtual status_t waitForNextFrame(nsecs_t timeout);
+    virtual status_t getNextFrame(CameraMetadata *frame);
+
+    virtual status_t triggerAutofocus(uint32_t id);
+    virtual status_t triggerCancelAutofocus(uint32_t id);
+    virtual status_t triggerPrecaptureMetering(uint32_t id);
+
+    virtual status_t pushReprocessBuffer(int reprocessStreamId,
+            buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
+
+    virtual status_t flush();
+
+    // Methods called by subclasses
+    void notifyStatus(bool idle); // updates from StatusTracker
+
+  private:
+    static const size_t kDumpLockAttempts = 10;
+    static const size_t kDumpSleepDuration = 100000; // 0.10 sec
+    static const size_t kInFlightWarnLimit = 20;
+    static const nsecs_t kShutdownTimeout = 5000000000; // 5 sec
+    static const nsecs_t kActiveTimeout = 500000000; // 500 ms
+    struct RequestTrigger;
+
+    // A lock to enforce serialization on the input/configure side
+    // of the public interface.
+    // Only locked by public methods inherited from CameraDeviceBase.
+    // Not locked by methods guarded by mOutputLock, since they may act
+    // concurrently to the input/configure side of the interface.
+    // Must be locked before mLock if both will be locked by a method
+    Mutex mInterfaceLock;
+
+    // The main lock on internal state
+    Mutex mLock;
+
+    // Camera device ID
+    const int mId;
+
+    /**** Scope for mLock ****/
+
+    camera3_device_t *mHal3Device;
+
+    CameraMetadata mDeviceInfo;
+    vendor_tag_query_ops_t mVendorTagOps;
+
+    enum Status {
+        STATUS_ERROR,
+        STATUS_UNINITIALIZED,
+        STATUS_UNCONFIGURED,
+        STATUS_CONFIGURED,
+        STATUS_ACTIVE
+    } mStatus;
+    // Recent status transitions, consumed by waiters on mStatusChanged
+    // (see waitUntilStateThenRelock)
+    Vector<Status> mRecentStatusUpdates;
+    Condition mStatusChanged;
+
+    // Tracking cause of fatal errors when in STATUS_ERROR
+    String8 mErrorCause;
+
+    // Mapping of stream IDs to stream instances
+    typedef KeyedVector<int, sp<camera3::Camera3OutputStreamInterface> >
+            StreamSet;
+
+    StreamSet mOutputStreams;
+    sp<camera3::Camera3Stream> mInputStream;
+    int mNextStreamId;
+    bool mNeedConfig;
+
+    // Whether to send state updates upstream
+    // Pause when doing transparent reconfiguration
+    bool mPauseStateNotify;
+
+    // Need to hold on to stream references until configure completes.
+    Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
+
+    // Whether quirk ANDROID_QUIRKS_USE_PARTIAL_RESULT is enabled
+    bool mUsePartialResultQuirk;
+
+    /**** End scope for mLock ****/
+
+    // A single capture/reprocess request bundle: the settings to hand to
+    // the HAL plus the input/output streams to attach buffers from/to.
+    class CaptureRequest : public LightRefBase<CaptureRequest> {
+      public:
+        CameraMetadata mSettings;
+        sp<camera3::Camera3Stream> mInputStream;
+        Vector<sp<camera3::Camera3OutputStreamInterface> >
+                mOutputStreams;
+    };
+    typedef List<sp<CaptureRequest> > RequestList;
+
+    /**
+     * Get the last request submitted to the hal by the request thread.
+     *
+     * Takes mLock.
+     */
+    virtual CameraMetadata getLatestRequestLocked();
+
+    /**
+     * Pause processing and flush everything, but don't tell the clients.
+     * This is for reconfiguring outputs transparently when according to the
+     * CameraDeviceBase interface we shouldn't need to.
+     * Must be called with mLock and mInterfaceLock both held.
+     */
+    status_t internalPauseAndWaitLocked();
+
+    /**
+     * Resume work after internalPauseAndWaitLocked()
+     * Must be called with mLock and mInterfaceLock both held.
+     */
+    status_t internalResumeLocked();
+
+    /**
+     * Wait until status tracker tells us we've transitioned to the target state
+     * set, which is either ACTIVE when active==true or IDLE (which is any
+     * non-ACTIVE state) when active==false.
+     *
+     * Needs to be called with mLock and mInterfaceLock held. This means there
+     * can ever only be one waiter at most.
+     *
+     * During the wait mLock is released.
+     *
+     */
+    status_t waitUntilStateThenRelock(bool active, nsecs_t timeout);
+
+    /**
+     * Do common work for setting up a streaming or single capture request.
+     * On success, will transition to ACTIVE if in IDLE.
+     */
+    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+
+    /**
+     * Build a CaptureRequest request from the CameraDeviceBase request
+     * settings.
+     */
+    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+
+    /**
+     * Take the currently-defined set of streams and configure the HAL to use
+     * them. This is a long-running operation (may be several hundered ms).
+     */
+    status_t configureStreamsLocked();
+
+    /**
+     * Set device into an error state due to some fatal failure, and set an
+     * error message to indicate why. Only the first call's message will be
+     * used. The message is also sent to the log.
+     */
+    void setErrorState(const char *fmt, ...);
+    void setErrorStateV(const char *fmt, va_list args);
+    void setErrorStateLocked(const char *fmt, ...);
+    void setErrorStateLockedV(const char *fmt, va_list args);
+
+    /**
+     * Debugging trylock/spin method
+     * Try to acquire a lock a few times with sleeps between before giving up.
+     */
+    bool tryLockSpinRightRound(Mutex& lock);
+
+    struct RequestTrigger {
+        // Metadata tag number, e.g. android.control.aePrecaptureTrigger
+        uint32_t metadataTag;
+        // Metadata value, e.g. 'START' or the trigger ID
+        int32_t entryValue;
+
+        // The last part of the fully qualified path, e.g. afTrigger
+        const char *getTagName() const {
+            return get_camera_metadata_tag_name(metadataTag) ?: "NULL";
+        }
+
+        // e.g. TYPE_BYTE, TYPE_INT32, etc.
+        int getTagType() const {
+            return get_camera_metadata_tag_type(metadataTag);
+        }
+    };
+
+    /**
+     * Thread for managing capture request submission to HAL device.
+     */
+    class RequestThread : public Thread {
+
+      public:
+
+        RequestThread(wp<Camera3Device> parent,
+                sp<camera3::StatusTracker> statusTracker,
+                camera3_device_t *hal3Device);
+
+        /**
+         * Call after stream (re)-configuration is completed.
+         */
+        void configurationComplete();
+
+        /**
+         * Set or clear the list of repeating requests. Does not block
+         * on either. Use waitUntilPaused to wait until request queue
+         * has emptied out.
+         */
+        status_t setRepeatingRequests(const RequestList& requests);
+        status_t clearRepeatingRequests();
+
+        status_t queueRequest(sp<CaptureRequest> request);
+
+        /**
+         * Remove all queued and repeating requests, and pending triggers
+         */
+        status_t clear();
+
+        /**
+         * Queue a trigger to be dispatched with the next outgoing
+         * process_capture_request. The settings for that request only
+         * will be temporarily rewritten to add the trigger tag/value.
+         * Subsequent requests will not be rewritten (for this tag).
+         */
+        status_t queueTrigger(RequestTrigger trigger[], size_t count);
+
+        /**
+         * Pause/unpause the capture thread. Doesn't block, so use
+         * waitUntilPaused to wait until the thread is paused.
+         */
+        void setPaused(bool paused);
+
+        /**
+         * Wait until thread processes the capture request with settings'
+         * android.request.id == requestId.
+         *
+         * Returns TIMED_OUT in case the thread does not process the request
+         * within the timeout.
+         */
+        status_t waitUntilRequestProcessed(int32_t requestId, nsecs_t timeout);
+
+        /**
+         * Shut down the thread. Shutdown is asynchronous, so thread may
+         * still be running once this method returns.
+         */
+        virtual void requestExit();
+
+        /**
+         * Get the latest request that was sent to the HAL
+         * with process_capture_request.
+         */
+        CameraMetadata getLatestRequest() const;
+
+      protected:
+
+        virtual bool threadLoop();
+
+      private:
+        static int getId(const wp<Camera3Device> &device);
+
+        status_t queueTriggerLocked(RequestTrigger trigger);
+        // Mix-in queued triggers into this request
+        int32_t insertTriggers(const sp<CaptureRequest> &request);
+        // Purge the queued triggers from this request,
+        // restoring the old field values for those tags.
+        status_t removeTriggers(const sp<CaptureRequest> &request);
+
+        // HAL workaround: Make sure a trigger ID always exists if
+        // a trigger does
+        status_t addDummyTriggerIds(const sp<CaptureRequest> &request);
+
+        static const nsecs_t kRequestTimeout = 50e6; // 50 ms
+
+        // Waits for a request, or returns NULL if times out.
+        sp<CaptureRequest> waitForNextRequest();
+
+        // Return buffers, etc, for a request that couldn't be fully
+        // constructed. The buffers will be returned in the ERROR state
+        // to mark them as not having valid data.
+        // All arguments will be modified.
+        void cleanUpFailedRequest(camera3_capture_request_t &request,
+                sp<CaptureRequest> &nextRequest,
+                Vector<camera3_stream_buffer_t> &outputBuffers);
+
+        // Pause handling
+        bool waitIfPaused();
+        void unpauseForNewRequests();
+
+        // Relay error to parent device object setErrorState
+        void setErrorState(const char *fmt, ...);
+
+        wp<Camera3Device> mParent;
+        wp<camera3::StatusTracker> mStatusTracker;
+        camera3_device_t *mHal3Device;
+
+        const int mId; // The camera ID
+        int mStatusId; // The RequestThread's component ID for
+                       // status tracking
+
+        Mutex mRequestLock;
+        Condition mRequestSignal;
+        RequestList mRequestQueue;
+        RequestList mRepeatingRequests;
+
+        bool mReconfigured;
+
+        // Used by waitIfPaused, waitForNextRequest, and waitUntilPaused
+        Mutex mPauseLock;
+        bool mDoPause;
+        Condition mDoPauseSignal;
+        bool mPaused;
+        Condition mPausedSignal;
+
+        sp<CaptureRequest> mPrevRequest;
+        int32_t mPrevTriggers;
+
+        uint32_t mFrameNumber;
+
+        mutable Mutex mLatestRequestMutex;
+        Condition mLatestRequestSignal;
+        // android.request.id for latest process_capture_request
+        int32_t mLatestRequestId;
+        CameraMetadata mLatestRequest;
+
+        typedef KeyedVector<uint32_t/*tag*/, RequestTrigger> TriggerMap;
+        Mutex mTriggerMutex;
+        TriggerMap mTriggerMap;
+        TriggerMap mTriggerRemovedMap;
+        TriggerMap mTriggerReplacedMap;
+    };
+    // Thread that feeds CaptureRequests one at a time into the HAL
+    sp<RequestThread> mRequestThread;
+
+    /**
+     * In-flight queue for tracking completion of capture requests.
+     */
+
+    struct InFlightRequest {
+        // android.request.id for the request
+        int requestId;
+        // Set by notify() SHUTTER call.
+        nsecs_t captureTimestamp;
+        int requestStatus;
+        // Set by process_capture_result call with valid metadata
+        bool haveResultMetadata;
+        // Decremented by calls to process_capture_result with valid output
+        // buffers
+        int numBuffersLeft;
+
+        // Fields used by the partial result quirk only
+        struct PartialResultQuirkInFlight {
+            // Set by process_capture_result once 3A has been sent to clients
+            bool haveSent3A;
+            // Result metadata collected so far, when partial results are in use
+            CameraMetadata collectedResult;
+
+            PartialResultQuirkInFlight():
+                    haveSent3A(false) {
+            }
+        } partialResultQuirk;
+
+        // Default constructor needed by KeyedVector
+        InFlightRequest() :
+                requestId(0),
+                captureTimestamp(0),
+                requestStatus(OK),
+                haveResultMetadata(false),
+                numBuffersLeft(0) {
+        }
+
+        InFlightRequest(int id, int numBuffers) :
+                requestId(id),
+                captureTimestamp(0),
+                requestStatus(OK),
+                haveResultMetadata(false),
+                numBuffersLeft(numBuffers) {
+        }
+    };
+    // Map from frame number to the in-flight request state
+    typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap;
+
+    Mutex mInFlightLock; // Protects mInFlightMap
+    InFlightMap mInFlightMap;
+
+    // Add a newly submitted capture to mInFlightMap so its results can be
+    // matched up as they arrive. Called by RequestThread; takes mInFlightLock.
+    status_t registerInFlight(int32_t frameNumber, int32_t requestId,
+            int32_t numBuffers);
+
+    /**
+     * For the partial result quirk, check if all 3A state fields are available
+     * and if so, queue up 3A-only result to the client. Returns true if 3A
+     * is sent.
+     */
+    bool processPartial3AQuirk(int32_t frameNumber, int32_t requestId,
+            const CameraMetadata& partial);
+
+    // Helpers for reading and writing 3A metadata into to/from partial results
+    template<typename T>
+    bool get3AResult(const CameraMetadata& result, int32_t tag,
+            T* value, int32_t frameNumber);
+
+    template<typename T>
+    bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
+            int32_t frameNumber);
+    /**
+     * Tracking for idle detection
+     */
+    sp<camera3::StatusTracker> mStatusTracker;
+
+    /**
+     * Output result queue and current HAL device 3A state
+     */
+
+    // Lock for output side of device
+    Mutex mOutputLock;
+
+    /**** Scope for mOutputLock ****/
+
+    uint32_t mNextResultFrameNumber;
+    uint32_t mNextShutterFrameNumber;
+    List<CameraMetadata> mResultQueue;
+    Condition mResultSignal;
+    NotificationListener *mListener;
+
+    /**** End scope for mOutputLock ****/
+
+    /**
+     * Callback functions from HAL device
+     */
+    void processCaptureResult(const camera3_capture_result *result);
+
+    void notify(const camera3_notify_msg *msg);
+
+    /**
+     * Static callback forwarding methods from HAL to instance
+     */
+    static callbacks_process_capture_result_t sProcessCaptureResult;
+
+    static callbacks_notify_t sNotify;
+
+}; // class Camera3Device
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
new file mode 100644
index 0000000..42e02d8
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-IOStreamBase"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+// This is needed for stdint.h to define INT64_MAX in C++
+#define __STDC_LIMIT_MACROS
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "device3/Camera3IOStreamBase.h"
+#include "device3/StatusTracker.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3IOStreamBase::Camera3IOStreamBase(int id, camera3_stream_type_t type,
+ uint32_t width, uint32_t height, size_t maxSize, int format) :
+ Camera3Stream(id, type,
+ width, height, maxSize, format),
+ mTotalBufferCount(0),
+ mDequeuedBufferCount(0),
+ mFrameCount(0),
+ mLastTimestamp(0) {
+
+ mCombinedFence = new Fence();
+
+ if (maxSize > 0 && format != HAL_PIXEL_FORMAT_BLOB) {
+ ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
+ format);
+ mState = STATE_ERROR;
+ }
+}
+
+Camera3IOStreamBase::~Camera3IOStreamBase() {
+ disconnectLocked();
+}
+
+bool Camera3IOStreamBase::hasOutstandingBuffersLocked() const {
+ nsecs_t signalTime = mCombinedFence->getSignalTime();
+ ALOGV("%s: Stream %d: Has %d outstanding buffers,"
+ " buffer signal time is %lld",
+ __FUNCTION__, mId, mDequeuedBufferCount, signalTime);
+ if (mDequeuedBufferCount > 0 || signalTime == INT64_MAX) {
+ return true;
+ }
+ return false;
+}
+
+void Camera3IOStreamBase::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" State: %d\n", mState);
+ lines.appendFormat(" Dims: %d x %d, format 0x%x\n",
+ camera3_stream::width, camera3_stream::height,
+ camera3_stream::format);
+ lines.appendFormat(" Max size: %zu\n", mMaxSize);
+ lines.appendFormat(" Usage: %d, max HAL buffers: %d\n",
+ camera3_stream::usage, camera3_stream::max_buffers);
+ lines.appendFormat(" Frames produced: %d, last timestamp: %lld ns\n",
+ mFrameCount, mLastTimestamp);
+ lines.appendFormat(" Total buffers: %zu, currently dequeued: %zu\n",
+ mTotalBufferCount, mDequeuedBufferCount);
+ write(fd, lines.string(), lines.size());
+}
+
+status_t Camera3IOStreamBase::configureQueueLocked() {
+ status_t res;
+
+ switch (mState) {
+ case STATE_IN_RECONFIG:
+ res = disconnectLocked();
+ if (res != OK) {
+ return res;
+ }
+ break;
+ case STATE_IN_CONFIG:
+ // OK
+ break;
+ default:
+ ALOGE("%s: Bad state: %d", __FUNCTION__, mState);
+ return INVALID_OPERATION;
+ }
+
+ return OK;
+}
+
+size_t Camera3IOStreamBase::getBufferCountLocked() {
+ return mTotalBufferCount;
+}
+
+status_t Camera3IOStreamBase::disconnectLocked() {
+ switch (mState) {
+ case STATE_IN_RECONFIG:
+ case STATE_CONFIGURED:
+ // OK
+ break;
+ default:
+ // No connection, nothing to do
+ ALOGV("%s: Stream %d: Already disconnected",
+ __FUNCTION__, mId);
+ return -ENOTCONN;
+ }
+
+ if (mDequeuedBufferCount > 0) {
+ ALOGE("%s: Can't disconnect with %d buffers still dequeued!",
+ __FUNCTION__, mDequeuedBufferCount);
+ return INVALID_OPERATION;
+ }
+
+ return OK;
+}
+
+void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer,
+ buffer_handle_t *handle,
+ int acquireFence,
+ int releaseFence,
+ camera3_buffer_status_t status) {
+ /**
+ * Note that all fences are now owned by HAL.
+ */
+
+ // Handing out a raw pointer to this object. Increment internal refcount.
+ incStrong(this);
+ buffer.stream = this;
+ buffer.buffer = handle;
+ buffer.acquire_fence = acquireFence;
+ buffer.release_fence = releaseFence;
+ buffer.status = status;
+
+ // Inform tracker about becoming busy
+ if (mDequeuedBufferCount == 0 && mState != STATE_IN_CONFIG &&
+ mState != STATE_IN_RECONFIG) {
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentActive(mStatusId);
+ }
+ }
+ mDequeuedBufferCount++;
+}
+
+status_t Camera3IOStreamBase::getBufferPreconditionCheckLocked() const {
+ // Allow dequeue during IN_[RE]CONFIG for registration
+ if (mState != STATE_CONFIGURED &&
+ mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) {
+ ALOGE("%s: Stream %d: Can't get buffers in unconfigured state %d",
+ __FUNCTION__, mId, mState);
+ return INVALID_OPERATION;
+ }
+
+ // Only limit dequeue amount when fully configured
+ if (mState == STATE_CONFIGURED &&
+ mDequeuedBufferCount == camera3_stream::max_buffers) {
+ ALOGE("%s: Stream %d: Already dequeued maximum number of simultaneous"
+ " buffers (%d)", __FUNCTION__, mId,
+ camera3_stream::max_buffers);
+ return INVALID_OPERATION;
+ }
+
+ return OK;
+}
+
+status_t Camera3IOStreamBase::returnBufferPreconditionCheckLocked() const {
+ // Allow buffers to be returned in the error state, to allow for disconnect
+ // and in the in-config states for registration
+ if (mState == STATE_CONSTRUCTED) {
+ ALOGE("%s: Stream %d: Can't return buffers in unconfigured state %d",
+ __FUNCTION__, mId, mState);
+ return INVALID_OPERATION;
+ }
+ if (mDequeuedBufferCount == 0) {
+ ALOGE("%s: Stream %d: No buffers outstanding to return", __FUNCTION__,
+ mId);
+ return INVALID_OPERATION;
+ }
+
+ return OK;
+}
+
+status_t Camera3IOStreamBase::returnAnyBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output) {
+ status_t res;
+
+ // returnBuffer may be called from a raw pointer, not a sp<>, and we'll be
+ // decrementing the internal refcount next. In case this is the last ref, we
+ // might get destructed on the decStrong(), so keep an sp around until the
+ // end of the call - otherwise have to sprinkle the decStrong on all exit
+ // points.
+ sp<Camera3IOStreamBase> keepAlive(this);
+ decStrong(this);
+
+ if ((res = returnBufferPreconditionCheckLocked()) != OK) {
+ return res;
+ }
+
+ sp<Fence> releaseFence;
+ res = returnBufferCheckedLocked(buffer, timestamp, output,
+ &releaseFence);
+ // Res may be an error, but we still want to decrement our owned count
+ // to enable clean shutdown. So we'll just return the error but otherwise
+ // carry on
+
+ if (releaseFence != 0) {
+ mCombinedFence = Fence::merge(mName, mCombinedFence, releaseFence);
+ }
+
+ mDequeuedBufferCount--;
+ if (mDequeuedBufferCount == 0 && mState != STATE_IN_CONFIG &&
+ mState != STATE_IN_RECONFIG) {
+ ALOGV("%s: Stream %d: All buffers returned; now idle", __FUNCTION__,
+ mId);
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentIdle(mStatusId, mCombinedFence);
+ }
+ }
+
+ mBufferReturnedSignal.signal();
+
+ if (output) {
+ mLastTimestamp = timestamp;
+ }
+
+ return res;
+}
+
+
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
new file mode 100644
index 0000000..fcb9d04
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_IO_STREAM_BASE_H
+#define ANDROID_SERVERS_CAMERA3_IO_STREAM_BASE_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * A base class for managing a single stream of I/O data from the camera device.
+ */
+class Camera3IOStreamBase :
+ public Camera3Stream {
+ protected:
+ Camera3IOStreamBase(int id, camera3_stream_type_t type,
+ uint32_t width, uint32_t height, size_t maxSize, int format);
+
+ public:
+
+ virtual ~Camera3IOStreamBase();
+
+ /**
+ * Camera3Stream interface
+ */
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ protected:
+ size_t mTotalBufferCount;
+ // sum of input and output buffers that are currently acquired by HAL
+ size_t mDequeuedBufferCount;
+ Condition mBufferReturnedSignal;
+ uint32_t mFrameCount;
+ // Last received output buffer's timestamp
+ nsecs_t mLastTimestamp;
+
+ // The merged release fence for all returned buffers
+ sp<Fence> mCombinedFence;
+
+ status_t returnAnyBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output);
+
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) = 0;
+
+ /**
+ * Internal Camera3Stream interface
+ */
+ virtual bool hasOutstandingBuffersLocked() const;
+
+ virtual size_t getBufferCountLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage) = 0;
+
+ status_t getBufferPreconditionCheckLocked() const;
+ status_t returnBufferPreconditionCheckLocked() const;
+
+ // State check only
+ virtual status_t configureQueueLocked();
+ // State checks only
+ virtual status_t disconnectLocked();
+
+ // Hand out the buffer to a native location,
+ // incrementing the internal refcount and dequeued buffer count
+ void handoutBufferLocked(camera3_stream_buffer &buffer,
+ buffer_handle_t *handle,
+ int acquire_fence,
+ int release_fence,
+ camera3_buffer_status_t status);
+
+}; // class Camera3IOStreamBase
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
new file mode 100644
index 0000000..5aa9a3e
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-InputStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3InputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3InputStream::Camera3InputStream(int id,
+ uint32_t width, uint32_t height, int format) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height,
+ /*maxSize*/0, format) {
+
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__);
+ mState = STATE_ERROR;
+ }
+}
+
+Camera3InputStream::~Camera3InputStream() {
+ disconnectLocked();
+}
+
+status_t Camera3InputStream::getInputBufferLocked(
+ camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+ status_t res;
+
+ // FIXME: will not work in (re-)registration
+ if (mState == STATE_IN_CONFIG || mState == STATE_IN_RECONFIG) {
+ ALOGE("%s: Stream %d: Buffer registration for input streams"
+ " not implemented (state %d)",
+ __FUNCTION__, mId, mState);
+ return INVALID_OPERATION;
+ }
+
+ if ((res = getBufferPreconditionCheckLocked()) != OK) {
+ return res;
+ }
+
+ ANativeWindowBuffer* anb;
+ int fenceFd;
+
+ assert(mConsumer != 0);
+
+ BufferItem bufferItem;
+ res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
+
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+
+ anb = bufferItem.mGraphicBuffer->getNativeBuffer();
+ assert(anb != NULL);
+ fenceFd = bufferItem.mFence->dup();
+
+ /**
+ * FenceFD now owned by HAL except in case of error,
+ * in which case we reassign it to acquire_fence
+ */
+ handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
+ /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);
+ mBuffersInFlight.push_back(bufferItem);
+
+ return OK;
+}
+
+status_t Camera3InputStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+
+ (void)timestamp;
+ (void)output;
+ ALOG_ASSERT(!output, "Expected output to be false");
+
+ status_t res;
+
+ bool bufferFound = false;
+ BufferItem bufferItem;
+ {
+ // Find the buffer we are returning
+ Vector<BufferItem>::iterator it, end;
+ for (it = mBuffersInFlight.begin(), end = mBuffersInFlight.end();
+ it != end;
+ ++it) {
+
+ const BufferItem& tmp = *it;
+ ANativeWindowBuffer *anb = tmp.mGraphicBuffer->getNativeBuffer();
+ if (anb != NULL && &(anb->handle) == buffer.buffer) {
+ bufferFound = true;
+ bufferItem = tmp;
+ mBuffersInFlight.erase(it);
+ }
+ }
+ }
+ if (!bufferFound) {
+ ALOGE("%s: Stream %d: Can't return buffer that wasn't sent to HAL",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.release_fence != -1) {
+ ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
+ "there is an error", __FUNCTION__, mId, buffer.release_fence);
+ close(buffer.release_fence);
+ }
+
+ /**
+         * Reassign release fence as the acquire fence in case of error
+ */
+ const_cast<camera3_stream_buffer*>(&buffer)->release_fence =
+ buffer.acquire_fence;
+ }
+
+ /**
+ * Unconditionally return buffer to the buffer queue.
+ * - Fwk takes over the release_fence ownership
+ */
+ sp<Fence> releaseFence = new Fence(buffer.release_fence);
+ res = mConsumer->releaseBuffer(bufferItem, releaseFence);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error releasing buffer back to buffer queue:"
+ " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ }
+
+ *releaseFenceOut = releaseFence;
+
+ return res;
+}
+
+status_t Camera3InputStream::returnInputBufferLocked(
+ const camera3_stream_buffer &buffer) {
+ ATRACE_CALL();
+
+ return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false);
+}
+
+status_t Camera3InputStream::disconnectLocked() {
+
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) {
+ return res;
+ }
+
+ assert(mBuffersInFlight.size() == 0);
+
+ /**
+ * no-op since we can't disconnect the producer from the consumer-side
+ */
+
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+void Camera3InputStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Input\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3InputStream::configureQueueLocked() {
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+ return res;
+ }
+
+ assert(mMaxSize == 0);
+ assert(camera3_stream::format != HAL_PIXEL_FORMAT_BLOB);
+
+ mTotalBufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS +
+ camera3_stream::max_buffers;
+ mDequeuedBufferCount = 0;
+ mFrameCount = 0;
+
+ if (mConsumer.get() == 0) {
+ sp<BufferQueue> bq = new BufferQueue();
+ mConsumer = new BufferItemConsumer(bq, camera3_stream::usage,
+ mTotalBufferCount);
+ mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
+ }
+
+ res = mConsumer->setDefaultBufferSize(camera3_stream::width,
+ camera3_stream::height);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Could not set buffer dimensions %dx%d",
+ __FUNCTION__, mId, camera3_stream::width, camera3_stream::height);
+ return res;
+ }
+ res = mConsumer->setDefaultBufferFormat(camera3_stream::format);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Could not set buffer format %d",
+ __FUNCTION__, mId, camera3_stream::format);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t Camera3InputStream::getEndpointUsage(uint32_t *usage) {
+ // Per HAL3 spec, input streams have 0 for their initial usage field.
+ *usage = 0;
+ return OK;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
new file mode 100644
index 0000000..681d684
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_INPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_INPUT_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+#include <gui/BufferItemConsumer.h>
+
+#include "Camera3IOStreamBase.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * A class for managing a single stream of input data to the camera device.
+ *
+ * This class serves as a consumer adapter for the HAL, and will consume the
+ * buffers by feeding them into the HAL, as well as releasing the buffers back
+ * to the producer once the HAL is done with them.
+ */
+class Camera3InputStream : public Camera3IOStreamBase {
+ public:
+ /**
+ * Set up a stream for formats that have fixed size, such as RAW and YUV.
+ */
+ Camera3InputStream(int id, uint32_t width, uint32_t height, int format);
+ ~Camera3InputStream();
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ private:
+
+ typedef BufferItemConsumer::BufferItem BufferItem;
+
+ sp<BufferItemConsumer> mConsumer;
+ Vector<BufferItem> mBuffersInFlight;
+
+ /**
+ * Camera3IOStreamBase
+ */
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut);
+
+ /**
+ * Camera3Stream interface
+ */
+
+ virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t returnInputBufferLocked(
+ const camera3_stream_buffer &buffer);
+ virtual status_t disconnectLocked();
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3InputStream
+
+}; // namespace camera3
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
new file mode 100644
index 0000000..682755d
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-OutputStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3OutputStream.h"
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ (type *)((char*)(ptr) - offsetof(type, member))
+#endif
+
+namespace android {
+
+namespace camera3 {
+
+Camera3OutputStream::Camera3OutputStream(int id,
+ sp<ANativeWindow> consumer,
+ uint32_t width, uint32_t height, int format) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
+ /*maxSize*/0, format),
+ mConsumer(consumer),
+ mTransform(0) {
+
+ if (mConsumer == NULL) {
+ ALOGE("%s: Consumer is NULL!", __FUNCTION__);
+ mState = STATE_ERROR;
+ }
+}
+
+Camera3OutputStream::Camera3OutputStream(int id,
+ sp<ANativeWindow> consumer,
+ uint32_t width, uint32_t height, size_t maxSize, int format) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize,
+ format),
+ mConsumer(consumer),
+ mTransform(0) {
+
+ if (format != HAL_PIXEL_FORMAT_BLOB) {
+ ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
+ format);
+ mState = STATE_ERROR;
+ }
+
+ if (mConsumer == NULL) {
+ ALOGE("%s: Consumer is NULL!", __FUNCTION__);
+ mState = STATE_ERROR;
+ }
+}
+
+Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
+ uint32_t width, uint32_t height,
+ int format) :
+ Camera3IOStreamBase(id, type, width, height,
+ /*maxSize*/0,
+ format),
+ mTransform(0) {
+
+ // Subclasses expected to initialize mConsumer themselves
+}
+
+
+Camera3OutputStream::~Camera3OutputStream() {
+ disconnectLocked();
+}
+
+status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+ status_t res;
+
+ if ((res = getBufferPreconditionCheckLocked()) != OK) {
+ return res;
+ }
+
+ ANativeWindowBuffer* anb;
+ int fenceFd;
+
+ /**
+ * Release the lock briefly to avoid deadlock for below scenario:
+ * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
+ * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
+ * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
+ * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
+ * StreamingProcessor lock.
+ * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
+ * and try to lock bufferQueue lock.
+ * Then there is circular locking dependency.
+ */
+ sp<ANativeWindow> currentConsumer = mConsumer;
+ mLock.unlock();
+
+ res = currentConsumer->dequeueBuffer(currentConsumer.get(), &anb, &fenceFd);
+ mLock.lock();
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+
+ /**
+ * FenceFD now owned by HAL except in case of error,
+ * in which case we reassign it to acquire_fence
+ */
+ handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
+ /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);
+
+ return OK;
+}
+
+status_t Camera3OutputStream::returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+
+ status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true);
+
+ if (res != OK) {
+ return res;
+ }
+
+ mLastTimestamp = timestamp;
+
+ return OK;
+}
+
+status_t Camera3OutputStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+
+ (void)output;
+ ALOG_ASSERT(output, "Expected output to be true");
+
+ status_t res;
+ sp<Fence> releaseFence;
+
+ /**
+ * Fence management - calculate Release Fence
+ */
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.release_fence != -1) {
+ ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
+ "there is an error", __FUNCTION__, mId, buffer.release_fence);
+ close(buffer.release_fence);
+ }
+
+ /**
+ * Reassign release fence as the acquire fence in case of error
+ */
+ releaseFence = new Fence(buffer.acquire_fence);
+ } else {
+ res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+
+ releaseFence = new Fence(buffer.release_fence);
+ }
+
+ int anwReleaseFence = releaseFence->dup();
+
+ /**
+ * Release the lock briefly to avoid deadlock with
+ * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
+ * thread will go into StreamingProcessor::onFrameAvailable) during
+ * queueBuffer
+ */
+ sp<ANativeWindow> currentConsumer = mConsumer;
+ mLock.unlock();
+
+ /**
+ * Return buffer back to ANativeWindow
+ */
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ // Cancel buffer
+ res = currentConsumer->cancelBuffer(currentConsumer.get(),
+ container_of(buffer.buffer, ANativeWindowBuffer, handle),
+ anwReleaseFence);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
+ " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ }
+ } else {
+ res = currentConsumer->queueBuffer(currentConsumer.get(),
+ container_of(buffer.buffer, ANativeWindowBuffer, handle),
+ anwReleaseFence);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error queueing buffer to native window: "
+ "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ }
+ }
+ mLock.lock();
+ if (res != OK) {
+ close(anwReleaseFence);
+ }
+
+ *releaseFenceOut = releaseFence;
+
+ return res;
+}
+
+void Camera3OutputStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Output\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3OutputStream::setTransform(int transform) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+ return setTransformLocked(transform);
+}
+
+status_t Camera3OutputStream::setTransformLocked(int transform) {
+ status_t res = OK;
+ if (mState == STATE_ERROR) {
+ ALOGE("%s: Stream in error state", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mTransform = transform;
+ if (mState == STATE_CONFIGURED) {
+ res = native_window_set_buffers_transform(mConsumer.get(),
+ transform);
+ if (res != OK) {
+ ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
+ __FUNCTION__, transform, strerror(-res), res);
+ }
+ }
+ return res;
+}
+
+status_t Camera3OutputStream::configureQueueLocked() {
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+ return res;
+ }
+
+ ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
+
+ // Configure consumer-side ANativeWindow interface
+ res = native_window_api_connect(mConsumer.get(),
+ NATIVE_WINDOW_API_CAMERA);
+ if (res != OK) {
+ ALOGE("%s: Unable to connect to native window for stream %d",
+ __FUNCTION__, mId);
+ return res;
+ }
+
+ res = native_window_set_usage(mConsumer.get(), camera3_stream::usage);
+ if (res != OK) {
+ ALOGE("%s: Unable to configure usage %08x for stream %d",
+ __FUNCTION__, camera3_stream::usage, mId);
+ return res;
+ }
+
+ res = native_window_set_scaling_mode(mConsumer.get(),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+ if (res != OK) {
+ ALOGE("%s: Unable to configure stream scaling: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ if (mMaxSize == 0) {
+ // For buffers of known size
+ res = native_window_set_buffers_geometry(mConsumer.get(),
+ camera3_stream::width, camera3_stream::height,
+ camera3_stream::format);
+ } else {
+ // For buffers with bounded size
+ res = native_window_set_buffers_geometry(mConsumer.get(),
+ mMaxSize, 1,
+ camera3_stream::format);
+ }
+ if (res != OK) {
+ ALOGE("%s: Unable to configure stream buffer geometry"
+ " %d x %d, format %x for stream %d",
+ __FUNCTION__, camera3_stream::width, camera3_stream::height,
+ camera3_stream::format, mId);
+ return res;
+ }
+
+ int maxConsumerBuffers;
+ res = mConsumer->query(mConsumer.get(),
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+ if (res != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mId);
+ return res;
+ }
+
+ ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
+ maxConsumerBuffers, camera3_stream::max_buffers);
+ if (camera3_stream::max_buffers == 0) {
+ ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
+ __FUNCTION__, camera3_stream::max_buffers);
+ return INVALID_OPERATION;
+ }
+
+ mTotalBufferCount = maxConsumerBuffers + camera3_stream::max_buffers;
+ mDequeuedBufferCount = 0;
+ mFrameCount = 0;
+ mLastTimestamp = 0;
+
+ res = native_window_set_buffer_count(mConsumer.get(),
+ mTotalBufferCount);
+ if (res != OK) {
+ ALOGE("%s: Unable to set buffer count for stream %d",
+ __FUNCTION__, mId);
+ return res;
+ }
+
+ res = native_window_set_buffers_transform(mConsumer.get(),
+ mTransform);
+ if (res != OK) {
+ ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
+ __FUNCTION__, mTransform, strerror(-res), res);
+ }
+
+ return OK;
+}
+
+status_t Camera3OutputStream::disconnectLocked() {
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) {
+ return res;
+ }
+
+ res = native_window_api_disconnect(mConsumer.get(),
+ NATIVE_WINDOW_API_CAMERA);
+
+ /**
+ * This is not an error. if client calling process dies, the window will
+ * also die and all calls to it will return DEAD_OBJECT, thus it's already
+ * "disconnected"
+ */
+ if (res == DEAD_OBJECT) {
+ ALOGW("%s: While disconnecting stream %d from native window, the"
+ " native window died from under us", __FUNCTION__, mId);
+ }
+ else if (res != OK) {
+ ALOGE("%s: Unable to disconnect stream %d from native window "
+ "(error %d %s)",
+ __FUNCTION__, mId, res, strerror(-res));
+ mState = STATE_ERROR;
+ return res;
+ }
+
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) {
+
+ status_t res;
+ int32_t u = 0;
+ res = mConsumer->query(mConsumer.get(),
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
+ *usage = u;
+
+ return res;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
new file mode 100644
index 0000000..6cbb9f4
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+#include "Camera3IOStreamBase.h"
+#include "Camera3OutputStreamInterface.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * A class for managing a single stream of output data from the camera device.
+ */
+class Camera3OutputStream :
+        public Camera3IOStreamBase,
+        public Camera3OutputStreamInterface {
+  public:
+    /**
+     * Set up a stream for formats that have 2 dimensions, such as RAW and YUV.
+     */
+    Camera3OutputStream(int id, sp<ANativeWindow> consumer,
+            uint32_t width, uint32_t height, int format);
+
+    /**
+     * Set up a stream for formats that have a variable buffer size for the same
+     * dimensions, such as compressed JPEG.
+     */
+    Camera3OutputStream(int id, sp<ANativeWindow> consumer,
+            uint32_t width, uint32_t height, size_t maxSize, int format);
+
+    virtual ~Camera3OutputStream();
+
+    /**
+     * Camera3Stream interface
+     */
+
+    virtual void dump(int fd, const Vector<String16> &args) const;
+
+    /**
+     * Set the transform on the output stream; one of the
+     * HAL_TRANSFORM_* / NATIVE_WINDOW_TRANSFORM_* constants.
+     */
+    status_t setTransform(int transform);
+
+  protected:
+    // Constructor for subclasses. NOTE(review): this overload takes no
+    // consumer, so the subclass presumably attaches mConsumer itself before
+    // configuration — confirm against subclass implementations.
+    Camera3OutputStream(int id, camera3_stream_type_t type,
+            uint32_t width, uint32_t height, int format);
+
+    /**
+     * Note that we release the lock briefly in this function
+     */
+    virtual status_t returnBufferCheckedLocked(
+            const camera3_stream_buffer &buffer,
+            nsecs_t timestamp,
+            bool output,
+            /*out*/
+            sp<Fence> *releaseFenceOut);
+
+    // The ANativeWindow endpoint (e.g. a Surface) this stream delivers
+    // filled buffers to.
+    sp<ANativeWindow> mConsumer;
+  private:
+    // Current rotation/flip transform; applied to the consumer window in
+    // configureQueueLocked() via native_window_set_buffers_transform().
+    int mTransform;
+
+    virtual status_t setTransformLocked(int transform);
+
+    /**
+     * Internal Camera3Stream interface
+     */
+    virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+    virtual status_t returnBufferLocked(
+            const camera3_stream_buffer &buffer,
+            nsecs_t timestamp);
+
+    virtual status_t configureQueueLocked();
+    virtual status_t disconnectLocked();
+
+    virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3OutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
new file mode 100644
index 0000000..aae72cf
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_INTERFACE_H
+#define ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_INTERFACE_H
+
+#include "Camera3StreamInterface.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * An interface for managing a single stream of output data from the camera
+ * device.
+ */
+// Pure interface; Camera3OutputStream provides the concrete implementation.
+class Camera3OutputStreamInterface : public virtual Camera3StreamInterface {
+  public:
+    /**
+     * Set the transform on the output stream; one of the
+     * HAL_TRANSFORM_* / NATIVE_WINDOW_TRANSFORM_* constants.
+     */
+    virtual status_t setTransform(int transform) = 0;
+};
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
new file mode 100644
index 0000000..6d2cf94
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -0,0 +1,426 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-Stream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "device3/Camera3Stream.h"
+#include "device3/StatusTracker.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3Stream::~Camera3Stream() {
+    // Stop reporting idle/active transitions for this stream if a tracker
+    // is still alive and we actually registered a component with it.
+    sp<StatusTracker> statusTracker = mStatusTracker.promote();
+    if (statusTracker != 0 && mStatusId != StatusTracker::NO_STATUS_ID) {
+        statusTracker->removeComponent(mStatusId);
+    }
+}
+
+// Recover the C++ wrapper from a raw camera3_stream pointer handed back by
+// the HAL. Valid because Camera3Stream derives from camera3_stream; assumes
+// the HAL only ever sees streams created by this layer.
+Camera3Stream* Camera3Stream::cast(camera3_stream *stream) {
+    return static_cast<Camera3Stream*>(stream);
+}
+
+const Camera3Stream* Camera3Stream::cast(const camera3_stream *stream) {
+    return static_cast<const Camera3Stream*>(stream);
+}
+
+Camera3Stream::Camera3Stream(int id,
+        camera3_stream_type type,
+        uint32_t width, uint32_t height, size_t maxSize, int format) :
+    camera3_stream(),
+    mId(id),
+    mName(String8::format("Camera3Stream[%d]", id)),
+    mMaxSize(maxSize),
+    mState(STATE_CONSTRUCTED),
+    mStatusId(StatusTracker::NO_STATUS_ID) {
+
+    // Fill in the embedded HAL-visible stream descriptor. usage and
+    // max_buffers stay 0 here; they are filled in between
+    // startConfiguration() and finishConfiguration().
+    camera3_stream::stream_type = type;
+    camera3_stream::width = width;
+    camera3_stream::height = height;
+    camera3_stream::format = format;
+    camera3_stream::usage = 0;
+    camera3_stream::max_buffers = 0;
+    camera3_stream::priv = NULL;
+
+    // Variable-size (BLOB/JPEG) streams must declare a maximum buffer size.
+    if (format == HAL_PIXEL_FORMAT_BLOB && maxSize == 0) {
+        ALOGE("%s: BLOB format with size == 0", __FUNCTION__);
+        mState = STATE_ERROR;
+    }
+}
+
+// Simple accessors for the stream's identity and geometry; all values are
+// fixed at construction time.
+int Camera3Stream::getId() const {
+    return mId;
+}
+
+uint32_t Camera3Stream::getWidth() const {
+    return camera3_stream::width;
+}
+
+uint32_t Camera3Stream::getHeight() const {
+    return camera3_stream::height;
+}
+
+int Camera3Stream::getFormat() const {
+    return camera3_stream::format;
+}
+
+camera3_stream* Camera3Stream::startConfiguration() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    status_t res;
+
+    // Validate that configuration may (re)start from the current state.
+    switch (mState) {
+        case STATE_ERROR:
+            ALOGE("%s: In error state", __FUNCTION__);
+            return NULL;
+        case STATE_CONSTRUCTED:
+            // OK
+            break;
+        case STATE_IN_CONFIG:
+        case STATE_IN_RECONFIG:
+            // Can start config again with no trouble; but don't redo
+            // oldUsage/oldMaxBuffers
+            return this;
+        case STATE_CONFIGURED:
+            if (stream_type == CAMERA3_STREAM_INPUT) {
+                ALOGE("%s: Cannot configure an input stream twice",
+                        __FUNCTION__);
+                return NULL;
+            } else if (hasOutstandingBuffersLocked()) {
+                ALOGE("%s: Cannot configure stream; has outstanding buffers",
+                        __FUNCTION__);
+                return NULL;
+            }
+            break;
+        default:
+            ALOGE("%s: Unknown state %d", __FUNCTION__, mState);
+            return NULL;
+    }
+
+    // Snapshot the pre-configuration settings so finishConfiguration() can
+    // detect a no-op reconfigure and skip buffer reallocation/registration.
+    oldUsage = camera3_stream::usage;
+    oldMaxBuffers = camera3_stream::max_buffers;
+
+    res = getEndpointUsage(&(camera3_stream::usage));
+    if (res != OK) {
+        ALOGE("%s: Cannot query consumer endpoint usage!",
+                __FUNCTION__);
+        return NULL;
+    }
+
+    // Stop tracking if currently doing so
+    if (mStatusId != StatusTracker::NO_STATUS_ID) {
+        sp<StatusTracker> statusTracker = mStatusTracker.promote();
+        if (statusTracker != 0) {
+            statusTracker->removeComponent(mStatusId);
+        }
+        mStatusId = StatusTracker::NO_STATUS_ID;
+    }
+
+    if (mState == STATE_CONSTRUCTED) {
+        mState = STATE_IN_CONFIG;
+    } else { // mState == STATE_CONFIGURED
+        mState = STATE_IN_RECONFIG;
+    }
+
+    return this;
+}
+
+bool Camera3Stream::isConfiguring() const {
+    Mutex::Autolock l(mLock);
+    // True only between startConfiguration() and finishConfiguration().
+    return (mState == STATE_IN_CONFIG) || (mState == STATE_IN_RECONFIG);
+}
+
+status_t Camera3Stream::finishConfiguration(camera3_device *hal3Device) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    switch (mState) {
+        case STATE_ERROR:
+            ALOGE("%s: In error state", __FUNCTION__);
+            return INVALID_OPERATION;
+        case STATE_IN_CONFIG:
+        case STATE_IN_RECONFIG:
+            // OK
+            break;
+        case STATE_CONSTRUCTED:
+        case STATE_CONFIGURED:
+            ALOGE("%s: Cannot finish configuration that hasn't been started",
+                    __FUNCTION__);
+            return INVALID_OPERATION;
+        default:
+            ALOGE("%s: Unknown state", __FUNCTION__);
+            return INVALID_OPERATION;
+    }
+
+    // Register for idle tracking
+    sp<StatusTracker> statusTracker = mStatusTracker.promote();
+    if (statusTracker != 0) {
+        mStatusId = statusTracker->addComponent();
+    }
+
+    // Check if the stream configuration is unchanged, and skip reallocation if
+    // so. As documented in hardware/camera3.h:configure_streams().
+    if (mState == STATE_IN_RECONFIG &&
+            oldUsage == camera3_stream::usage &&
+            oldMaxBuffers == camera3_stream::max_buffers) {
+        mState = STATE_CONFIGURED;
+        return OK;
+    }
+
+    // Set up the endpoint-side buffer queue with the negotiated usage and
+    // buffer counts...
+    status_t res;
+    res = configureQueueLocked();
+    if (res != OK) {
+        ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        mState = STATE_ERROR;
+        return res;
+    }
+
+    // ...then hand every buffer to the HAL once for registration.
+    res = registerBuffersLocked(hal3Device);
+    if (res != OK) {
+        ALOGE("%s: Unable to register stream buffers with HAL: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        mState = STATE_ERROR;
+        return res;
+    }
+
+    mState = STATE_CONFIGURED;
+
+    return res;
+}
+
+// Public buffer hand-off entry points. Each takes the stream lock, defers
+// to the subclass's *Locked hook, and on success notifies registered
+// buffer listeners of the acquire/release event.
+status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    status_t res = getBufferLocked(buffer);
+    if (res == OK) {
+        fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
+    }
+
+    return res;
+}
+
+status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
+        nsecs_t timestamp) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    status_t res = returnBufferLocked(buffer, timestamp);
+    if (res == OK) {
+        fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true);
+    }
+
+    return res;
+}
+
+status_t Camera3Stream::getInputBuffer(camera3_stream_buffer *buffer) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    status_t res = getInputBufferLocked(buffer);
+    if (res == OK) {
+        fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false);
+    }
+
+    return res;
+}
+
+status_t Camera3Stream::returnInputBuffer(const camera3_stream_buffer &buffer) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    status_t res = returnInputBufferLocked(buffer);
+    if (res == OK) {
+        fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/false);
+    }
+    return res;
+}
+
+void Camera3Stream::fireBufferListenersLocked(
+        const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) {
+    List<wp<Camera3StreamBufferListener> >::iterator it, end;
+
+    // TODO: finish implementing
+
+    // Only mOutput is populated so far; the remaining BufferInfo fields
+    // (crop, transform, timestamp, frame number) stay default-initialized
+    // until the TODO above is resolved.
+    Camera3StreamBufferListener::BufferInfo info =
+        Camera3StreamBufferListener::BufferInfo();
+    info.mOutput = output;
+    // TODO: rest of fields
+
+    for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
+         it != end;
+         ++it) {
+
+        // Listeners are held weakly; skip any that have been destroyed.
+        sp<Camera3StreamBufferListener> listener = it->promote();
+        if (listener != 0) {
+            if (acquired) {
+                listener->onBufferAcquired(info);
+            } else {
+                listener->onBufferReleased(info);
+            }
+        }
+    }
+}
+
+bool Camera3Stream::hasOutstandingBuffers() const {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    return hasOutstandingBuffersLocked();
+}
+
+status_t Camera3Stream::setStatusTracker(sp<StatusTracker> statusTracker) {
+    Mutex::Autolock l(mLock);
+    // Deregister from the previous tracker (if any and still alive) before
+    // swapping in the new one; a fresh component ID is only obtained later,
+    // in finishConfiguration().
+    sp<StatusTracker> oldTracker = mStatusTracker.promote();
+    if (oldTracker != 0 && mStatusId != StatusTracker::NO_STATUS_ID) {
+        oldTracker->removeComponent(mStatusId);
+    }
+    mStatusId = StatusTracker::NO_STATUS_ID;
+    mStatusTracker = statusTracker;
+
+    return OK;
+}
+
+status_t Camera3Stream::disconnect() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    ALOGV("%s: Stream %d: Disconnecting...", __FUNCTION__, mId);
+    status_t res = disconnectLocked();
+
+    if (res == -ENOTCONN) {
+        // "Already disconnected" -- not an error
+        return OK;
+    } else {
+        return res;
+    }
+}
+
+status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) {
+    ATRACE_CALL();
+    status_t res;
+
+    size_t bufferCount = getBufferCountLocked();
+
+    Vector<buffer_handle_t*> buffers;
+    buffers.insertAt(NULL, 0, bufferCount);
+
+    camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set();
+    bufferSet.stream = this;
+    bufferSet.num_buffers = bufferCount;
+    bufferSet.buffers = buffers.editArray();
+
+    Vector<camera3_stream_buffer_t> streamBuffers;
+    streamBuffers.insertAt(camera3_stream_buffer_t(), 0, bufferCount);
+
+    // Register all buffers with the HAL. This means getting all the buffers
+    // from the stream, providing them to the HAL with the
+    // register_stream_buffers() method, and then returning them back to the
+    // stream in the error state, since they won't have valid data.
+    //
+    // Only registered buffers can be sent to the HAL.
+
+    uint32_t bufferIdx = 0;
+    for (; bufferIdx < bufferCount; bufferIdx++) {
+        res = getBufferLocked( &streamBuffers.editItemAt(bufferIdx) );
+        if (res != OK) {
+            // Fixed format specifier: bufferIdx is uint32_t, so use %u
+            // instead of %d.
+            ALOGE("%s: Unable to get buffer %u for registration with HAL",
+                    __FUNCTION__, bufferIdx);
+            // Skip registering, go straight to cleanup
+            break;
+        }
+
+        // The buffer may still be in use by its producer; wait on its
+        // acquire fence before handing the raw handle to the HAL.
+        sp<Fence> fence = new Fence(streamBuffers[bufferIdx].acquire_fence);
+        fence->waitForever("Camera3Stream::registerBuffers");
+
+        buffers.editItemAt(bufferIdx) = streamBuffers[bufferIdx].buffer;
+    }
+    if (bufferIdx == bufferCount) {
+        // Got all buffers, register with HAL.
+        // Fixed format specifier: bufferCount is size_t, so %zu rather than
+        // %d (which is wrong on LP64 builds).
+        ALOGV("%s: Registering %zu buffers with camera HAL",
+                __FUNCTION__, bufferCount);
+        ATRACE_BEGIN("camera3->register_stream_buffers");
+        res = hal3Device->ops->register_stream_buffers(hal3Device,
+                &bufferSet);
+        ATRACE_END();
+    }
+
+    // Return all valid buffers to stream, in ERROR state to indicate
+    // they weren't filled.
+    for (size_t i = 0; i < bufferIdx; i++) {
+        streamBuffers.editItemAt(i).release_fence = -1;
+        streamBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+        returnBufferLocked(streamBuffers[i], 0);
+    }
+
+    return res;
+}
+
+// Default implementations of the per-direction buffer hooks. A concrete
+// stream overrides only the direction(s) it supports; reaching one of these
+// means the stream was asked for a direction it does not implement.
+status_t Camera3Stream::getBufferLocked(camera3_stream_buffer *) {
+    ALOGE("%s: This type of stream does not support output", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+status_t Camera3Stream::returnBufferLocked(const camera3_stream_buffer &,
+        nsecs_t) {
+    ALOGE("%s: This type of stream does not support output", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+status_t Camera3Stream::getInputBufferLocked(camera3_stream_buffer *) {
+    ALOGE("%s: This type of stream does not support input", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+status_t Camera3Stream::returnInputBufferLocked(
+        const camera3_stream_buffer &) {
+    ALOGE("%s: This type of stream does not support input", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
+void Camera3Stream::addBufferListener(
+        wp<Camera3StreamBufferListener> listener) {
+    Mutex::Autolock l(mLock);
+    // Stored weakly; dead entries are skipped when events are fired.
+    mBufferListenerList.push_back(listener);
+}
+
+void Camera3Stream::removeBufferListener(
+        const sp<Camera3StreamBufferListener>& listener) {
+    Mutex::Autolock l(mLock);
+
+    // Remove every occurrence of the listener from the list.
+    // Bug fix: this flag must start false; it was previously initialized to
+    // true, which made the "not found" warning below unreachable.
+    bool erased = false;
+    List<wp<Camera3StreamBufferListener> >::iterator it, end;
+    for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
+         it != end;
+         ) {
+
+        if (*it == listener) {
+            it = mBufferListenerList.erase(it);
+            erased = true;
+        } else {
+            ++it;
+        }
+    }
+
+    if (!erased) {
+        ALOGW("%s: Could not find listener to remove, already removed",
+                __FUNCTION__);
+    }
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
new file mode 100644
index 0000000..6eeb721
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_STREAM_H
+
+#include <gui/Surface.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/List.h>
+
+#include "hardware/camera3.h"
+
+#include "Camera3StreamBufferListener.h"
+#include "Camera3StreamInterface.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * A class for managing a single stream of input or output data from the camera
+ * device.
+ *
+ * The stream has an internal state machine to track whether it's
+ * connected/configured/etc.
+ *
+ * States:
+ *
+ * STATE_ERROR: A serious error has occurred, stream is unusable. Outstanding
+ * buffers may still be returned.
+ *
+ * STATE_CONSTRUCTED: The stream is ready for configuration, but buffers cannot
+ * be gotten yet. Not connected to any endpoint, no buffers are registered
+ * with the HAL.
+ *
+ * STATE_IN_CONFIG: Configuration has started, but not yet concluded. During this
+ * time, the usage, max_buffers, and priv fields of camera3_stream returned by
+ * startConfiguration() may be modified.
+ *
+ * STATE_IN_RECONFIG: Configuration has started, and the stream has been
+ *   configured before. Need to track separately from IN_CONFIG to avoid
+ *   re-registering buffers with HAL.
+ *
+ * STATE_CONFIGURED: Stream is configured, and has registered buffers with the
+ * HAL. The stream's getBuffer/returnBuffer work. The priv pointer may still be
+ * modified.
+ *
+ * Transition table:
+ *
+ * <none> => STATE_CONSTRUCTED:
+ * When constructed with valid arguments
+ * <none> => STATE_ERROR:
+ * When constructed with invalid arguments
+ * STATE_CONSTRUCTED => STATE_IN_CONFIG:
+ * When startConfiguration() is called
+ * STATE_IN_CONFIG => STATE_CONFIGURED:
+ * When finishConfiguration() is called
+ * STATE_IN_CONFIG => STATE_ERROR:
+ * When finishConfiguration() fails to allocate or register buffers.
+ * STATE_CONFIGURED => STATE_IN_RECONFIG:
+ *    When startConfiguration() is called again, after making sure stream is
+ *    idle with waitUntilIdle().
+ * STATE_IN_RECONFIG => STATE_CONFIGURED:
+ *    When finishConfiguration() is called.
+ * STATE_IN_RECONFIG => STATE_ERROR:
+ *    When finishConfiguration() fails to allocate or register buffers.
+ * STATE_CONFIGURED => STATE_CONSTRUCTED:
+ * When disconnect() is called after making sure stream is idle with
+ * waitUntilIdle().
+ */
+class Camera3Stream :
+        protected camera3_stream,
+        public virtual Camera3StreamInterface,
+        public virtual RefBase {
+  public:
+
+    virtual ~Camera3Stream();
+
+    // Recover the C++ wrapper from a raw camera3_stream pointer handed back
+    // by the HAL; valid only for streams created by this layer.
+    static Camera3Stream* cast(camera3_stream *stream);
+    static const Camera3Stream* cast(const camera3_stream *stream);
+
+    /**
+     * Get the stream's ID
+     */
+    int getId() const;
+
+    /**
+     * Get the stream's dimensions and format
+     */
+    uint32_t getWidth() const;
+    uint32_t getHeight() const;
+    int getFormat() const;
+
+    /**
+     * Start the stream configuration process. Returns a handle to the stream's
+     * information to be passed into the HAL device's configure_streams call.
+     *
+     * Until finishConfiguration() is called, no other methods on the stream may be
+     * called. The usage and max_buffers fields of camera3_stream may be modified
+     * between start/finishConfiguration, but may not be changed after that.
+     * The priv field of camera3_stream may be modified at any time after
+     * startConfiguration.
+     *
+     * Returns NULL in case of error starting configuration.
+     */
+    camera3_stream* startConfiguration();
+
+    /**
+     * Check if the stream is mid-configuration (start has been called, but not
+     * finish). Used for lazy completion of configuration.
+     */
+    bool isConfiguring() const;
+
+    /**
+     * Completes the stream configuration process. During this call, the stream
+     * may call the device's register_stream_buffers() method. The stream
+     * information structure returned by startConfiguration() may no longer be
+     * modified after this call, but can still be read until the destruction of
+     * the stream.
+     *
+     * Returns:
+     *   OK on a successful configuration
+     *   NO_INIT in case of a serious error from the HAL device
+     *   NO_MEMORY in case of an error registering buffers
+     *   INVALID_OPERATION in case connecting to the consumer failed
+     */
+    status_t finishConfiguration(camera3_device *hal3Device);
+
+    /**
+     * Fill in the camera3_stream_buffer with the next valid buffer for this
+     * stream, to hand over to the HAL.
+     *
+     * This method may only be called once finishConfiguration has been called.
+     * For bidirectional streams, this method applies to the output-side
+     * buffers.
+     *
+     */
+    status_t getBuffer(camera3_stream_buffer *buffer);
+
+    /**
+     * Return a buffer to the stream after use by the HAL.
+     *
+     * This method may only be called for buffers provided by getBuffer().
+     * For bidirectional streams, this method applies to the output-side buffers
+     */
+    status_t returnBuffer(const camera3_stream_buffer &buffer,
+            nsecs_t timestamp);
+
+    /**
+     * Fill in the camera3_stream_buffer with the next valid buffer for this
+     * stream, to hand over to the HAL.
+     *
+     * This method may only be called once finishConfiguration has been called.
+     * For bidirectional streams, this method applies to the input-side
+     * buffers.
+     *
+     */
+    status_t getInputBuffer(camera3_stream_buffer *buffer);
+
+    /**
+     * Return a buffer to the stream after use by the HAL.
+     *
+     * This method may only be called for buffers provided by getBuffer().
+     * For bidirectional streams, this method applies to the input-side buffers
+     */
+    status_t returnInputBuffer(const camera3_stream_buffer &buffer);
+
+    /**
+     * Whether any of the stream's buffers are currently in use by the HAL,
+     * including buffers that have been returned but not yet had their
+     * release fence signaled.
+     */
+    bool hasOutstandingBuffers() const;
+
+    // Sentinel timeout value meaning "wait indefinitely".
+    enum {
+        TIMEOUT_NEVER = -1
+    };
+
+    /**
+     * Set the status tracker to notify about idle transitions
+     */
+    virtual status_t setStatusTracker(sp<StatusTracker> statusTracker);
+
+    /**
+     * Disconnect stream from its non-HAL endpoint. After this,
+     * start/finishConfiguration must be called before the stream can be used
+     * again. This cannot be called if the stream has outstanding dequeued
+     * buffers.
+     */
+    status_t disconnect();
+
+    /**
+     * Debug dump of the stream's state.
+     */
+    virtual void dump(int fd, const Vector<String16> &args) const = 0;
+
+    // Register/unregister observers of buffer acquire/release events.
+    // Listeners are held weakly; dead entries are skipped when firing.
+    void addBufferListener(
+            wp<Camera3StreamBufferListener> listener);
+    void removeBufferListener(
+            const sp<Camera3StreamBufferListener>& listener);
+
+  protected:
+    const int mId;
+    const String8 mName;
+    // Zero for formats with fixed buffer size for given dimensions.
+    const size_t mMaxSize;
+
+    // Stream lifecycle state machine; see the transition table in the
+    // class-level comment above.
+    enum {
+        STATE_ERROR,
+        STATE_CONSTRUCTED,
+        STATE_IN_CONFIG,
+        STATE_IN_RECONFIG,
+        STATE_CONFIGURED
+    } mState;
+
+    // Guards mState, listener list, and all buffer operations.
+    mutable Mutex mLock;
+
+    Camera3Stream(int id, camera3_stream_type type,
+            uint32_t width, uint32_t height, size_t maxSize, int format);
+
+    /**
+     * Interface to be implemented by derived classes
+     */
+
+    // getBuffer / returnBuffer implementations
+
+    // Since camera3_stream_buffer includes a raw pointer to the stream,
+    // cast to camera3_stream*, implementations must increment the
+    // refcount of the stream manually in getBufferLocked, and decrement it in
+    // returnBufferLocked.
+    virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+    virtual status_t returnBufferLocked(const camera3_stream_buffer &buffer,
+            nsecs_t timestamp);
+    virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
+    virtual status_t returnInputBufferLocked(
+            const camera3_stream_buffer &buffer);
+    virtual bool hasOutstandingBuffersLocked() const = 0;
+    // Can return -ENOTCONN when we are already disconnected (not an error)
+    virtual status_t disconnectLocked() = 0;
+
+    // Configure the buffer queue interface to the other end of the stream,
+    // after the HAL has provided usage and max_buffers values. After this call,
+    // the stream must be ready to produce all buffers for registration with
+    // HAL.
+    virtual status_t configureQueueLocked() = 0;
+
+    // Get the total number of buffers in the queue
+    virtual size_t getBufferCountLocked() = 0;
+
+    // Get the usage flags for the other endpoint, or return
+    // INVALID_OPERATION if they cannot be obtained.
+    virtual status_t getEndpointUsage(uint32_t *usage) = 0;
+
+    // Tracking for idle state
+    wp<StatusTracker> mStatusTracker;
+    // Status tracker component ID
+    int mStatusId;
+
+  private:
+    // usage/max_buffers captured at startConfiguration(), used by
+    // finishConfiguration() to detect a no-op reconfigure.
+    uint32_t oldUsage;
+    uint32_t oldMaxBuffers;
+
+    // Gets all buffers from endpoint and registers them with the HAL.
+    status_t registerBuffersLocked(camera3_device *hal3Device);
+
+    void fireBufferListenersLocked(const camera3_stream_buffer& buffer,
+            bool acquired, bool output);
+    List<wp<Camera3StreamBufferListener> > mBufferListenerList;
+
+}; // class Camera3Stream
+
+}; // namespace camera3
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
new file mode 100644
index 0000000..62ea6c0
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H
+#define ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H
+
+#include <gui/Surface.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3StreamBufferListener : public virtual RefBase {
+public:
+
+    // Snapshot of a buffer's metadata at acquire/release time. NOTE: only
+    // mOutput is currently populated by Camera3Stream (see the TODO in
+    // fireBufferListenersLocked); the remaining fields are
+    // default-initialized.
+    struct BufferInfo {
+        bool mOutput; // if false then input buffer
+        Rect mCrop;
+        uint32_t mTransform;
+        uint32_t mScalingMode;
+        int64_t mTimestamp;
+        uint64_t mFrameNumber;
+    };
+
+    // Buffer was acquired by the HAL
+    virtual void onBufferAcquired(const BufferInfo& bufferInfo) = 0;
+    // Buffer was released by the HAL
+    virtual void onBufferReleased(const BufferInfo& bufferInfo) = 0;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
new file mode 100644
index 0000000..c93ae15
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAM_INTERFACE_H
+#define ANDROID_SERVERS_CAMERA3_STREAM_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include "Camera3StreamBufferListener.h"
+
+struct camera3_stream_buffer;
+
+namespace android {
+
+namespace camera3 {
+
+class StatusTracker;
+
+/**
+ * An interface for managing a single stream of input and/or output data from
+ * the camera device.
+ */
+class Camera3StreamInterface : public virtual RefBase {
+  public:
+    /**
+     * Get the stream's ID
+     */
+    virtual int getId() const = 0;
+
+    /**
+     * Get the stream's dimensions and format
+     */
+    virtual uint32_t getWidth() const = 0;
+    virtual uint32_t getHeight() const = 0;
+    virtual int getFormat() const = 0;
+
+    /**
+     * Start the stream configuration process. Returns a handle to the stream's
+     * information to be passed into the HAL device's configure_streams call.
+     *
+     * Until finishConfiguration() is called, no other methods on the stream may
+     * be called. The usage and max_buffers fields of camera3_stream may be
+     * modified between start/finishConfiguration, but may not be changed after
+     * that. The priv field of camera3_stream may be modified at any time after
+     * startConfiguration.
+     *
+     * Returns NULL in case of error starting configuration.
+     */
+    virtual camera3_stream* startConfiguration() = 0;
+
+    /**
+     * Check if the stream is mid-configuration (start has been called, but not
+     * finish). Used for lazy completion of configuration.
+     */
+    virtual bool isConfiguring() const = 0;
+
+    /**
+     * Completes the stream configuration process. During this call, the stream
+     * may call the device's register_stream_buffers() method. The stream
+     * information structure returned by startConfiguration() may no longer be
+     * modified after this call, but can still be read until the destruction of
+     * the stream.
+     *
+     * Returns:
+     *   OK on a successful configuration
+     *   NO_INIT in case of a serious error from the HAL device
+     *   NO_MEMORY in case of an error registering buffers
+     *   INVALID_OPERATION in case connecting to the consumer failed
+     */
+    virtual status_t finishConfiguration(camera3_device *hal3Device) = 0;
+
+    /**
+     * Fill in the camera3_stream_buffer with the next valid buffer for this
+     * stream, to hand over to the HAL.
+     *
+     * This method may only be called once finishConfiguration has been called.
+     * For bidirectional streams, this method applies to the output-side
+     * buffers.
+     *
+     */
+    virtual status_t getBuffer(camera3_stream_buffer *buffer) = 0;
+
+    /**
+     * Return a buffer to the stream after use by the HAL.
+     *
+     * This method may only be called for buffers provided by getBuffer().
+     * For bidirectional streams, this method applies to the output-side buffers
+     */
+    virtual status_t returnBuffer(const camera3_stream_buffer &buffer,
+            nsecs_t timestamp) = 0;
+
+    /**
+     * Fill in the camera3_stream_buffer with the next valid buffer for this
+     * stream, to hand over to the HAL.
+     *
+     * This method may only be called once finishConfiguration has been called.
+     * For bidirectional streams, this method applies to the input-side
+     * buffers.
+     *
+     */
+    virtual status_t getInputBuffer(camera3_stream_buffer *buffer) = 0;
+
+    /**
+     * Return a buffer to the stream after use by the HAL.
+     *
+     * This method may only be called for buffers provided by getBuffer().
+     * For bidirectional streams, this method applies to the input-side buffers
+     */
+    virtual status_t returnInputBuffer(const camera3_stream_buffer &buffer) = 0;
+
+    /**
+     * Whether any of the stream's buffers are currently in use by the HAL,
+     * including buffers that have been returned but not yet had their
+     * release fence signaled.
+     */
+    virtual bool hasOutstandingBuffers() const = 0;
+
+    // Sentinel timeout value meaning "wait indefinitely".
+    enum {
+        TIMEOUT_NEVER = -1
+    };
+
+    /**
+     * Set the state tracker to use for signaling idle transitions.
+     */
+    virtual status_t setStatusTracker(sp<StatusTracker> statusTracker) = 0;
+
+    /**
+     * Disconnect stream from its non-HAL endpoint. After this,
+     * start/finishConfiguration must be called before the stream can be used
+     * again. This cannot be called if the stream has outstanding dequeued
+     * buffers.
+     */
+    virtual status_t disconnect() = 0;
+
+    /**
+     * Debug dump of the stream's state.
+     */
+    virtual void dump(int fd, const Vector<String16> &args) const = 0;
+
+    // Register/unregister observers of buffer acquire/release events.
+    virtual void addBufferListener(
+            wp<Camera3StreamBufferListener> listener) = 0;
+    virtual void removeBufferListener(
+            const sp<Camera3StreamBufferListener>& listener) = 0;
+};
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
new file mode 100644
index 0000000..1a54923
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-ZslStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3ZslStream.h"
+
+typedef android::RingBufferConsumer::PinnedBufferItem PinnedBufferItem;
+
+namespace android {
+
+namespace camera3 {
+
+namespace {
+struct TimestampFinder : public RingBufferConsumer::RingBufferComparator {
+ typedef RingBufferConsumer::BufferInfo BufferInfo;
+
+ enum {
+ SELECT_I1 = -1,
+ SELECT_I2 = 1,
+ SELECT_NEITHER = 0,
+ };
+
+ TimestampFinder(nsecs_t timestamp) : mTimestamp(timestamp) {}
+ ~TimestampFinder() {}
+
+ template <typename T>
+ static void swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+ }
+
+ /**
+ * Try to find the best candidate for a ZSL buffer.
+ * Match priority from best to worst:
+ * 1) Timestamps match.
+ * 2) Timestamp is closest to the needle (and lower).
+ * 3) Timestamp is closest to the needle (and higher).
+ *
+ */
+ virtual int compare(const BufferInfo *i1,
+ const BufferInfo *i2) const {
+ // Try to select non-null object first.
+ if (i1 == NULL) {
+ return SELECT_I2;
+ } else if (i2 == NULL) {
+ return SELECT_I1;
+ }
+
+ // Best result: timestamp is identical
+ if (i1->mTimestamp == mTimestamp) {
+ return SELECT_I1;
+ } else if (i2->mTimestamp == mTimestamp) {
+ return SELECT_I2;
+ }
+
+ const BufferInfo* infoPtrs[2] = {
+ i1,
+ i2
+ };
+ int infoSelectors[2] = {
+ SELECT_I1,
+ SELECT_I2
+ };
+
+ // Order i1,i2 so that always i1.timestamp < i2.timestamp
+ if (i1->mTimestamp > i2->mTimestamp) {
+ swap(infoPtrs[0], infoPtrs[1]);
+ swap(infoSelectors[0], infoSelectors[1]);
+ }
+
+ // Second best: closest (lower) timestamp
+ if (infoPtrs[1]->mTimestamp < mTimestamp) {
+ return infoSelectors[1];
+ } else if (infoPtrs[0]->mTimestamp < mTimestamp) {
+ return infoSelectors[0];
+ }
+
+ // Worst: closest (higher) timestamp
+ return infoSelectors[0];
+
+ /**
+ * The above cases should cover all the possibilities,
+ * and we get an 'empty' result only if the ring buffer
+ * was empty itself
+ */
+ }
+
+ const nsecs_t mTimestamp;
+}; // struct TimestampFinder
+} // namespace anonymous
+
+Camera3ZslStream::Camera3ZslStream(int id, uint32_t width, uint32_t height,
+ int depth) :
+ Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
+ width, height,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED),
+ mDepth(depth) {
+
+ sp<BufferQueue> bq = new BufferQueue();
+ mProducer = new RingBufferConsumer(bq, GRALLOC_USAGE_HW_CAMERA_ZSL, depth);
+ mConsumer = new Surface(bq);
+}
+
+Camera3ZslStream::~Camera3ZslStream() {
+}
+
+status_t Camera3ZslStream::getInputBufferLocked(camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+
+ status_t res;
+
+ // TODO: potentially register from inputBufferLocked
+ // this should be ok, registerBuffersLocked only calls getBuffer for now
+ // register in output mode instead of input mode for ZSL streams.
+ if (mState == STATE_IN_CONFIG || mState == STATE_IN_RECONFIG) {
+ ALOGE("%s: Stream %d: Buffer registration for input streams"
+ " not implemented (state %d)",
+ __FUNCTION__, mId, mState);
+ return INVALID_OPERATION;
+ }
+
+ if ((res = getBufferPreconditionCheckLocked()) != OK) {
+ return res;
+ }
+
+ ANativeWindowBuffer* anb;
+ int fenceFd;
+
+ assert(mProducer != 0);
+
+ sp<PinnedBufferItem> bufferItem;
+ {
+ List<sp<RingBufferConsumer::PinnedBufferItem> >::iterator it, end;
+ it = mInputBufferQueue.begin();
+ end = mInputBufferQueue.end();
+
+ // Need to call enqueueInputBufferByTimestamp as a prerequisite
+ if (it == end) {
+ ALOGE("%s: Stream %d: No input buffer was queued",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ bufferItem = *it;
+ mInputBufferQueue.erase(it);
+ }
+
+ anb = bufferItem->getBufferItem().mGraphicBuffer->getNativeBuffer();
+ assert(anb != NULL);
+ fenceFd = bufferItem->getBufferItem().mFence->dup();
+
+ /**
+ * FenceFD now owned by HAL except in case of error,
+ * in which case we reassign it to acquire_fence
+ */
+ handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
+ /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);
+
+ mBuffersInFlight.push_back(bufferItem);
+
+ return OK;
+}
+
+status_t Camera3ZslStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+
+ if (output) {
+ // Output stream path
+ return Camera3OutputStream::returnBufferCheckedLocked(buffer,
+ timestamp,
+ output,
+ releaseFenceOut);
+ }
+
+ /**
+ * Input stream path
+ */
+ bool bufferFound = false;
+ sp<PinnedBufferItem> bufferItem;
+ {
+ // Find the buffer we are returning
+ Vector<sp<PinnedBufferItem> >::iterator it, end;
+ for (it = mBuffersInFlight.begin(), end = mBuffersInFlight.end();
+ it != end;
+ ++it) {
+
+ const sp<PinnedBufferItem>& tmp = *it;
+ ANativeWindowBuffer *anb =
+ tmp->getBufferItem().mGraphicBuffer->getNativeBuffer();
+ if (anb != NULL && &(anb->handle) == buffer.buffer) {
+ bufferFound = true;
+ bufferItem = tmp;
+ mBuffersInFlight.erase(it);
+ break;
+ }
+ }
+ }
+ if (!bufferFound) {
+ ALOGE("%s: Stream %d: Can't return buffer that wasn't sent to HAL",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ int releaseFenceFd = buffer.release_fence;
+
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.release_fence != -1) {
+ ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
+ "there is an error", __FUNCTION__, mId, buffer.release_fence);
+ close(buffer.release_fence);
+ }
+
+ /**
+     * Reassign release fence as the acquire fence in case of error
+ */
+ releaseFenceFd = buffer.acquire_fence;
+ }
+
+ /**
+ * Unconditionally return buffer to the buffer queue.
+ * - Fwk takes over the release_fence ownership
+ */
+ sp<Fence> releaseFence = new Fence(releaseFenceFd);
+ bufferItem->getBufferItem().mFence = releaseFence;
+ bufferItem.clear(); // dropping last reference unpins buffer
+
+ *releaseFenceOut = releaseFence;
+
+ return OK;
+}
+
+status_t Camera3ZslStream::returnInputBufferLocked(
+ const camera3_stream_buffer &buffer) {
+ ATRACE_CALL();
+
+ status_t res = returnAnyBufferLocked(buffer, /*timestamp*/0,
+ /*output*/false);
+
+ return res;
+}
+
+void Camera3ZslStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: ZSL\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+
+ lines = String8();
+ lines.appendFormat(" Input buffers pending: %zu, in flight %zu\n",
+ mInputBufferQueue.size(), mBuffersInFlight.size());
+ write(fd, lines.string(), lines.size());
+}
+
+status_t Camera3ZslStream::enqueueInputBufferByTimestamp(
+ nsecs_t timestamp,
+ nsecs_t* actualTimestamp) {
+
+ Mutex::Autolock l(mLock);
+
+ TimestampFinder timestampFinder = TimestampFinder(timestamp);
+
+ sp<RingBufferConsumer::PinnedBufferItem> pinnedBuffer =
+ mProducer->pinSelectedBuffer(timestampFinder,
+ /*waitForFence*/false);
+
+ if (pinnedBuffer == 0) {
+ ALOGE("%s: No ZSL buffers were available yet", __FUNCTION__);
+ return NO_BUFFER_AVAILABLE;
+ }
+
+ nsecs_t actual = pinnedBuffer->getBufferItem().mTimestamp;
+
+ if (actual != timestamp) {
+ ALOGW("%s: ZSL buffer candidate search didn't find an exact match --"
+ " requested timestamp = %lld, actual timestamp = %lld",
+ __FUNCTION__, timestamp, actual);
+ }
+
+ mInputBufferQueue.push_back(pinnedBuffer);
+
+ if (actualTimestamp != NULL) {
+ *actualTimestamp = actual;
+ }
+
+ return OK;
+}
+
+status_t Camera3ZslStream::clearInputRingBuffer() {
+ Mutex::Autolock l(mLock);
+
+ mInputBufferQueue.clear();
+
+ return mProducer->clear();
+}
+
+status_t Camera3ZslStream::setTransform(int /*transform*/) {
+ ALOGV("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
new file mode 100644
index 0000000..c7f4490
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_ZSL_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_ZSL_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+#include <gui/RingBufferConsumer.h>
+
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * A class for managing a single opaque ZSL stream to/from the camera device.
+ * This acts as a bidirectional stream at the HAL layer, caching and discarding
+ * most output buffers, and when directed, pushes a buffer back to the HAL for
+ * processing.
+ */
+class Camera3ZslStream :
+ public Camera3OutputStream {
+ public:
+ /**
+ * Set up a ZSL stream of a given resolution. Depth is the number of buffers
+ * cached within the stream that can be retrieved for input.
+ */
+ Camera3ZslStream(int id, uint32_t width, uint32_t height, int depth);
+ ~Camera3ZslStream();
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ enum { NO_BUFFER_AVAILABLE = BufferQueue::NO_BUFFER_AVAILABLE };
+
+ /**
+ * Locate a buffer matching this timestamp in the RingBufferConsumer,
+ * and mark it to be queued at the next getInputBufferLocked invocation.
+ *
+ * Errors: Returns NO_BUFFER_AVAILABLE if we could not find a match.
+ *
+ */
+ status_t enqueueInputBufferByTimestamp(nsecs_t timestamp,
+ nsecs_t* actualTimestamp);
+
+ /**
+ * Clears the buffers that can be used by enqueueInputBufferByTimestamp
+ */
+ status_t clearInputRingBuffer();
+
+ protected:
+
+ /**
+ * Camera3OutputStreamInterface implementation
+ */
+ status_t setTransform(int transform);
+
+ private:
+
+ int mDepth;
+ // Input buffers pending to be queued into HAL
+ List<sp<RingBufferConsumer::PinnedBufferItem> > mInputBufferQueue;
+ sp<RingBufferConsumer> mProducer;
+
+ // Input buffers in flight to HAL
+ Vector<sp<RingBufferConsumer::PinnedBufferItem> > mBuffersInFlight;
+
+ /**
+ * Camera3Stream interface
+ */
+
+ // getInputBuffer/returnInputBuffer operate the input stream side of the
+ // ZslStream.
+ virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t returnInputBufferLocked(
+ const camera3_stream_buffer &buffer);
+
+ // Actual body to return either input or output buffers
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut);
+}; // class Camera3ZslStream
+
+}; // namespace camera3
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/StatusTracker.cpp b/services/camera/libcameraservice/device3/StatusTracker.cpp
new file mode 100644
index 0000000..ab5419f
--- /dev/null
+++ b/services/camera/libcameraservice/device3/StatusTracker.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-Status"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+// This is needed for stdint.h to define INT64_MAX in C++
+#define __STDC_LIMIT_MACROS
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <ui/Fence.h>
+
+#include "device3/StatusTracker.h"
+#include "device3/Camera3Device.h"
+
+namespace android {
+
+namespace camera3 {
+
+StatusTracker::StatusTracker(wp<Camera3Device> parent) :
+ mComponentsChanged(false),
+ mParent(parent),
+ mNextComponentId(0),
+ mIdleFence(new Fence()),
+ mDeviceState(IDLE) {
+}
+
+StatusTracker::~StatusTracker() {
+}
+
+int StatusTracker::addComponent() {
+ int id;
+ ssize_t err;
+ {
+ Mutex::Autolock l(mLock);
+ id = mNextComponentId++;
+ ALOGV("%s: Adding new component %d", __FUNCTION__, id);
+
+ err = mStates.add(id, IDLE);
+ ALOGE_IF(err < 0, "%s: Can't add new component %d: %s (%d)",
+ __FUNCTION__, id, strerror(-err), err);
+ }
+
+ if (err >= 0) {
+ Mutex::Autolock pl(mPendingLock);
+ mComponentsChanged = true;
+ mPendingChangeSignal.signal();
+ }
+
+ return err < 0 ? err : id;
+}
+
+void StatusTracker::removeComponent(int id) {
+ ssize_t idx;
+ {
+ Mutex::Autolock l(mLock);
+ ALOGV("%s: Removing component %d", __FUNCTION__, id);
+ idx = mStates.removeItem(id);
+ }
+
+ if (idx >= 0) {
+ Mutex::Autolock pl(mPendingLock);
+ mComponentsChanged = true;
+ mPendingChangeSignal.signal();
+ }
+
+ return;
+}
+
+
+void StatusTracker::markComponentIdle(int id, const sp<Fence>& componentFence) {
+ markComponent(id, IDLE, componentFence);
+}
+
+void StatusTracker::markComponentActive(int id) {
+ markComponent(id, ACTIVE, Fence::NO_FENCE);
+}
+
+void StatusTracker::markComponent(int id, ComponentState state,
+ const sp<Fence>& componentFence) {
+ ALOGV("%s: Component %d is now %s", __FUNCTION__, id,
+ state == IDLE ? "idle" : "active");
+ Mutex::Autolock l(mPendingLock);
+
+ StateChange newState = {
+ id,
+ state,
+ componentFence
+ };
+
+ mPendingChangeQueue.add(newState);
+ mPendingChangeSignal.signal();
+}
+
+void StatusTracker::requestExit() {
+ // First mark thread dead
+ Thread::requestExit();
+ // Then exit any waits
+ mPendingChangeSignal.signal();
+}
+
+StatusTracker::ComponentState StatusTracker::getDeviceStateLocked() {
+ for (size_t i = 0; i < mStates.size(); i++) {
+ if (mStates.valueAt(i) == ACTIVE) {
+ ALOGV("%s: Component %d not idle", __FUNCTION__,
+ mStates.keyAt(i));
+ return ACTIVE;
+ }
+ }
+ // - If not yet signaled, getSignalTime returns INT64_MAX
+ // - If invalid fence or error, returns -1
+ // - Otherwise returns time of signalling.
+ // Treat -1 as 'signalled', since HAL may not be using fences, and want
+ // to be able to idle in case of errors.
+ nsecs_t signalTime = mIdleFence->getSignalTime();
+ bool fencesDone = signalTime != INT64_MAX;
+
+ ALOGV_IF(!fencesDone, "%s: Fences still to wait on", __FUNCTION__);
+
+ return fencesDone ? IDLE : ACTIVE;
+}
+
+bool StatusTracker::threadLoop() {
+    status_t res;
+
+    // Wait for state updates
+    {
+        Mutex::Autolock pl(mPendingLock);
+        while (mPendingChangeQueue.size() == 0 && !mComponentsChanged) {
+            res = mPendingChangeSignal.waitRelative(mPendingLock,
+                    kWaitDuration);
+            if (exitPending()) return false;
+            if (res != OK) {
+                if (res != TIMED_OUT) {
+                    ALOGE("%s: Error waiting on state changes: %s (%d)",
+                            __FUNCTION__, strerror(-res), res);
+                }
+                // TIMED_OUT is expected
+                break;
+            }
+        }
+    }
+
+    // After new pending states appear, or timeout, check if we're idle. Even
+    // with timeout, need to check to account for fences that may still be
+    // clearing out
+    sp<Camera3Device> parent;
+    {
+        Mutex::Autolock pl(mPendingLock);
+        Mutex::Autolock l(mLock);
+
+        // Collect all pending state updates and see if the device
+        // collectively transitions between idle and active for each one
+
+        // First pass for changed components or fence completions
+        ComponentState prevState = getDeviceStateLocked();
+        if (prevState != mDeviceState) {
+            // Only collect changes to overall device state
+            mStateTransitions.add(prevState);
+        }
+        // For each pending component state update, check if we've transitioned
+        // to a new overall device state
+        for (size_t i = 0; i < mPendingChangeQueue.size(); i++) {
+            const StateChange &newState = mPendingChangeQueue[i];
+            ssize_t idx = mStates.indexOfKey(newState.id);
+            // Ignore notices for unknown components
+            if (idx >= 0) {
+                // Update single component state
+                mStates.replaceValueAt(idx, newState.state);
+                mIdleFence = Fence::merge(String8("idleFence"),
+                        mIdleFence, newState.fence);
+                // .. and see if overall device state has changed
+                ComponentState currentState = getDeviceStateLocked();
+                if (currentState != prevState) {
+                    mStateTransitions.add(currentState);
+                }
+                prevState = currentState;
+            }
+        }
+        mPendingChangeQueue.clear();
+        mComponentsChanged = false;
+
+        // Store final state after all pending state changes are done with
+
+        mDeviceState = prevState;
+        parent = mParent.promote();
+    }
+
+    // Notify parent for all intermediate transitions
+    if (mStateTransitions.size() > 0 && parent.get()) {
+        for (size_t i = 0; i < mStateTransitions.size(); i++) {
+            bool idle = (mStateTransitions[i] == IDLE);
+            ALOGV("Camera device is now %s", idle ? "idle" : "active");
+            parent->notifyStatus(idle);
+        }
+    }
+    mStateTransitions.clear();
+
+    return true;
+}
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/StatusTracker.h b/services/camera/libcameraservice/device3/StatusTracker.h
new file mode 100644
index 0000000..49cecb3
--- /dev/null
+++ b/services/camera/libcameraservice/device3/StatusTracker.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STATUSTRACKER_H
+#define ANDROID_SERVERS_CAMERA3_STATUSTRACKER_H
+
+#include <utils/Condition.h>
+#include <utils/Errors.h>
+#include <utils/List.h>
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include <utils/KeyedVector.h>
+#include <hardware/camera3.h>
+
+#include "common/CameraDeviceBase.h"
+
+namespace android {
+
+class Camera3Device;
+class Fence;
+
+namespace camera3 {
+
+/**
+ * State tracking for idle and other collective state transitions.
+ * Collects idle notifications from different sources and calls the
+ * parent when all of them become idle.
+ *
+ * The parent is responsible for synchronizing the status updates with its
+ * internal state correctly, which means the notifyStatus call to the parent may
+ * block for a while.
+ */
+class StatusTracker: public Thread {
+ public:
+ StatusTracker(wp<Camera3Device> parent);
+ ~StatusTracker();
+
+ // An always-invalid component ID
+ static const int NO_STATUS_ID = -1;
+
+ // Add a component to track; returns non-negative unique ID for the new
+ // component on success, negative error code on failure.
+ // New components start in the idle state.
+ int addComponent();
+
+ // Remove existing component from idle tracking. Ignores unknown IDs
+ void removeComponent(int id);
+
+ // Set the state of a tracked component to be idle. Ignores unknown IDs; can
+ // accept a fence to wait on to complete idle. The fence is merged with any
+ // previous fences given, which means they all must signal before the
+ // component is considered idle.
+ void markComponentIdle(int id, const sp<Fence>& componentFence);
+
+ // Set the state of a tracked component to be active. Ignores unknown IDs.
+ void markComponentActive(int id);
+
+ virtual void requestExit();
+ protected:
+
+ virtual bool threadLoop();
+
+ private:
+ enum ComponentState {
+ IDLE,
+ ACTIVE
+ };
+
+ void markComponent(int id, ComponentState state,
+ const sp<Fence>& componentFence);
+
+    // Guards mPendingChangeQueue and mComponentsChanged
+ Mutex mPendingLock;
+
+ Condition mPendingChangeSignal;
+
+ struct StateChange {
+ int id;
+ ComponentState state;
+ sp<Fence> fence;
+ };
+ // A queue of yet-to-be-processed state changes to components
+ Vector<StateChange> mPendingChangeQueue;
+ bool mComponentsChanged;
+
+ wp<Camera3Device> mParent;
+
+ // Guards rest of internals. Must be locked after mPendingLock if both used.
+ Mutex mLock;
+
+ int mNextComponentId;
+
+ // Current component states
+ KeyedVector<int, ComponentState> mStates;
+ // Merged fence for all processed state changes
+ sp<Fence> mIdleFence;
+ // Current overall device state
+ ComponentState mDeviceState;
+
+ // Private to threadLoop
+
+ // Determine current overall device state
+ // We're IDLE iff
+ // - All components are currently IDLE
+ // - The merged fence for all component updates has signalled
+ ComponentState getDeviceStateLocked();
+
+ Vector<ComponentState> mStateTransitions;
+
+ static const nsecs_t kWaitDuration = 250000000LL; // 250 ms
+};
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
new file mode 100644
index 0000000..9a6dc28
--- /dev/null
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RingBufferConsumer"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <utils/Log.h>
+
+#include <gui/RingBufferConsumer.h>
+
+#define BI_LOGV(x, ...) ALOGV("[%s] " x, mName.string(), ##__VA_ARGS__)
+#define BI_LOGD(x, ...) ALOGD("[%s] " x, mName.string(), ##__VA_ARGS__)
+#define BI_LOGI(x, ...) ALOGI("[%s] " x, mName.string(), ##__VA_ARGS__)
+#define BI_LOGW(x, ...) ALOGW("[%s] " x, mName.string(), ##__VA_ARGS__)
+#define BI_LOGE(x, ...) ALOGE("[%s] " x, mName.string(), ##__VA_ARGS__)
+
+#undef assert
+#define assert(x) ALOG_ASSERT((x), #x)
+
+typedef android::RingBufferConsumer::PinnedBufferItem PinnedBufferItem;
+
+namespace android {
+
+RingBufferConsumer::RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer,
+ uint32_t consumerUsage,
+ int bufferCount) :
+ ConsumerBase(consumer),
+ mBufferCount(bufferCount)
+{
+ mConsumer->setConsumerUsageBits(consumerUsage);
+ mConsumer->setMaxAcquiredBufferCount(bufferCount);
+
+ assert(bufferCount > 0);
+}
+
+RingBufferConsumer::~RingBufferConsumer() {
+}
+
+void RingBufferConsumer::setName(const String8& name) {
+ Mutex::Autolock _l(mMutex);
+ mName = name;
+ mConsumer->setConsumerName(name);
+}
+
+sp<PinnedBufferItem> RingBufferConsumer::pinSelectedBuffer(
+ const RingBufferComparator& filter,
+ bool waitForFence) {
+
+ sp<PinnedBufferItem> pinnedBuffer;
+
+ {
+ List<RingBufferItem>::iterator it, end, accIt;
+ BufferInfo acc, cur;
+ BufferInfo* accPtr = NULL;
+
+ Mutex::Autolock _l(mMutex);
+
+ for (it = mBufferItemList.begin(), end = mBufferItemList.end();
+ it != end;
+ ++it) {
+
+ const RingBufferItem& item = *it;
+
+ cur.mCrop = item.mCrop;
+ cur.mTransform = item.mTransform;
+ cur.mScalingMode = item.mScalingMode;
+ cur.mTimestamp = item.mTimestamp;
+ cur.mFrameNumber = item.mFrameNumber;
+ cur.mPinned = item.mPinCount > 0;
+
+ int ret = filter.compare(accPtr, &cur);
+
+ if (ret == 0) {
+ accPtr = NULL;
+ } else if (ret > 0) {
+ acc = cur;
+ accPtr = &acc;
+ accIt = it;
+ } // else acc = acc
+ }
+
+ if (!accPtr) {
+ return NULL;
+ }
+
+ pinnedBuffer = new PinnedBufferItem(this, *accIt);
+ pinBufferLocked(pinnedBuffer->getBufferItem());
+
+ } // end scope of mMutex autolock
+
+ if (waitForFence) {
+ status_t err = pinnedBuffer->getBufferItem().mFence->waitForever(
+ "RingBufferConsumer::pinSelectedBuffer");
+ if (err != OK) {
+ BI_LOGE("Failed to wait for fence of acquired buffer: %s (%d)",
+ strerror(-err), err);
+ }
+ }
+
+ return pinnedBuffer;
+}
+
+status_t RingBufferConsumer::clear() {
+
+ status_t err;
+ Mutex::Autolock _l(mMutex);
+
+ BI_LOGV("%s", __FUNCTION__);
+
+ // Avoid annoying log warnings by returning early
+ if (mBufferItemList.size() == 0) {
+ return OK;
+ }
+
+ do {
+ size_t pinnedFrames = 0;
+ err = releaseOldestBufferLocked(&pinnedFrames);
+
+ if (err == NO_BUFFER_AVAILABLE) {
+ assert(pinnedFrames == mBufferItemList.size());
+ break;
+ }
+
+ if (err == NOT_ENOUGH_DATA) {
+ // Fine. Empty buffer item list.
+ break;
+ }
+
+ if (err != OK) {
+ BI_LOGE("Clear failed, could not release buffer");
+ return err;
+ }
+
+ } while(true);
+
+ return OK;
+}
+
+void RingBufferConsumer::pinBufferLocked(const BufferItem& item) {
+ List<RingBufferItem>::iterator it, end;
+
+ for (it = mBufferItemList.begin(), end = mBufferItemList.end();
+ it != end;
+ ++it) {
+
+ RingBufferItem& find = *it;
+ if (item.mGraphicBuffer == find.mGraphicBuffer) {
+ find.mPinCount++;
+ break;
+ }
+ }
+
+ if (it == end) {
+ BI_LOGE("Failed to pin buffer (timestamp %lld, framenumber %lld)",
+ item.mTimestamp, item.mFrameNumber);
+ } else {
+ BI_LOGV("Pinned buffer (frame %lld, timestamp %lld)",
+ item.mFrameNumber, item.mTimestamp);
+ }
+}
+
+status_t RingBufferConsumer::releaseOldestBufferLocked(size_t* pinnedFrames) {
+    status_t err = OK;
+
+    List<RingBufferItem>::iterator it, end, accIt;
+
+    it = mBufferItemList.begin();
+    end = mBufferItemList.end();
+    accIt = end;
+
+    if (it == end) {
+        /**
+         * This is fine. We really care about being able to acquire a buffer
+         * successfully after this function completes, not about it releasing
+         * some buffer.
+         */
+        BI_LOGV("%s: No buffers yet acquired, can't release anything",
+                __FUNCTION__);
+        return NOT_ENOUGH_DATA;
+    }
+
+    for (; it != end; ++it) {
+        RingBufferItem& find = *it;
+
+        if (find.mPinCount > 0) {
+            if (pinnedFrames != NULL) {
+                ++(*pinnedFrames);
+            }
+            // Filter out pinned frame when searching for buffer to release
+            continue;
+        }
+        // Must check accIt == end BEFORE dereferencing it (UB otherwise)
+        if (accIt == end || find.mTimestamp < accIt->mTimestamp) {
+            accIt = it;
+        }
+    }
+
+    if (accIt != end) {
+        RingBufferItem& item = *accIt;
+
+        // In case the object was never pinned, pass the acquire fence
+        // back to the release fence. If the fence was already waited on,
+        // it'll just be a no-op to wait on it again.
+
+        // item.mGraphicBuffer was populated with the proper graphic-buffer
+        // at acquire even if it was previously acquired
+        err = addReleaseFenceLocked(item.mBuf,
+                item.mGraphicBuffer, item.mFence);
+
+        if (err != OK) {
+            BI_LOGE("Failed to add release fence to buffer "
+                    "(timestamp %lld, framenumber %lld",
+                    item.mTimestamp, item.mFrameNumber);
+            return err;
+        }
+
+        BI_LOGV("Attempting to release buffer timestamp %lld, frame %lld",
+                item.mTimestamp, item.mFrameNumber);
+
+        // item.mGraphicBuffer was populated with the proper graphic-buffer
+        // at acquire even if it was previously acquired
+        err = releaseBufferLocked(item.mBuf, item.mGraphicBuffer,
+                EGL_NO_DISPLAY,
+                EGL_NO_SYNC_KHR);
+        if (err != OK) {
+            BI_LOGE("Failed to release buffer: %s (%d)",
+                    strerror(-err), err);
+            return err;
+        }
+
+        BI_LOGV("Buffer timestamp %lld, frame %lld evicted",
+                item.mTimestamp, item.mFrameNumber);
+
+        size_t currentSize = mBufferItemList.size();
+        mBufferItemList.erase(accIt);
+        assert(mBufferItemList.size() == currentSize - 1);
+    } else {
+        BI_LOGW("All buffers pinned, could not find any to release");
+        return NO_BUFFER_AVAILABLE;
+
+    }
+
+    return OK;
+}
+
+void RingBufferConsumer::onFrameAvailable() {
+    status_t err;
+
+    {
+        Mutex::Autolock _l(mMutex);
+
+        /**
+         * Release oldest frame
+         */
+        if (mBufferItemList.size() >= (size_t)mBufferCount) {
+            err = releaseOldestBufferLocked(/*pinnedFrames*/NULL);
+            assert(err != NOT_ENOUGH_DATA);
+
+            // TODO: implement the case for NO_BUFFER_AVAILABLE
+            assert(err != NO_BUFFER_AVAILABLE);
+            if (err != OK) {
+                return;
+            }
+            // TODO: in unpinBuffer rerun this routine if we had buffers
+            // we could've locked but didn't because there was no space
+        }
+
+        RingBufferItem& item = *mBufferItemList.insert(mBufferItemList.end(),
+                RingBufferItem());
+
+        /**
+         * Acquire new frame
+         */
+        err = acquireBufferLocked(&item, 0);
+        if (err != OK) {
+            if (err != NO_BUFFER_AVAILABLE) {
+                // status_t errors are negative; negate for strerror
+                BI_LOGE("Error acquiring buffer: %s (%d)", strerror(-err), err);
+            }
+
+            mBufferItemList.erase(--mBufferItemList.end());
+            return;
+        }
+
+        BI_LOGV("New buffer acquired (timestamp %lld), "
+                "buffer items %zu out of %d",
+                item.mTimestamp,
+                mBufferItemList.size(), mBufferCount);
+
+        item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer;
+    } // end of mMutex lock
+
+    ConsumerBase::onFrameAvailable();
+}
+
+void RingBufferConsumer::unpinBuffer(const BufferItem& item) {
+ Mutex::Autolock _l(mMutex);
+
+ List<RingBufferItem>::iterator it, end, accIt;
+
+ for (it = mBufferItemList.begin(), end = mBufferItemList.end();
+ it != end;
+ ++it) {
+
+ RingBufferItem& find = *it;
+ if (item.mGraphicBuffer == find.mGraphicBuffer) {
+ status_t res = addReleaseFenceLocked(item.mBuf,
+ item.mGraphicBuffer, item.mFence);
+
+ if (res != OK) {
+ BI_LOGE("Failed to add release fence to buffer "
+ "(timestamp %lld, framenumber %lld",
+ item.mTimestamp, item.mFrameNumber);
+ return;
+ }
+
+ find.mPinCount--;
+ break;
+ }
+ }
+
+ if (it == end) {
+ // This should never happen. If it happens, we have a bug.
+ BI_LOGE("Failed to unpin buffer (timestamp %lld, framenumber %lld)",
+ item.mTimestamp, item.mFrameNumber);
+ } else {
+ BI_LOGV("Unpinned buffer (timestamp %lld, framenumber %lld)",
+ item.mTimestamp, item.mFrameNumber);
+ }
+}
+
+status_t RingBufferConsumer::setDefaultBufferSize(uint32_t w, uint32_t h) {
+ Mutex::Autolock _l(mMutex);
+ return mConsumer->setDefaultBufferSize(w, h);
+}
+
+status_t RingBufferConsumer::setDefaultBufferFormat(uint32_t defaultFormat) {
+ Mutex::Autolock _l(mMutex);
+ return mConsumer->setDefaultBufferFormat(defaultFormat);
+}
+
+status_t RingBufferConsumer::setConsumerUsage(uint32_t usage) {
+ Mutex::Autolock _l(mMutex);
+ return mConsumer->setConsumerUsageBits(usage);
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
new file mode 100644
index 0000000..b4ad824
--- /dev/null
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_RINGBUFFERCONSUMER_H
+#define ANDROID_GUI_RINGBUFFERCONSUMER_H
+
+#include <gui/ConsumerBase.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/threads.h>
+#include <utils/List.h>
+
+#define ANDROID_GRAPHICS_RINGBUFFERCONSUMER_JNI_ID "mRingBufferConsumer"
+
+namespace android {
+
+/**
+ * The RingBufferConsumer maintains a ring buffer of BufferItem objects,
+ * (which are 'acquired' as long as they are part of the ring buffer, and
+ * 'released' when they leave the ring buffer).
+ *
+ * When new buffers are produced, the oldest non-pinned buffer item is immediately
+ * dropped from the ring buffer, and overridden with the newest buffer.
+ *
+ * Users can only access a buffer item after pinning it (which also guarantees
+ * that during its duration it will not be released back into the BufferQueue).
+ *
+ * Note that the 'oldest' buffer is the one with the smallest timestamp.
+ *
+ * Edge cases:
+ * - If ringbuffer is not full, no drops occur when a buffer is produced.
+ * - If all the buffers get filled or pinned then there will be no empty
+ * buffers left, so the producer will block on dequeue.
+ */
+class RingBufferConsumer : public ConsumerBase,
+ public ConsumerBase::FrameAvailableListener
+{
+ public:
+ typedef ConsumerBase::FrameAvailableListener FrameAvailableListener;
+
+ typedef BufferQueue::BufferItem BufferItem;
+
+ enum { INVALID_BUFFER_SLOT = BufferQueue::INVALID_BUFFER_SLOT };
+ enum { NO_BUFFER_AVAILABLE = BufferQueue::NO_BUFFER_AVAILABLE };
+
+ // Create a new ring buffer consumer. The consumerUsage parameter determines
+ // the consumer usage flags passed to the graphics allocator. The
+ // bufferCount parameter specifies how many buffers can be pinned for user
+ // access at the same time.
+ RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer, uint32_t consumerUsage,
+ int bufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS);
+
+ virtual ~RingBufferConsumer();
+
+ // set the name of the RingBufferConsumer that will be used to identify it in
+ // log messages.
+ void setName(const String8& name);
+
+ // setDefaultBufferSize is used to set the size of buffers returned by
+ // requestBuffers when a width and height of zero is requested.
+ status_t setDefaultBufferSize(uint32_t w, uint32_t h);
+
+ // setDefaultBufferFormat allows the BufferQueue to create
+ // GraphicBuffers of a defaultFormat if no format is specified
+ // by the producer endpoint.
+ status_t setDefaultBufferFormat(uint32_t defaultFormat);
+
+ // setConsumerUsage allows the BufferQueue consumer usage to be
+ // set at a later time after construction.
+ status_t setConsumerUsage(uint32_t usage);
+
+ // Buffer info, minus the graphics buffer/slot itself.
+ struct BufferInfo {
+ // mCrop is the current crop rectangle for this buffer slot.
+ Rect mCrop;
+
+ // mTransform is the current transform flags for this buffer slot.
+ uint32_t mTransform;
+
+ // mScalingMode is the current scaling mode for this buffer slot.
+ uint32_t mScalingMode;
+
+ // mTimestamp is the current timestamp for this buffer slot. This gets
+ // set by queueBuffer each time this slot is queued.
+ int64_t mTimestamp;
+
+ // mFrameNumber is the number of the queued frame for this slot.
+ uint64_t mFrameNumber;
+
+ // mPinned is whether or not the buffer has been pinned already.
+ bool mPinned;
+ };
+
+ struct RingBufferComparator {
+ // Return < 0 to select i1, > 0 to select i2, 0 for neither
+ // i1 or i2 can be NULL.
+ //
+ // The comparator has to implement a total ordering. Otherwise
+ // a linear scan won't find the most preferred buffer.
+ virtual int compare(const BufferInfo* i1,
+ const BufferInfo* i2) const = 0;
+
+ virtual ~RingBufferComparator() {}
+ };
+
+ struct PinnedBufferItem : public LightRefBase<PinnedBufferItem> {
+ PinnedBufferItem(wp<RingBufferConsumer> consumer,
+ const BufferItem& item) :
+ mConsumer(consumer),
+ mBufferItem(item) {
+ }
+
+ ~PinnedBufferItem() {
+ sp<RingBufferConsumer> consumer = mConsumer.promote();
+ if (consumer != NULL) {
+ consumer->unpinBuffer(mBufferItem);
+ }
+ }
+
+ bool isEmpty() {
+ return mBufferItem.mBuf == BufferQueue::INVALID_BUFFER_SLOT;
+ }
+
+ BufferItem& getBufferItem() { return mBufferItem; }
+ const BufferItem& getBufferItem() const { return mBufferItem; }
+
+ private:
+ wp<RingBufferConsumer> mConsumer;
+ BufferItem mBufferItem;
+ };
+
+ // Find a buffer using the filter, then pin it before returning it.
+ //
+ // The filter will be invoked on each buffer item in the ring buffer,
+ // passing the item that was selected from each previous iteration,
+ // as well as the current iteration's item.
+ //
+ // Pinning will ensure that the buffer will not be dropped when a new
+ // frame is available.
+ sp<PinnedBufferItem> pinSelectedBuffer(const RingBufferComparator& filter,
+ bool waitForFence = true);
+
+ // Release all the non-pinned buffers in the ring buffer
+ status_t clear();
+
+ private:
+
+ // Override ConsumerBase::onFrameAvailable
+ virtual void onFrameAvailable();
+
+ void pinBufferLocked(const BufferItem& item);
+ void unpinBuffer(const BufferItem& item);
+
+ // Releases oldest buffer. Returns NO_BUFFER_AVAILABLE
+ // if all the buffers were pinned.
+ // Returns NOT_ENOUGH_DATA if list was empty.
+ status_t releaseOldestBufferLocked(size_t* pinnedFrames);
+
+ struct RingBufferItem : public BufferItem {
+ RingBufferItem() : BufferItem(), mPinCount(0) {}
+ int mPinCount;
+ };
+
+ // List of acquired buffers in our ring buffer
+ List<RingBufferItem> mBufferItemList;
+ const int mBufferCount;
+};
+
+} // namespace android
+
+#endif // ANDROID_GUI_RINGBUFFERCONSUMER_H
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
new file mode 100644
index 0000000..346e15f
--- /dev/null
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraTraces"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "utils/CameraTraces.h"
+#include <utils/ProcessCallStack.h>
+
+#include <utils/Mutex.h>
+#include <utils/List.h>
+
+#include <utils/Log.h>
+#include <cutils/trace.h>
+
+namespace android {
+namespace camera3 {
+
+struct CameraTracesImpl {
+ Mutex tracesLock;
+ List<ProcessCallStack> pcsList;
+}; // struct CameraTracesImpl
+
+static CameraTracesImpl gImpl;
+CameraTracesImpl& CameraTraces::sImpl = gImpl;
+
+void CameraTraces::saveTrace() {
+ ALOGV("%s: begin", __FUNCTION__);
+ ATRACE_BEGIN("CameraTraces::saveTrace");
+ Mutex::Autolock al(sImpl.tracesLock);
+
+ List<ProcessCallStack>& pcsList = sImpl.pcsList;
+
+ // Insert new ProcessCallStack, and immediately crawl all the threads
+ pcsList.push_front(ProcessCallStack());
+ ProcessCallStack& pcs = *pcsList.begin();
+ pcs.update();
+
+ if (pcsList.size() > MAX_TRACES) {
+ // Prune list periodically and discard oldest entry
+ pcsList.erase(--pcsList.end());
+ }
+
+ IF_ALOGV() {
+ pcs.log(LOG_TAG, ANDROID_LOG_VERBOSE);
+ }
+
+ ALOGD("Process trace saved. Use dumpsys media.camera to view.");
+
+ ATRACE_END();
+}
+
+status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+ ALOGV("%s: fd = %d", __FUNCTION__, fd);
+ Mutex::Autolock al(sImpl.tracesLock);
+ List<ProcessCallStack>& pcsList = sImpl.pcsList;
+
+ if (fd < 0) {
+ ALOGW("%s: Negative FD (%d)", __FUNCTION__, fd);
+ return BAD_VALUE;
+ }
+
+ fdprintf(fd, "Camera traces (%zu):\n", pcsList.size());
+
+ if (pcsList.empty()) {
+ fdprintf(fd, " No camera traces collected.\n");
+ }
+
+ // Print newest items first
+ List<ProcessCallStack>::iterator it, end;
+ for (it = pcsList.begin(), end = pcsList.end(); it != end; ++it) {
+ const ProcessCallStack& pcs = *it;
+ pcs.dump(fd, DUMP_INDENT);
+ }
+
+ return OK;
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
new file mode 100644
index 0000000..d10dbc9
--- /dev/null
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_TRACES_H_
+#define ANDROID_SERVERS_CAMERA_TRACES_H_
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace camera3 {
+
+class CameraTracesImpl;
+
+// Collect a list of the process's stack traces
+class CameraTraces {
+public:
+ /**
+ * Save the current stack trace for each thread in the process. At most
+ * MAX_TRACES will be saved, after which the oldest traces will be discarded.
+ *
+ * <p>Use CameraTraces::dump to print out the traces.</p>
+ */
+ static void saveTrace();
+
+ /**
+ * Prints all saved traces to the specified file descriptor.
+ *
+ * <p>Each line is indented by DUMP_INDENT spaces.</p>
+ */
+ static status_t dump(int fd, const Vector<String16>& args);
+
+private:
+ enum {
+ // Don't collect more than 100 traces. Discard oldest.
+ MAX_TRACES = 100,
+
+ // Insert 2 spaces when dumping the traces
+ DUMP_INDENT = 2,
+ };
+
+ CameraTraces();
+ ~CameraTraces();
+ CameraTraces(CameraTraces& rhs);
+
+ static CameraTracesImpl& sImpl;
+}; // class CameraTraces
+
+}; // namespace camera3
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_TRACES_H_
diff --git a/services/camera/tests/CameraServiceTest/Android.mk b/services/camera/tests/CameraServiceTest/Android.mk
deleted file mode 100644
index 41b6f63..0000000
--- a/services/camera/tests/CameraServiceTest/Android.mk
+++ /dev/null
@@ -1,26 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= CameraServiceTest.cpp
-
-LOCAL_MODULE:= CameraServiceTest
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_C_INCLUDES += \
- frameworks/av/libs
-
-LOCAL_CFLAGS :=
-
-LOCAL_SHARED_LIBRARIES += \
- libbinder \
- libcutils \
- libutils \
- libui \
- libcamera_client \
- libgui
-
-# Disable it because the ISurface interface may change, and before we have a
-# chance to fix this test, we don't want to break normal builds.
-#include $(BUILD_EXECUTABLE)
diff --git a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp b/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
deleted file mode 100644
index e417b79..0000000
--- a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
+++ /dev/null
@@ -1,924 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "CameraServiceTest"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <camera/Camera.h>
-#include <camera/CameraParameters.h>
-#include <ui/GraphicBuffer.h>
-#include <camera/ICamera.h>
-#include <camera/ICameraClient.h>
-#include <camera/ICameraService.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <utils/KeyedVector.h>
-#include <utils/Log.h>
-#include <utils/Vector.h>
-#include <utils/threads.h>
-
-using namespace android;
-
-//
-// Assertion and Logging utilities
-//
-#define INFO(...) \
- do { \
- printf(__VA_ARGS__); \
- printf("\n"); \
- ALOGD(__VA_ARGS__); \
- } while(0)
-
-void assert_fail(const char *file, int line, const char *func, const char *expr) {
- INFO("assertion failed at file %s, line %d, function %s:",
- file, line, func);
- INFO("%s", expr);
- abort();
-}
-
-void assert_eq_fail(const char *file, int line, const char *func,
- const char *expr, int actual) {
- INFO("assertion failed at file %s, line %d, function %s:",
- file, line, func);
- INFO("(expected) %s != (actual) %d", expr, actual);
- abort();
-}
-
-#define ASSERT(e) \
- do { \
- if (!(e)) \
- assert_fail(__FILE__, __LINE__, __func__, #e); \
- } while(0)
-
-#define ASSERT_EQ(expected, actual) \
- do { \
- int _x = (actual); \
- if (_x != (expected)) \
- assert_eq_fail(__FILE__, __LINE__, __func__, #expected, _x); \
- } while(0)
-
-//
-// Holder service for pass objects between processes.
-//
-class IHolder : public IInterface {
-protected:
- enum {
- HOLDER_PUT = IBinder::FIRST_CALL_TRANSACTION,
- HOLDER_GET,
- HOLDER_CLEAR
- };
-public:
- DECLARE_META_INTERFACE(Holder);
-
- virtual void put(sp<IBinder> obj) = 0;
- virtual sp<IBinder> get() = 0;
- virtual void clear() = 0;
-};
-
-class BnHolder : public BnInterface<IHolder> {
- virtual status_t onTransact(uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-class BpHolder : public BpInterface<IHolder> {
-public:
- BpHolder(const sp<IBinder>& impl)
- : BpInterface<IHolder>(impl) {
- }
-
- virtual void put(sp<IBinder> obj) {
- Parcel data, reply;
- data.writeStrongBinder(obj);
- remote()->transact(HOLDER_PUT, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
- virtual sp<IBinder> get() {
- Parcel data, reply;
- remote()->transact(HOLDER_GET, data, &reply);
- return reply.readStrongBinder();
- }
-
- virtual void clear() {
- Parcel data, reply;
- remote()->transact(HOLDER_CLEAR, data, &reply);
- }
-};
-
-IMPLEMENT_META_INTERFACE(Holder, "CameraServiceTest.Holder");
-
-status_t BnHolder::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
- switch(code) {
- case HOLDER_PUT: {
- put(data.readStrongBinder());
- return NO_ERROR;
- } break;
- case HOLDER_GET: {
- reply->writeStrongBinder(get());
- return NO_ERROR;
- } break;
- case HOLDER_CLEAR: {
- clear();
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-class HolderService : public BnHolder {
- virtual void put(sp<IBinder> obj) {
- mObj = obj;
- }
- virtual sp<IBinder> get() {
- return mObj;
- }
- virtual void clear() {
- mObj.clear();
- }
-private:
- sp<IBinder> mObj;
-};
-
-//
-// A mock CameraClient
-//
-class MCameraClient : public BnCameraClient {
-public:
- virtual void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2);
- virtual void dataCallback(int32_t msgType, const sp<IMemory>& data);
- virtual void dataCallbackTimestamp(nsecs_t timestamp,
- int32_t msgType, const sp<IMemory>& data);
-
- // new functions
- void clearStat();
- enum OP { EQ, GE, LE, GT, LT };
- void assertNotify(int32_t msgType, OP op, int count);
- void assertData(int32_t msgType, OP op, int count);
- void waitNotify(int32_t msgType, OP op, int count);
- void waitData(int32_t msgType, OP op, int count);
- void assertDataSize(int32_t msgType, OP op, int dataSize);
-
- void setReleaser(ICamera *releaser) {
- mReleaser = releaser;
- }
-private:
- Mutex mLock;
- Condition mCond;
- DefaultKeyedVector<int32_t, int> mNotifyCount;
- DefaultKeyedVector<int32_t, int> mDataCount;
- DefaultKeyedVector<int32_t, int> mDataSize;
- bool test(OP op, int v1, int v2);
- void assertTest(OP op, int v1, int v2);
-
- ICamera *mReleaser;
-};
-
-void MCameraClient::clearStat() {
- Mutex::Autolock _l(mLock);
- mNotifyCount.clear();
- mDataCount.clear();
- mDataSize.clear();
-}
-
-bool MCameraClient::test(OP op, int v1, int v2) {
- switch (op) {
- case EQ: return v1 == v2;
- case GT: return v1 > v2;
- case LT: return v1 < v2;
- case GE: return v1 >= v2;
- case LE: return v1 <= v2;
- default: ASSERT(0); break;
- }
- return false;
-}
-
-void MCameraClient::assertTest(OP op, int v1, int v2) {
- if (!test(op, v1, v2)) {
- ALOGE("assertTest failed: op=%d, v1=%d, v2=%d", op, v1, v2);
- ASSERT(0);
- }
-}
-
-void MCameraClient::assertNotify(int32_t msgType, OP op, int count) {
- Mutex::Autolock _l(mLock);
- int v = mNotifyCount.valueFor(msgType);
- assertTest(op, v, count);
-}
-
-void MCameraClient::assertData(int32_t msgType, OP op, int count) {
- Mutex::Autolock _l(mLock);
- int v = mDataCount.valueFor(msgType);
- assertTest(op, v, count);
-}
-
-void MCameraClient::assertDataSize(int32_t msgType, OP op, int dataSize) {
- Mutex::Autolock _l(mLock);
- int v = mDataSize.valueFor(msgType);
- assertTest(op, v, dataSize);
-}
-
-void MCameraClient::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) {
- INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ssize_t i = mNotifyCount.indexOfKey(msgType);
- if (i < 0) {
- mNotifyCount.add(msgType, 1);
- } else {
- ++mNotifyCount.editValueAt(i);
- }
- mCond.signal();
-}
-
-void MCameraClient::dataCallback(int32_t msgType, const sp<IMemory>& data) {
- INFO("%s", __func__);
- int dataSize = data->size();
- INFO("data type = %d, size = %d", msgType, dataSize);
- Mutex::Autolock _l(mLock);
- ssize_t i = mDataCount.indexOfKey(msgType);
- if (i < 0) {
- mDataCount.add(msgType, 1);
- mDataSize.add(msgType, dataSize);
- } else {
- ++mDataCount.editValueAt(i);
- mDataSize.editValueAt(i) = dataSize;
- }
- mCond.signal();
-
- if (msgType == CAMERA_MSG_VIDEO_FRAME) {
- ASSERT(mReleaser != NULL);
- mReleaser->releaseRecordingFrame(data);
- }
-}
-
-void MCameraClient::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
- const sp<IMemory>& data) {
- dataCallback(msgType, data);
-}
-
-void MCameraClient::waitNotify(int32_t msgType, OP op, int count) {
- INFO("waitNotify: %d, %d, %d", msgType, op, count);
- Mutex::Autolock _l(mLock);
- while (true) {
- int v = mNotifyCount.valueFor(msgType);
- if (test(op, v, count)) {
- break;
- }
- mCond.wait(mLock);
- }
-}
-
-void MCameraClient::waitData(int32_t msgType, OP op, int count) {
- INFO("waitData: %d, %d, %d", msgType, op, count);
- Mutex::Autolock _l(mLock);
- while (true) {
- int v = mDataCount.valueFor(msgType);
- if (test(op, v, count)) {
- break;
- }
- mCond.wait(mLock);
- }
-}
-
-//
-// A mock Surface
-//
-class MSurface : public BnSurface {
-public:
- virtual status_t registerBuffers(const BufferHeap& buffers);
- virtual void postBuffer(ssize_t offset);
- virtual void unregisterBuffers();
- virtual sp<GraphicBuffer> requestBuffer(int bufferIdx, int usage);
- virtual status_t setBufferCount(int bufferCount);
-
- // new functions
- void clearStat();
- void waitUntil(int c0, int c1, int c2);
-
-private:
- // check callback count
- Condition mCond;
- Mutex mLock;
- int registerBuffersCount;
- int postBufferCount;
- int unregisterBuffersCount;
-};
-
-status_t MSurface::registerBuffers(const BufferHeap& buffers) {
- INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ++registerBuffersCount;
- mCond.signal();
- return NO_ERROR;
-}
-
-void MSurface::postBuffer(ssize_t offset) {
- // INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ++postBufferCount;
- mCond.signal();
-}
-
-void MSurface::unregisterBuffers() {
- INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ++unregisterBuffersCount;
- mCond.signal();
-}
-
-sp<GraphicBuffer> MSurface::requestBuffer(int bufferIdx, int usage) {
- INFO("%s", __func__);
- return NULL;
-}
-
-status_t MSurface::setBufferCount(int bufferCount) {
- INFO("%s", __func__);
- return NULL;
-}
-
-void MSurface::clearStat() {
- Mutex::Autolock _l(mLock);
- registerBuffersCount = 0;
- postBufferCount = 0;
- unregisterBuffersCount = 0;
-}
-
-void MSurface::waitUntil(int c0, int c1, int c2) {
- INFO("waitUntil: %d %d %d", c0, c1, c2);
- Mutex::Autolock _l(mLock);
- while (true) {
- if (registerBuffersCount >= c0 &&
- postBufferCount >= c1 &&
- unregisterBuffersCount >= c2) {
- break;
- }
- mCond.wait(mLock);
- }
-}
-
-//
-// Utilities to use the Holder service
-//
-sp<IHolder> getHolder() {
- sp<IServiceManager> sm = defaultServiceManager();
- ASSERT(sm != 0);
- sp<IBinder> binder = sm->getService(String16("CameraServiceTest.Holder"));
- ASSERT(binder != 0);
- sp<IHolder> holder = interface_cast<IHolder>(binder);
- ASSERT(holder != 0);
- return holder;
-}
-
-void putTempObject(sp<IBinder> obj) {
- INFO("%s", __func__);
- getHolder()->put(obj);
-}
-
-sp<IBinder> getTempObject() {
- INFO("%s", __func__);
- return getHolder()->get();
-}
-
-void clearTempObject() {
- INFO("%s", __func__);
- getHolder()->clear();
-}
-
-//
-// Get a Camera Service
-//
-sp<ICameraService> getCameraService() {
- sp<IServiceManager> sm = defaultServiceManager();
- ASSERT(sm != 0);
- sp<IBinder> binder = sm->getService(String16("media.camera"));
- ASSERT(binder != 0);
- sp<ICameraService> cs = interface_cast<ICameraService>(binder);
- ASSERT(cs != 0);
- return cs;
-}
-
-int getNumberOfCameras() {
- sp<ICameraService> cs = getCameraService();
- return cs->getNumberOfCameras();
-}
-
-//
-// Various Connect Tests
-//
-void testConnect(int cameraId) {
- INFO("%s", __func__);
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- c->disconnect();
-}
-
-void testAllowConnectOnceOnly(int cameraId) {
- INFO("%s", __func__);
- sp<ICameraService> cs = getCameraService();
- // Connect the first client.
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // Same client -- ok.
- ASSERT(cs->connect(cc, cameraId) != 0);
- // Different client -- not ok.
- sp<MCameraClient> cc2 = new MCameraClient();
- ASSERT(cs->connect(cc2, cameraId) == 0);
- c->disconnect();
-}
-
-void testReconnectFailed() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- sp<MCameraClient> cc = new MCameraClient();
- ASSERT(c->connect(cc) != NO_ERROR);
-}
-
-void testReconnectSuccess() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- sp<MCameraClient> cc = new MCameraClient();
- ASSERT(c->connect(cc) == NO_ERROR);
- c->disconnect();
-}
-
-void testLockFailed() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- ASSERT(c->lock() != NO_ERROR);
-}
-
-void testLockUnlockSuccess() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- ASSERT(c->lock() == NO_ERROR);
- ASSERT(c->unlock() == NO_ERROR);
-}
-
-void testLockSuccess() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- ASSERT(c->lock() == NO_ERROR);
- c->disconnect();
-}
-
-//
-// Run the connect tests in another process.
-//
-const char *gExecutable;
-
-struct FunctionTableEntry {
- const char *name;
- void (*func)();
-};
-
-FunctionTableEntry function_table[] = {
-#define ENTRY(x) {#x, &x}
- ENTRY(testReconnectFailed),
- ENTRY(testReconnectSuccess),
- ENTRY(testLockUnlockSuccess),
- ENTRY(testLockFailed),
- ENTRY(testLockSuccess),
-#undef ENTRY
-};
-
-void runFunction(const char *tag) {
- INFO("runFunction: %s", tag);
- int entries = sizeof(function_table) / sizeof(function_table[0]);
- for (int i = 0; i < entries; i++) {
- if (strcmp(function_table[i].name, tag) == 0) {
- (*function_table[i].func)();
- return;
- }
- }
- ASSERT(0);
-}
-
-void runInAnotherProcess(const char *tag) {
- pid_t pid = fork();
- if (pid == 0) {
- execlp(gExecutable, gExecutable, tag, NULL);
- ASSERT(0);
- } else {
- int status;
- ASSERT_EQ(pid, wait(&status));
- ASSERT_EQ(0, status);
- }
-}
-
-void testReconnect(int cameraId) {
- INFO("%s", __func__);
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // Reconnect to the same client -- ok.
- ASSERT(c->connect(cc) == NO_ERROR);
- // Reconnect to a different client (but the same pid) -- ok.
- sp<MCameraClient> cc2 = new MCameraClient();
- ASSERT(c->connect(cc2) == NO_ERROR);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
-}
-
-void testLockUnlock(int cameraId) {
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // We can lock as many times as we want.
- ASSERT(c->lock() == NO_ERROR);
- ASSERT(c->lock() == NO_ERROR);
- // Lock from a different process -- not ok.
- putTempObject(c->asBinder());
- runInAnotherProcess("testLockFailed");
- // Unlock then lock from a different process -- ok.
- ASSERT(c->unlock() == NO_ERROR);
- runInAnotherProcess("testLockUnlockSuccess");
- // Unlock then lock from a different process -- ok.
- runInAnotherProcess("testLockSuccess");
- clearTempObject();
-}
-
-void testReconnectFromAnotherProcess(int cameraId) {
- INFO("%s", __func__);
-
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // Reconnect from a different process -- not ok.
- putTempObject(c->asBinder());
- runInAnotherProcess("testReconnectFailed");
- // Unlock then reconnect from a different process -- ok.
- ASSERT(c->unlock() == NO_ERROR);
- runInAnotherProcess("testReconnectSuccess");
- clearTempObject();
-}
-
-// We need to flush the command buffer after the reference
-// to ICamera is gone. The sleep is for the server to run
-// the destructor for it.
-static void flushCommands() {
- IPCThreadState::self()->flushCommands();
- usleep(200000); // 200ms
-}
-
-// Run a test case
-#define RUN(class_name, cameraId) do { \
- { \
- INFO(#class_name); \
- class_name instance; \
- instance.init(cameraId); \
- instance.run(); \
- } \
- flushCommands(); \
-} while(0)
-
-// Base test case after the the camera is connected.
-class AfterConnect {
-public:
- void init(int cameraId) {
- cs = getCameraService();
- cc = new MCameraClient();
- c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- }
-
-protected:
- sp<ICameraService> cs;
- sp<MCameraClient> cc;
- sp<ICamera> c;
-
- ~AfterConnect() {
- c->disconnect();
- c.clear();
- cc.clear();
- cs.clear();
- }
-};
-
-class TestSetPreviewDisplay : public AfterConnect {
-public:
- void run() {
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestStartPreview : public AfterConnect {
-public:
- void run() {
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
-
- ASSERT(c->startPreview() == NO_ERROR);
- ASSERT(c->previewEnabled() == true);
-
- surface->waitUntil(1, 10, 0); // needs 1 registerBuffers and 10 postBuffer
- surface->clearStat();
-
- sp<MSurface> another_surface = new MSurface();
- c->setPreviewDisplay(another_surface); // just to make sure unregisterBuffers
- // is called.
- surface->waitUntil(0, 0, 1); // needs unregisterBuffers
-
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestStartPreviewWithoutDisplay : public AfterConnect {
-public:
- void run() {
- ASSERT(c->startPreview() == NO_ERROR);
- ASSERT(c->previewEnabled() == true);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-// Base test case after the the camera is connected and the preview is started.
-class AfterStartPreview : public AfterConnect {
-public:
- void init(int cameraId) {
- AfterConnect::init(cameraId);
- surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
- ASSERT(c->startPreview() == NO_ERROR);
- }
-
-protected:
- sp<MSurface> surface;
-
- ~AfterStartPreview() {
- surface.clear();
- }
-};
-
-class TestAutoFocus : public AfterStartPreview {
-public:
- void run() {
- cc->assertNotify(CAMERA_MSG_FOCUS, MCameraClient::EQ, 0);
- c->autoFocus();
- cc->waitNotify(CAMERA_MSG_FOCUS, MCameraClient::EQ, 1);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestStopPreview : public AfterStartPreview {
-public:
- void run() {
- ASSERT(c->previewEnabled() == true);
- c->stopPreview();
- ASSERT(c->previewEnabled() == false);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestTakePicture: public AfterStartPreview {
-public:
- void run() {
- ASSERT(c->takePicture() == NO_ERROR);
- cc->waitNotify(CAMERA_MSG_SHUTTER, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1);
- c->stopPreview();
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestTakeMultiplePictures: public AfterStartPreview {
-public:
- void run() {
- for (int i = 0; i < 10; i++) {
- cc->clearStat();
- ASSERT(c->takePicture() == NO_ERROR);
- cc->waitNotify(CAMERA_MSG_SHUTTER, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1);
- }
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestGetParameters: public AfterStartPreview {
-public:
- void run() {
- String8 param_str = c->getParameters();
- INFO("%s", static_cast<const char*>(param_str));
- }
-};
-
-static bool getNextSize(const char **ptrS, int *w, int *h) {
- const char *s = *ptrS;
-
- // skip over ','
- if (*s == ',') s++;
-
- // remember start position in p
- const char *p = s;
- while (*s != '\0' && *s != 'x') {
- s++;
- }
- if (*s == '\0') return false;
-
- // get the width
- *w = atoi(p);
-
- // skip over 'x'
- ASSERT(*s == 'x');
- p = s + 1;
- while (*s != '\0' && *s != ',') {
- s++;
- }
-
- // get the height
- *h = atoi(p);
- *ptrS = s;
- return true;
-}
-
-class TestPictureSize : public AfterStartPreview {
-public:
- void checkOnePicture(int w, int h) {
- const float rate = 0.9; // byte per pixel limit
- int pixels = w * h;
-
- CameraParameters param(c->getParameters());
- param.setPictureSize(w, h);
- // disable thumbnail to get more accurate size.
- param.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, 0);
- param.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, 0);
- c->setParameters(param.flatten());
-
- cc->clearStat();
- ASSERT(c->takePicture() == NO_ERROR);
- cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1);
- //cc->assertDataSize(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, pixels*3/2);
- cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1);
- cc->assertDataSize(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::LT,
- int(pixels * rate));
- cc->assertDataSize(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::GT, 0);
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-
- void run() {
- CameraParameters param(c->getParameters());
- int w, h;
- const char *s = param.get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES);
- while (getNextSize(&s, &w, &h)) {
- ALOGD("checking picture size %dx%d", w, h);
- checkOnePicture(w, h);
- }
- }
-};
-
-class TestPreviewCallbackFlag : public AfterConnect {
-public:
- void run() {
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
-
- // Try all flag combinations.
- for (int v = 0; v < 8; v++) {
- ALOGD("TestPreviewCallbackFlag: flag=%d", v);
- usleep(100000); // sleep a while to clear the in-flight callbacks.
- cc->clearStat();
- c->setPreviewCallbackFlag(v);
- ASSERT(c->previewEnabled() == false);
- ASSERT(c->startPreview() == NO_ERROR);
- ASSERT(c->previewEnabled() == true);
- sleep(2);
- c->stopPreview();
- if ((v & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) == 0) {
- cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, 0);
- } else {
- if ((v & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) == 0) {
- cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::GE, 10);
- } else {
- cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, 1);
- }
- }
- }
- }
-};
-
-class TestRecording : public AfterConnect {
-public:
- void run() {
- ASSERT(c->recordingEnabled() == false);
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
- c->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK);
- cc->setReleaser(c.get());
- c->startRecording();
- ASSERT(c->recordingEnabled() == true);
- sleep(2);
- c->stopRecording();
- usleep(100000); // sleep a while to clear the in-flight callbacks.
- cc->setReleaser(NULL);
- cc->assertData(CAMERA_MSG_VIDEO_FRAME, MCameraClient::GE, 10);
- }
-};
-
-class TestPreviewSize : public AfterStartPreview {
-public:
- void checkOnePicture(int w, int h) {
- int size = w*h*3/2; // should read from parameters
-
- c->stopPreview();
-
- CameraParameters param(c->getParameters());
- param.setPreviewSize(w, h);
- c->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK);
- c->setParameters(param.flatten());
-
- c->startPreview();
-
- cc->clearStat();
- cc->waitData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::GE, 1);
- cc->assertDataSize(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, size);
- }
-
- void run() {
- CameraParameters param(c->getParameters());
- int w, h;
- const char *s = param.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
- while (getNextSize(&s, &w, &h)) {
- ALOGD("checking preview size %dx%d", w, h);
- checkOnePicture(w, h);
- }
- }
-};
-
-void runHolderService() {
- defaultServiceManager()->addService(
- String16("CameraServiceTest.Holder"), new HolderService());
- ProcessState::self()->startThreadPool();
-}
-
-int main(int argc, char **argv)
-{
- if (argc != 1) {
- runFunction(argv[1]);
- return 0;
- }
- INFO("CameraServiceTest start");
- gExecutable = argv[0];
- runHolderService();
- int n = getNumberOfCameras();
- INFO("%d Cameras available", n);
-
- for (int id = 0; id < n; id++) {
- INFO("Testing camera %d", id);
- testConnect(id); flushCommands();
- testAllowConnectOnceOnly(id); flushCommands();
- testReconnect(id); flushCommands();
- testLockUnlock(id); flushCommands();
- testReconnectFromAnotherProcess(id); flushCommands();
-
- RUN(TestSetPreviewDisplay, id);
- RUN(TestStartPreview, id);
- RUN(TestStartPreviewWithoutDisplay, id);
- RUN(TestAutoFocus, id);
- RUN(TestStopPreview, id);
- RUN(TestTakePicture, id);
- RUN(TestTakeMultiplePictures, id);
- RUN(TestGetParameters, id);
- RUN(TestPictureSize, id);
- RUN(TestPreviewCallbackFlag, id);
- RUN(TestRecording, id);
- RUN(TestPreviewSize, id);
- }
-
- INFO("CameraServiceTest finished");
-}
diff --git a/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2 b/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/services/camera/tests/CameraServiceTest/NOTICE b/services/camera/tests/CameraServiceTest/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/services/camera/tests/CameraServiceTest/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
- Copyright (c) 2005-2008, The Android Open Source Project
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-