| author | Eino-Ville Talvala <etalvala@google.com> | 2013-07-25 17:12:35 -0700 |
|---|---|---|
| committer | Eino-Ville Talvala <etalvala@google.com> | 2013-07-30 10:58:44 -0700 |
| commit | 7b82efe7a376c882f8f938e1c41b8311a8cdda4a (patch) | |
| tree | d7ed69f0a495bc1a873a285ba11e72a9867c5565 /services/camera/libcameraservice/api1/client2 | |
| parent | d054c32443a493513ab63529b0c8b1aca290278c (diff) | |
Camera: Rename new API to camera2, rearrange camera service

- Support API rename from photography to camera2
- Reorganize camera service files
  - API support files to api1/, api2/, api_pro/
  - HAL device support files into device{1,2,3}/
  - Common files into common/
  - Camera service remains at top-level

Change-Id: Ie474c12536f543832fba0a2dc936ac4fd39fe6a9
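For orientation, an approximate sketch of the libcameraservice layout this reorganization produces, inferred from the commit message above and the include paths visible in the diff below. Only api1/client2 files appear in this diffstat, so the contents listed for the other directories (api2/, api_pro/, device1/, device3/) are assumptions based on the commit message, not on files shown here:

    services/camera/libcameraservice/
        CameraService.*              (camera service itself remains at top-level)
        common/                      (shared support, e.g. CameraDeviceBase.h, FrameProcessorBase.h)
        api1/                        (API 1 client, e.g. Camera2Client.h)
        api1/client2/                (API 1 on the camera2 HAL: BurstCapture, CallbackProcessor,
                                      Camera2Heap, CaptureSequencer, FrameProcessor, JpegCompressor, ...)
        api2/                        (camera2 API client support, assumed)
        api_pro/                     (pro API support, assumed)
        device1/ device2/ device3/   (HAL device-version support, e.g. device2/Camera2Device.h)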
Diffstat (limited to 'services/camera/libcameraservice/api1/client2')
22 files changed, 8376 insertions, 0 deletions
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp new file mode 100644 index 0000000..0bfdfd4 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "Camera2-BurstCapture" + +#include <utils/Log.h> +#include <utils/Trace.h> + +#include "BurstCapture.h" + +#include "api1/Camera2Client.h" +#include "api1/client2/JpegCompressor.h" + +namespace android { +namespace camera2 { + +BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer): + mCaptureStreamId(NO_STREAM), + mClient(client), + mSequencer(sequencer) +{ +} + +BurstCapture::~BurstCapture() { +} + +status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/, + int32_t /*firstCaptureId*/) { + ALOGE("Not completely implemented"); + return INVALID_OPERATION; +} + +void BurstCapture::onFrameAvailable() { + ALOGV("%s", __FUNCTION__); + Mutex::Autolock l(mInputMutex); + if(!mInputChanged) { + mInputChanged = true; + mInputSignal.signal(); + } +} + +bool BurstCapture::threadLoop() { + status_t res; + { + Mutex::Autolock l(mInputMutex); + while(!mInputChanged) { + res = mInputSignal.waitRelative(mInputMutex, kWaitDuration); + if(res == TIMED_OUT) return true; + } + mInputChanged = false; + } + + do { + sp<Camera2Client> client = mClient.promote(); + if(client == 0) return false; + ALOGV("%s: Calling processFrameAvailable()", __FUNCTION__); + res = processFrameAvailable(client); + } while(res == OK); + + return true; +} + +CpuConsumer::LockedBuffer* BurstCapture::jpegEncode( + CpuConsumer::LockedBuffer *imgBuffer, + int /*quality*/) +{ + ALOGV("%s", __FUNCTION__); + + CpuConsumer::LockedBuffer *imgEncoded = new CpuConsumer::LockedBuffer; + uint8_t *data = new uint8_t[ANDROID_JPEG_MAX_SIZE]; + imgEncoded->data = data; + imgEncoded->width = imgBuffer->width; + imgEncoded->height = imgBuffer->height; + imgEncoded->stride = imgBuffer->stride; + + Vector<CpuConsumer::LockedBuffer*> buffers; + buffers.push_back(imgBuffer); + buffers.push_back(imgEncoded); + + sp<JpegCompressor> jpeg = new JpegCompressor(); + jpeg->start(buffers, 1); + + bool success = jpeg->waitForDone(10 * 1e9); + if(success) { + return buffers[1]; + } + else { + ALOGE("%s: JPEG encode timed out", __FUNCTION__); + return NULL; // TODO: maybe change function return value to status_t + } +} + +status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) { + ALOGE("Not implemented"); + return INVALID_OPERATION; +} + +} // namespace camera2 +} // namespace android diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h new file mode 100644 index 0000000..ea321fd --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/BurstCapture.h @@ -0,0 +1,72 @@ +/* + * 
Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H +#define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H + +#include <camera/CameraMetadata.h> +#include <binder/MemoryBase.h> +#include <binder/MemoryHeapBase.h> +#include <gui/CpuConsumer.h> + +#include "device2/Camera2Device.h" + +namespace android { + +class Camera2Client; + +namespace camera2 { + +class CaptureSequencer; + +class BurstCapture : public virtual Thread, + public virtual CpuConsumer::FrameAvailableListener +{ +public: + BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer); + virtual ~BurstCapture(); + + virtual void onFrameAvailable(); + virtual status_t start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId); + +protected: + Mutex mInputMutex; + bool mInputChanged; + Condition mInputSignal; + int mCaptureStreamId; + wp<Camera2Client> mClient; + wp<CaptureSequencer> mSequencer; + + // Should only be accessed by processing thread + enum { + NO_STREAM = -1 + }; + + CpuConsumer::LockedBuffer* jpegEncode( + CpuConsumer::LockedBuffer *imgBuffer, + int quality); + + virtual status_t processFrameAvailable(sp<Camera2Client> &client); + +private: + virtual bool threadLoop(); + static const nsecs_t kWaitDuration = 10000000; // 10 ms +}; + +} // namespace camera2 +} // namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp new file mode 100644 index 0000000..12d0859 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp @@ -0,0 +1,539 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera2-CallbackProcessor" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <gui/Surface.h> + +#include "common/CameraDeviceBase.h" +#include "api1/Camera2Client.h" +#include "api1/client2/CallbackProcessor.h" + +#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) ) + +namespace android { +namespace camera2 { + +CallbackProcessor::CallbackProcessor(sp<Camera2Client> client): + Thread(false), + mClient(client), + mDevice(client->getCameraDevice()), + mId(client->getCameraId()), + mCallbackAvailable(false), + mCallbackToApp(false), + mCallbackStreamId(NO_STREAM) { +} + +CallbackProcessor::~CallbackProcessor() { + ALOGV("%s: Exit", __FUNCTION__); + deleteStream(); +} + +void CallbackProcessor::onFrameAvailable() { + Mutex::Autolock l(mInputMutex); + if (!mCallbackAvailable) { + mCallbackAvailable = true; + mCallbackAvailableSignal.signal(); + } +} + +status_t CallbackProcessor::setCallbackWindow( + sp<ANativeWindow> callbackWindow) { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock l(mInputMutex); + + sp<Camera2Client> client = mClient.promote(); + if (client == 0) return OK; + sp<CameraDeviceBase> device = client->getCameraDevice(); + + // If the window is changing, clear out stream if it already exists + if (mCallbackWindow != callbackWindow && mCallbackStreamId != NO_STREAM) { + res = device->deleteStream(mCallbackStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old stream " + "for callbacks: %s (%d)", __FUNCTION__, + client->getCameraId(), strerror(-res), res); + return res; + } + mCallbackStreamId = NO_STREAM; + mCallbackConsumer.clear(); + } + mCallbackWindow = callbackWindow; + mCallbackToApp = (mCallbackWindow != NULL); + + return OK; +} + +status_t CallbackProcessor::updateStream(const Parameters ¶ms) { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock l(mInputMutex); + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + // If possible, use the flexible YUV format + int32_t callbackFormat = params.previewFormat; + if (mCallbackToApp) { + // TODO: etalvala: This should use the flexible YUV format as well, but + // need to reconcile HAL2/HAL3 requirements. 
+ callbackFormat = HAL_PIXEL_FORMAT_YV12; + } else if(params.fastInfo.useFlexibleYuv && + (params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP || + params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) { + callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888; + } + + if (!mCallbackToApp && mCallbackConsumer == 0) { + // Create CPU buffer queue endpoint, since app hasn't given us one + // Make it async to avoid disconnect deadlocks + sp<BufferQueue> bq = new BufferQueue(); + mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount); + mCallbackConsumer->setFrameAvailableListener(this); + mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer")); + mCallbackWindow = new Surface( + mCallbackConsumer->getProducerInterface()); + } + + if (mCallbackStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight, currentFormat; + res = device->getStreamInfo(mCallbackStreamId, + ¤tWidth, ¤tHeight, ¤tFormat); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying callback output stream info: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.previewWidth || + currentHeight != (uint32_t)params.previewHeight || + currentFormat != (uint32_t)callbackFormat) { + // Since size should only change while preview is not running, + // assuming that all existing use of old callback stream is + // completed. + ALOGV("%s: Camera %d: Deleting stream %d since the buffer " + "parameters changed", __FUNCTION__, mId, mCallbackStreamId); + res = device->deleteStream(mCallbackStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for callbacks: %s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + mCallbackStreamId = NO_STREAM; + } + } + + if (mCallbackStreamId == NO_STREAM) { + ALOGV("Creating callback stream: %d x %d, format 0x%x, API format 0x%x", + params.previewWidth, params.previewHeight, + callbackFormat, params.previewFormat); + res = device->createStream(mCallbackWindow, + params.previewWidth, params.previewHeight, + callbackFormat, 0, &mCallbackStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Can't create output stream for callbacks: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + } + + return OK; +} + +status_t CallbackProcessor::deleteStream() { + ATRACE_CALL(); + sp<CameraDeviceBase> device; + status_t res; + { + Mutex::Autolock l(mInputMutex); + + if (mCallbackStreamId == NO_STREAM) { + return OK; + } + device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + } + res = device->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Error waiting for HAL to drain: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + res = device->deleteStream(mCallbackStreamId); + if (res != OK) { + ALOGE("%s: Unable to delete callback stream: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + { + Mutex::Autolock l(mInputMutex); + + mCallbackHeap.clear(); + mCallbackWindow.clear(); + mCallbackConsumer.clear(); + + mCallbackStreamId = NO_STREAM; + } + return OK; +} + +int CallbackProcessor::getStreamId() const { + Mutex::Autolock l(mInputMutex); + return mCallbackStreamId; +} + +void CallbackProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const { +} + +bool CallbackProcessor::threadLoop() { + status_t res; + + { + Mutex::Autolock l(mInputMutex); + while (!mCallbackAvailable) { 
+ res = mCallbackAvailableSignal.waitRelative(mInputMutex, + kWaitDuration); + if (res == TIMED_OUT) return true; + } + mCallbackAvailable = false; + } + + do { + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + res = discardNewCallback(); + } else { + res = processNewCallback(client); + } + } while (res == OK); + + return true; +} + +status_t CallbackProcessor::discardNewCallback() { + ATRACE_CALL(); + status_t res; + CpuConsumer::LockedBuffer imgBuffer; + res = mCallbackConsumer->lockNextBuffer(&imgBuffer); + if (res != OK) { + if (res != BAD_VALUE) { + ALOGE("%s: Camera %d: Error receiving next callback buffer: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + } + return res; + } + mCallbackConsumer->unlockBuffer(imgBuffer); + return OK; +} + +status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { + ATRACE_CALL(); + status_t res; + + sp<Camera2Heap> callbackHeap; + bool useFlexibleYuv = false; + int32_t previewFormat = 0; + size_t heapIdx; + + { + /* acquire SharedParameters before mMutex so we don't dead lock + with Camera2Client code calling into StreamingProcessor */ + SharedParameters::Lock l(client->getParameters()); + Mutex::Autolock m(mInputMutex); + CpuConsumer::LockedBuffer imgBuffer; + if (mCallbackStreamId == NO_STREAM) { + ALOGV("%s: Camera %d:No stream is available" + , __FUNCTION__, mId); + return INVALID_OPERATION; + } + + ALOGV("%s: Getting buffer", __FUNCTION__); + res = mCallbackConsumer->lockNextBuffer(&imgBuffer); + if (res != OK) { + if (res != BAD_VALUE) { + ALOGE("%s: Camera %d: Error receiving next callback buffer: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + } + return res; + } + ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__, + mId); + + if ( l.mParameters.state != Parameters::PREVIEW + && l.mParameters.state != Parameters::RECORD + && l.mParameters.state != Parameters::VIDEO_SNAPSHOT) { + ALOGV("%s: Camera %d: No longer streaming", + __FUNCTION__, mId); + mCallbackConsumer->unlockBuffer(imgBuffer); + return OK; + } + + if (! (l.mParameters.previewCallbackFlags & + CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ) { + ALOGV("%s: No longer enabled, dropping", __FUNCTION__); + mCallbackConsumer->unlockBuffer(imgBuffer); + return OK; + } + if ((l.mParameters.previewCallbackFlags & + CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) && + !l.mParameters.previewCallbackOneShot) { + ALOGV("%s: One shot mode, already sent, dropping", __FUNCTION__); + mCallbackConsumer->unlockBuffer(imgBuffer); + return OK; + } + + previewFormat = l.mParameters.previewFormat; + useFlexibleYuv = l.mParameters.fastInfo.useFlexibleYuv && + (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP || + previewFormat == HAL_PIXEL_FORMAT_YV12); + + int32_t expectedFormat = useFlexibleYuv ? 
+ HAL_PIXEL_FORMAT_YCbCr_420_888 : previewFormat; + + if (imgBuffer.format != expectedFormat) { + ALOGE("%s: Camera %d: Unexpected format for callback: " + "0x%x, expected 0x%x", __FUNCTION__, mId, + imgBuffer.format, expectedFormat); + mCallbackConsumer->unlockBuffer(imgBuffer); + return INVALID_OPERATION; + } + + // In one-shot mode, stop sending callbacks after the first one + if (l.mParameters.previewCallbackFlags & + CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) { + ALOGV("%s: clearing oneshot", __FUNCTION__); + l.mParameters.previewCallbackOneShot = false; + } + + uint32_t destYStride = 0; + uint32_t destCStride = 0; + if (useFlexibleYuv) { + if (previewFormat == HAL_PIXEL_FORMAT_YV12) { + // Strides must align to 16 for YV12 + destYStride = ALIGN(imgBuffer.width, 16); + destCStride = ALIGN(destYStride / 2, 16); + } else { + // No padding for NV21 + ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP, + "Unexpected preview format 0x%x", previewFormat); + destYStride = imgBuffer.width; + destCStride = destYStride / 2; + } + } else { + destYStride = imgBuffer.stride; + // don't care about cStride + } + + size_t bufferSize = Camera2Client::calculateBufferSize( + imgBuffer.width, imgBuffer.height, + previewFormat, destYStride); + size_t currentBufferSize = (mCallbackHeap == 0) ? + 0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount); + if (bufferSize != currentBufferSize) { + mCallbackHeap.clear(); + mCallbackHeap = new Camera2Heap(bufferSize, kCallbackHeapCount, + "Camera2Client::CallbackHeap"); + if (mCallbackHeap->mHeap->getSize() == 0) { + ALOGE("%s: Camera %d: Unable to allocate memory for callbacks", + __FUNCTION__, mId); + mCallbackConsumer->unlockBuffer(imgBuffer); + return INVALID_OPERATION; + } + + mCallbackHeapHead = 0; + mCallbackHeapFree = kCallbackHeapCount; + } + + if (mCallbackHeapFree == 0) { + ALOGE("%s: Camera %d: No free callback buffers, dropping frame", + __FUNCTION__, mId); + mCallbackConsumer->unlockBuffer(imgBuffer); + return OK; + } + + heapIdx = mCallbackHeapHead; + + mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount; + mCallbackHeapFree--; + + // TODO: Get rid of this copy by passing the gralloc queue all the way + // to app + + ssize_t offset; + size_t size; + sp<IMemoryHeap> heap = + mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset, + &size); + uint8_t *data = (uint8_t*)heap->getBase() + offset; + + if (!useFlexibleYuv) { + // Can just memcpy when HAL format matches API format + memcpy(data, imgBuffer.data, bufferSize); + } else { + res = convertFromFlexibleYuv(previewFormat, data, imgBuffer, + destYStride, destCStride); + if (res != OK) { + ALOGE("%s: Camera %d: Can't convert between 0x%x and 0x%x formats!", + __FUNCTION__, mId, imgBuffer.format, previewFormat); + mCallbackConsumer->unlockBuffer(imgBuffer); + return BAD_VALUE; + } + } + + ALOGV("%s: Freeing buffer", __FUNCTION__); + mCallbackConsumer->unlockBuffer(imgBuffer); + + // mCallbackHeap may get freed up once input mutex is released + callbackHeap = mCallbackHeap; + } + + // Call outside parameter lock to allow re-entrancy from notification + { + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + ALOGV("%s: Camera %d: Invoking client data callback", + __FUNCTION__, mId); + l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_FRAME, + callbackHeap->mBuffers[heapIdx], NULL); + } + } + + // Only increment free if we're still using the same heap + mCallbackHeapFree++; + + ALOGV("%s: exit", __FUNCTION__); + + return 
OK; +} + +status_t CallbackProcessor::convertFromFlexibleYuv(int32_t previewFormat, + uint8_t *dst, + const CpuConsumer::LockedBuffer &src, + uint32_t dstYStride, + uint32_t dstCStride) const { + + if (previewFormat != HAL_PIXEL_FORMAT_YCrCb_420_SP && + previewFormat != HAL_PIXEL_FORMAT_YV12) { + ALOGE("%s: Camera %d: Unexpected preview format when using " + "flexible YUV: 0x%x", __FUNCTION__, mId, previewFormat); + return INVALID_OPERATION; + } + + // Copy Y plane, adjusting for stride + const uint8_t *ySrc = src.data; + uint8_t *yDst = dst; + for (size_t row = 0; row < src.height; row++) { + memcpy(yDst, ySrc, src.width); + ySrc += src.stride; + yDst += dstYStride; + } + + // Copy/swizzle chroma planes, 4:2:0 subsampling + const uint8_t *cbSrc = src.dataCb; + const uint8_t *crSrc = src.dataCr; + size_t chromaHeight = src.height / 2; + size_t chromaWidth = src.width / 2; + ssize_t chromaGap = src.chromaStride - + (chromaWidth * src.chromaStep); + size_t dstChromaGap = dstCStride - chromaWidth; + + if (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) { + // Flexible YUV chroma to NV21 chroma + uint8_t *crcbDst = yDst; + // Check for shortcuts + if (cbSrc == crSrc + 1 && src.chromaStep == 2) { + ALOGV("%s: Fast NV21->NV21", __FUNCTION__); + // Source has semiplanar CrCb chroma layout, can copy by rows + for (size_t row = 0; row < chromaHeight; row++) { + memcpy(crcbDst, crSrc, src.width); + crcbDst += src.width; + crSrc += src.chromaStride; + } + } else { + ALOGV("%s: Generic->NV21", __FUNCTION__); + // Generic copy, always works but not very efficient + for (size_t row = 0; row < chromaHeight; row++) { + for (size_t col = 0; col < chromaWidth; col++) { + *(crcbDst++) = *crSrc; + *(crcbDst++) = *cbSrc; + crSrc += src.chromaStep; + cbSrc += src.chromaStep; + } + crSrc += chromaGap; + cbSrc += chromaGap; + } + } + } else { + // flexible YUV chroma to YV12 chroma + ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YV12, + "Unexpected preview format 0x%x", previewFormat); + uint8_t *crDst = yDst; + uint8_t *cbDst = yDst + chromaHeight * dstCStride; + if (src.chromaStep == 1) { + ALOGV("%s: Fast YV12->YV12", __FUNCTION__); + // Source has planar chroma layout, can copy by row + for (size_t row = 0; row < chromaHeight; row++) { + memcpy(crDst, crSrc, chromaWidth); + crDst += dstCStride; + crSrc += src.chromaStride; + } + for (size_t row = 0; row < chromaHeight; row++) { + memcpy(cbDst, cbSrc, chromaWidth); + cbDst += dstCStride; + cbSrc += src.chromaStride; + } + } else { + ALOGV("%s: Generic->YV12", __FUNCTION__); + // Generic copy, always works but not very efficient + for (size_t row = 0; row < chromaHeight; row++) { + for (size_t col = 0; col < chromaWidth; col++) { + *(crDst++) = *crSrc; + *(cbDst++) = *cbSrc; + crSrc += src.chromaStep; + cbSrc += src.chromaStep; + } + crSrc += chromaGap; + cbSrc += chromaGap; + crDst += dstChromaGap; + cbDst += dstChromaGap; + } + } + } + + return OK; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h new file mode 100644 index 0000000..613f5be --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CALLBACKPROCESSOR_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_CALLBACKPROCESSOR_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> +#include <gui/CpuConsumer.h> + +#include "api1/client2/Camera2Heap.h" + +namespace android { + +class Camera2Client; +class CameraDeviceBase; + +namespace camera2 { + +class Parameters; + +/*** + * Still image capture output image processing + */ +class CallbackProcessor: + public Thread, public CpuConsumer::FrameAvailableListener { + public: + CallbackProcessor(sp<Camera2Client> client); + ~CallbackProcessor(); + + void onFrameAvailable(); + + // Set to NULL to disable the direct-to-app callback window + status_t setCallbackWindow(sp<ANativeWindow> callbackWindow); + status_t updateStream(const Parameters ¶ms); + status_t deleteStream(); + int getStreamId() const; + + void dump(int fd, const Vector<String16>& args) const; + private: + static const nsecs_t kWaitDuration = 10000000; // 10 ms + wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; + int mId; + + mutable Mutex mInputMutex; + bool mCallbackAvailable; + Condition mCallbackAvailableSignal; + + enum { + NO_STREAM = -1 + }; + + // True if mCallbackWindow is a remote consumer, false if just the local + // mCallbackConsumer + bool mCallbackToApp; + int mCallbackStreamId; + static const size_t kCallbackHeapCount = 6; + sp<CpuConsumer> mCallbackConsumer; + sp<ANativeWindow> mCallbackWindow; + sp<Camera2Heap> mCallbackHeap; + int mCallbackHeapId; + size_t mCallbackHeapHead, mCallbackHeapFree; + + virtual bool threadLoop(); + + status_t processNewCallback(sp<Camera2Client> &client); + // Used when shutting down + status_t discardNewCallback(); + + // Convert from flexible YUV to NV21 or YV12 + status_t convertFromFlexibleYuv(int32_t previewFormat, + uint8_t *dst, + const CpuConsumer::LockedBuffer &src, + uint32_t dstYStride, + uint32_t dstCStride) const; +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/Camera2Heap.h b/services/camera/libcameraservice/api1/client2/Camera2Heap.h new file mode 100644 index 0000000..9c72d76 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/Camera2Heap.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROiD_SERVERS_CAMERA_CAMERA2HEAP_H +#define ANDROiD_SERVERS_CAMERA_CAMERA2HEAP_H + +#include <binder/MemoryBase.h> +#include <binder/MemoryHeapBase.h> + +namespace android { +namespace camera2 { + +// Utility class for managing a set of IMemory blocks +class Camera2Heap : public RefBase { + public: + Camera2Heap(size_t buf_size, uint_t num_buffers = 1, + const char *name = NULL) : + mBufSize(buf_size), + mNumBufs(num_buffers) { + mHeap = new MemoryHeapBase(buf_size * num_buffers, 0, name); + mBuffers = new sp<MemoryBase>[mNumBufs]; + for (uint_t i = 0; i < mNumBufs; i++) + mBuffers[i] = new MemoryBase(mHeap, + i * mBufSize, + mBufSize); + } + + virtual ~Camera2Heap() + { + delete [] mBuffers; + } + + size_t mBufSize; + uint_t mNumBufs; + sp<MemoryHeapBase> mHeap; + sp<MemoryBase> *mBuffers; +}; + +}; // namespace camera2 +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp new file mode 100644 index 0000000..ad1590a --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp @@ -0,0 +1,710 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera2-CaptureSequencer" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <utils/Vector.h> + +#include "api1/Camera2Client.h" +#include "api1/client2/CaptureSequencer.h" +#include "api1/client2/BurstCapture.h" +#include "api1/client2/Parameters.h" +#include "api1/client2/ZslProcessorInterface.h" + +namespace android { +namespace camera2 { + +/** Public members */ + +CaptureSequencer::CaptureSequencer(wp<Camera2Client> client): + Thread(false), + mStartCapture(false), + mBusy(false), + mNewAEState(false), + mNewFrameReceived(false), + mNewCaptureReceived(false), + mShutterNotified(false), + mClient(client), + mCaptureState(IDLE), + mTriggerId(0), + mTimeoutCount(0), + mCaptureId(Camera2Client::kCaptureRequestIdStart), + mMsgType(0) { + ALOGV("%s", __FUNCTION__); +} + +CaptureSequencer::~CaptureSequencer() { + ALOGV("%s: Exit", __FUNCTION__); +} + +void CaptureSequencer::setZslProcessor(wp<ZslProcessorInterface> processor) { + Mutex::Autolock l(mInputMutex); + mZslProcessor = processor; +} + +status_t CaptureSequencer::startCapture(int msgType) { + ALOGV("%s", __FUNCTION__); + ATRACE_CALL(); + Mutex::Autolock l(mInputMutex); + if (mBusy) { + ALOGE("%s: Already busy capturing!", __FUNCTION__); + return INVALID_OPERATION; + } + if (!mStartCapture) { + mMsgType = msgType; + mStartCapture = true; + mStartCaptureSignal.signal(); + } + return OK; +} + +status_t CaptureSequencer::waitUntilIdle(nsecs_t timeout) { + ATRACE_CALL(); + ALOGV("%s: Waiting for idle", __FUNCTION__); + Mutex::Autolock l(mStateMutex); + status_t res = -1; + while (mCaptureState != IDLE) { + nsecs_t startTime = systemTime(); + + res = mStateChanged.waitRelative(mStateMutex, timeout); + if (res != OK) return res; + + timeout -= (systemTime() - startTime); + } + ALOGV("%s: Now idle", __FUNCTION__); + return OK; +} + +void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) { + ATRACE_CALL(); + Mutex::Autolock l(mInputMutex); + mAEState = newState; + mAETriggerId = triggerId; + if (!mNewAEState) { + mNewAEState = true; + mNewNotifySignal.signal(); + } +} + +void CaptureSequencer::onFrameAvailable(int32_t frameId, + const CameraMetadata &frame) { + ALOGV("%s: Listener found new frame", __FUNCTION__); + ATRACE_CALL(); + Mutex::Autolock l(mInputMutex); + mNewFrameId = frameId; + mNewFrame = frame; + if (!mNewFrameReceived) { + mNewFrameReceived = true; + mNewFrameSignal.signal(); + } +} + +void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp, + sp<MemoryBase> captureBuffer) { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + Mutex::Autolock l(mInputMutex); + mCaptureTimestamp = timestamp; + mCaptureBuffer = captureBuffer; + if (!mNewCaptureReceived) { + mNewCaptureReceived = true; + mNewCaptureSignal.signal(); + } +} + + +void CaptureSequencer::dump(int fd, const Vector<String16>& /*args*/) { + String8 result; + if (mCaptureRequest.entryCount() != 0) { + result = " Capture request:\n"; + write(fd, result.string(), result.size()); + mCaptureRequest.dump(fd, 2, 6); + } else { + result = " Capture request: undefined\n"; + write(fd, result.string(), result.size()); + } + result = String8::format(" Current capture state: %s\n", + kStateNames[mCaptureState]); + result.append(" Latest captured frame:\n"); + write(fd, result.string(), result.size()); + mNewFrame.dump(fd, 2, 6); +} + +/** Private members */ + +const char* CaptureSequencer::kStateNames[CaptureSequencer::NUM_CAPTURE_STATES+1] = +{ + "IDLE", + "START", + 
"ZSL_START", + "ZSL_WAITING", + "ZSL_REPROCESSING", + "STANDARD_START", + "STANDARD_PRECAPTURE_WAIT", + "STANDARD_CAPTURE", + "STANDARD_CAPTURE_WAIT", + "BURST_CAPTURE_START", + "BURST_CAPTURE_WAIT", + "DONE", + "ERROR", + "UNKNOWN" +}; + +const CaptureSequencer::StateManager + CaptureSequencer::kStateManagers[CaptureSequencer::NUM_CAPTURE_STATES-1] = { + &CaptureSequencer::manageIdle, + &CaptureSequencer::manageStart, + &CaptureSequencer::manageZslStart, + &CaptureSequencer::manageZslWaiting, + &CaptureSequencer::manageZslReprocessing, + &CaptureSequencer::manageStandardStart, + &CaptureSequencer::manageStandardPrecaptureWait, + &CaptureSequencer::manageStandardCapture, + &CaptureSequencer::manageStandardCaptureWait, + &CaptureSequencer::manageBurstCaptureStart, + &CaptureSequencer::manageBurstCaptureWait, + &CaptureSequencer::manageDone, +}; + +bool CaptureSequencer::threadLoop() { + + sp<Camera2Client> client = mClient.promote(); + if (client == 0) return false; + + CaptureState currentState; + { + Mutex::Autolock l(mStateMutex); + currentState = mCaptureState; + } + + currentState = (this->*kStateManagers[currentState])(client); + + Mutex::Autolock l(mStateMutex); + if (currentState != mCaptureState) { + mCaptureState = currentState; + ATRACE_INT("cam2_capt_state", mCaptureState); + ALOGV("Camera %d: New capture state %s", + client->getCameraId(), kStateNames[mCaptureState]); + mStateChanged.signal(); + } + + if (mCaptureState == ERROR) { + ALOGE("Camera %d: Stopping capture sequencer due to error", + client->getCameraId()); + return false; + } + + return true; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageIdle( + sp<Camera2Client> &/*client*/) { + status_t res; + Mutex::Autolock l(mInputMutex); + while (!mStartCapture) { + res = mStartCaptureSignal.waitRelative(mInputMutex, + kWaitDuration); + if (res == TIMED_OUT) break; + } + if (mStartCapture) { + mStartCapture = false; + mBusy = true; + return START; + } + return IDLE; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &client) { + status_t res = OK; + ATRACE_CALL(); + mCaptureId++; + if (mCaptureId >= Camera2Client::kCaptureRequestIdEnd) { + mCaptureId = Camera2Client::kCaptureRequestIdStart; + } + { + Mutex::Autolock l(mInputMutex); + mBusy = false; + } + + { + SharedParameters::Lock l(client->getParameters()); + switch (l.mParameters.state) { + case Parameters::DISCONNECTED: + ALOGW("%s: Camera %d: Discarding image data during shutdown ", + __FUNCTION__, client->getCameraId()); + res = INVALID_OPERATION; + break; + case Parameters::STILL_CAPTURE: + res = client->getCameraDevice()->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Camera %d: Can't idle after still capture: " + "%s (%d)", __FUNCTION__, client->getCameraId(), + strerror(-res), res); + } + l.mParameters.state = Parameters::STOPPED; + break; + case Parameters::VIDEO_SNAPSHOT: + l.mParameters.state = Parameters::RECORD; + break; + default: + ALOGE("%s: Camera %d: Still image produced unexpectedly " + "in state %s!", + __FUNCTION__, client->getCameraId(), + Parameters::getStateName(l.mParameters.state)); + res = INVALID_OPERATION; + } + } + sp<ZslProcessorInterface> processor = mZslProcessor.promote(); + if (processor != 0) { + ALOGV("%s: Memory optimization, clearing ZSL queue", + __FUNCTION__); + processor->clearZslQueue(); + } + + /** + * Fire the jpegCallback in Camera#takePicture(..., jpegCallback) + */ + if (mCaptureBuffer != 0 && res == OK) { + Camera2Client::SharedCameraCallbacks::Lock + 
l(client->mSharedCameraCallbacks); + ALOGV("%s: Sending still image to client", __FUNCTION__); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, + mCaptureBuffer, NULL); + } else { + ALOGV("%s: No client!", __FUNCTION__); + } + } + mCaptureBuffer.clear(); + + return IDLE; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageStart( + sp<Camera2Client> &client) { + ALOGV("%s", __FUNCTION__); + status_t res; + ATRACE_CALL(); + SharedParameters::Lock l(client->getParameters()); + CaptureState nextState = DONE; + + res = updateCaptureRequest(l.mParameters, client); + if (res != OK ) { + ALOGE("%s: Camera %d: Can't update still image capture request: %s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return DONE; + } + + if(l.mParameters.lightFx != Parameters::LIGHTFX_NONE && + l.mParameters.state == Parameters::STILL_CAPTURE) { + nextState = BURST_CAPTURE_START; + } + else if (l.mParameters.zslMode && + l.mParameters.state == Parameters::STILL_CAPTURE && + l.mParameters.flashMode != Parameters::FLASH_MODE_ON) { + nextState = ZSL_START; + } else { + nextState = STANDARD_START; + } + mShutterNotified = false; + + return nextState; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageZslStart( + sp<Camera2Client> &client) { + ALOGV("%s", __FUNCTION__); + status_t res; + sp<ZslProcessorInterface> processor = mZslProcessor.promote(); + if (processor == 0) { + ALOGE("%s: No ZSL queue to use!", __FUNCTION__); + return DONE; + } + + client->registerFrameListener(mCaptureId, mCaptureId + 1, + this); + + // TODO: Actually select the right thing here. + res = processor->pushToReprocess(mCaptureId); + if (res != OK) { + if (res == NOT_ENOUGH_DATA) { + ALOGV("%s: Camera %d: ZSL queue doesn't have good frame, " + "falling back to normal capture", __FUNCTION__, + client->getCameraId()); + } else { + ALOGE("%s: Camera %d: Error in ZSL queue: %s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + } + return STANDARD_START; + } + + SharedParameters::Lock l(client->getParameters()); + /* warning: this also locks a SharedCameraCallbacks */ + shutterNotifyLocked(l.mParameters, client, mMsgType); + mShutterNotified = true; + mTimeoutCount = kMaxTimeoutsForCaptureEnd; + return STANDARD_CAPTURE_WAIT; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting( + sp<Camera2Client> &/*client*/) { + ALOGV("%s", __FUNCTION__); + return DONE; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing( + sp<Camera2Client> &/*client*/) { + ALOGV("%s", __FUNCTION__); + return START; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart( + sp<Camera2Client> &client) { + ATRACE_CALL(); + + // Get the onFrameAvailable callback when the requestID == mCaptureId + client->registerFrameListener(mCaptureId, mCaptureId + 1, + this); + { + SharedParameters::Lock l(client->getParameters()); + mTriggerId = l.mParameters.precaptureTriggerCounter++; + } + client->getCameraDevice()->triggerPrecaptureMetering(mTriggerId); + + mAeInPrecapture = false; + mTimeoutCount = kMaxTimeoutsForPrecaptureStart; + return STANDARD_PRECAPTURE_WAIT; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait( + sp<Camera2Client> &/*client*/) { + status_t res; + ATRACE_CALL(); + Mutex::Autolock l(mInputMutex); + while (!mNewAEState) { + res = mNewNotifySignal.waitRelative(mInputMutex, kWaitDuration); + if (res == TIMED_OUT) { + mTimeoutCount--; + break; + } + } + if (mTimeoutCount <= 
0) { + ALOGW("Timed out waiting for precapture %s", + mAeInPrecapture ? "end" : "start"); + return STANDARD_CAPTURE; + } + if (mNewAEState) { + if (!mAeInPrecapture) { + // Waiting to see PRECAPTURE state + if (mAETriggerId == mTriggerId && + mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) { + ALOGV("%s: Got precapture start", __FUNCTION__); + mAeInPrecapture = true; + mTimeoutCount = kMaxTimeoutsForPrecaptureEnd; + } + } else { + // Waiting to see PRECAPTURE state end + if (mAETriggerId == mTriggerId && + mAEState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) { + ALOGV("%s: Got precapture end", __FUNCTION__); + return STANDARD_CAPTURE; + } + } + mNewAEState = false; + } + return STANDARD_PRECAPTURE_WAIT; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture( + sp<Camera2Client> &client) { + status_t res; + ATRACE_CALL(); + SharedParameters::Lock l(client->getParameters()); + Vector<uint8_t> outputStreams; + + /** + * Set up output streams in the request + * - preview + * - capture/jpeg + * - callback (if preview callbacks enabled) + * - recording (if recording enabled) + */ + outputStreams.push(client->getPreviewStreamId()); + outputStreams.push(client->getCaptureStreamId()); + + if (l.mParameters.previewCallbackFlags & + CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) { + outputStreams.push(client->getCallbackStreamId()); + } + + if (l.mParameters.state == Parameters::VIDEO_SNAPSHOT) { + outputStreams.push(client->getRecordingStreamId()); + } + + res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS, + outputStreams); + if (res == OK) { + res = mCaptureRequest.update(ANDROID_REQUEST_ID, + &mCaptureId, 1); + } + if (res == OK) { + res = mCaptureRequest.sort(); + } + + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return DONE; + } + + // Create a capture copy since CameraDeviceBase#capture takes ownership + CameraMetadata captureCopy = mCaptureRequest; + if (captureCopy.entryCount() == 0) { + ALOGE("%s: Camera %d: Unable to copy capture request for HAL device", + __FUNCTION__, client->getCameraId()); + return DONE; + } + + /** + * Clear the streaming request for still-capture pictures + * (as opposed to i.e. 
video snapshots) + */ + if (l.mParameters.state == Parameters::STILL_CAPTURE) { + // API definition of takePicture() - stop preview before taking pic + res = client->stopStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to stop preview for still capture: " + "%s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return DONE; + } + } + // TODO: Capture should be atomic with setStreamingRequest here + res = client->getCameraDevice()->capture(captureCopy); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to submit still image capture request: " + "%s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return DONE; + } + + mTimeoutCount = kMaxTimeoutsForCaptureEnd; + return STANDARD_CAPTURE_WAIT; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait( + sp<Camera2Client> &client) { + status_t res; + ATRACE_CALL(); + Mutex::Autolock l(mInputMutex); + + // Wait for new metadata result (mNewFrame) + while (!mNewFrameReceived) { + res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration); + if (res == TIMED_OUT) { + mTimeoutCount--; + break; + } + } + + // Approximation of the shutter being closed + // - TODO: use the hal3 exposure callback in Camera3Device instead + if (mNewFrameReceived && !mShutterNotified) { + SharedParameters::Lock l(client->getParameters()); + /* warning: this also locks a SharedCameraCallbacks */ + shutterNotifyLocked(l.mParameters, client, mMsgType); + mShutterNotified = true; + } + + // Wait until jpeg was captured by JpegProcessor + while (mNewFrameReceived && !mNewCaptureReceived) { + res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration); + if (res == TIMED_OUT) { + mTimeoutCount--; + break; + } + } + if (mTimeoutCount <= 0) { + ALOGW("Timed out waiting for capture to complete"); + return DONE; + } + if (mNewFrameReceived && mNewCaptureReceived) { + if (mNewFrameId != mCaptureId) { + ALOGW("Mismatched capture frame IDs: Expected %d, got %d", + mCaptureId, mNewFrameId); + } + camera_metadata_entry_t entry; + entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count == 0) { + ALOGE("No timestamp field in capture frame!"); + } + if (entry.data.i64[0] != mCaptureTimestamp) { + ALOGW("Mismatched capture timestamps: Metadata frame %lld," + " captured buffer %lld", + entry.data.i64[0], + mCaptureTimestamp); + } + client->removeFrameListener(mCaptureId, mCaptureId + 1, this); + + mNewFrameReceived = false; + mNewCaptureReceived = false; + return DONE; + } + return STANDARD_CAPTURE_WAIT; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart( + sp<Camera2Client> &client) { + ALOGV("%s", __FUNCTION__); + status_t res; + ATRACE_CALL(); + + // check which burst mode is set, create respective burst object + { + SharedParameters::Lock l(client->getParameters()); + + res = updateCaptureRequest(l.mParameters, client); + if(res != OK) { + return DONE; + } + + // + // check for burst mode type in mParameters here + // + mBurstCapture = new BurstCapture(client, this); + } + + res = mCaptureRequest.update(ANDROID_REQUEST_ID, &mCaptureId, 1); + if (res == OK) { + res = mCaptureRequest.sort(); + } + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return DONE; + } + + CameraMetadata captureCopy = mCaptureRequest; + if (captureCopy.entryCount() == 0) { + ALOGE("%s: Camera %d: Unable to copy capture request for HAL device", + __FUNCTION__, client->getCameraId()); + 
return DONE; + } + + Vector<CameraMetadata> requests; + requests.push(mCaptureRequest); + res = mBurstCapture->start(requests, mCaptureId); + mTimeoutCount = kMaxTimeoutsForCaptureEnd * 10; + return BURST_CAPTURE_WAIT; +} + +CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait( + sp<Camera2Client> &/*client*/) { + status_t res; + ATRACE_CALL(); + + while (!mNewCaptureReceived) { + res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration); + if (res == TIMED_OUT) { + mTimeoutCount--; + break; + } + } + + if (mTimeoutCount <= 0) { + ALOGW("Timed out waiting for burst capture to complete"); + return DONE; + } + if (mNewCaptureReceived) { + mNewCaptureReceived = false; + // TODO: update mCaptureId to last burst's capture ID + 1? + return DONE; + } + + return BURST_CAPTURE_WAIT; +} + +status_t CaptureSequencer::updateCaptureRequest(const Parameters ¶ms, + sp<Camera2Client> &client) { + ATRACE_CALL(); + status_t res; + if (mCaptureRequest.entryCount() == 0) { + res = client->getCameraDevice()->createDefaultRequest( + CAMERA2_TEMPLATE_STILL_CAPTURE, + &mCaptureRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to create default still image request:" + " %s (%d)", __FUNCTION__, client->getCameraId(), + strerror(-res), res); + return res; + } + } + + res = params.updateRequest(&mCaptureRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update common entries of capture " + "request: %s (%d)", __FUNCTION__, client->getCameraId(), + strerror(-res), res); + return res; + } + + res = params.updateRequestJpeg(&mCaptureRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update JPEG entries of capture " + "request: %s (%d)", __FUNCTION__, client->getCameraId(), + strerror(-res), res); + return res; + } + + return OK; +} + +/*static*/ void CaptureSequencer::shutterNotifyLocked(const Parameters ¶ms, + sp<Camera2Client> client, int msgType) { + ATRACE_CALL(); + + if (params.state == Parameters::STILL_CAPTURE + && params.playShutterSound + && (msgType & CAMERA_MSG_SHUTTER)) { + client->getCameraService()->playSound(CameraService::SOUND_SHUTTER); + } + + { + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); + + ALOGV("%s: Notifying of shutter close to client", __FUNCTION__); + if (l.mRemoteCallback != 0) { + // ShutterCallback + l.mRemoteCallback->notifyCallback(CAMERA_MSG_SHUTTER, + /*ext1*/0, /*ext2*/0); + + // RawCallback with null buffer + l.mRemoteCallback->notifyCallback(CAMERA_MSG_RAW_IMAGE_NOTIFY, + /*ext1*/0, /*ext2*/0); + } else { + ALOGV("%s: No client!", __FUNCTION__); + } + } +} + + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h new file mode 100644 index 0000000..76750aa --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H + +#include <binder/MemoryBase.h> +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> +#include "camera/CameraMetadata.h" +#include "Parameters.h" +#include "FrameProcessor.h" + +namespace android { + +class Camera2Client; + +namespace camera2 { + +class ZslProcessorInterface; +class BurstCapture; + +/** + * Manages the still image capture process for + * zero-shutter-lag, regular, and video snapshots. + */ +class CaptureSequencer: + virtual public Thread, + virtual public FrameProcessor::FilteredListener { + public: + CaptureSequencer(wp<Camera2Client> client); + ~CaptureSequencer(); + + // Get reference to the ZslProcessor, which holds the ZSL buffers and frames + void setZslProcessor(wp<ZslProcessorInterface> processor); + + // Begin still image capture + status_t startCapture(int msgType); + + // Wait until current image capture completes; returns immediately if no + // capture is active. Returns TIMED_OUT if capture does not complete during + // the specified duration. + status_t waitUntilIdle(nsecs_t timeout); + + // Notifications about AE state changes + void notifyAutoExposure(uint8_t newState, int triggerId); + + // Notifications from the frame processor + virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame); + + // Notifications from the JPEG processor + void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer); + + void dump(int fd, const Vector<String16>& args); + + private: + /** + * Accessed by other threads + */ + Mutex mInputMutex; + + bool mStartCapture; + bool mBusy; + Condition mStartCaptureSignal; + + bool mNewAEState; + uint8_t mAEState; + int mAETriggerId; + Condition mNewNotifySignal; + + bool mNewFrameReceived; + int32_t mNewFrameId; + CameraMetadata mNewFrame; + Condition mNewFrameSignal; + + bool mNewCaptureReceived; + nsecs_t mCaptureTimestamp; + sp<MemoryBase> mCaptureBuffer; + Condition mNewCaptureSignal; + + bool mShutterNotified; + + /** + * Internal to CaptureSequencer + */ + static const nsecs_t kWaitDuration = 100000000; // 100 ms + static const int kMaxTimeoutsForPrecaptureStart = 2; // 200 ms + static const int kMaxTimeoutsForPrecaptureEnd = 20; // 2 sec + static const int kMaxTimeoutsForCaptureEnd = 40; // 4 sec + + wp<Camera2Client> mClient; + wp<ZslProcessorInterface> mZslProcessor; + sp<BurstCapture> mBurstCapture; + + enum CaptureState { + IDLE, + START, + ZSL_START, + ZSL_WAITING, + ZSL_REPROCESSING, + STANDARD_START, + STANDARD_PRECAPTURE_WAIT, + STANDARD_CAPTURE, + STANDARD_CAPTURE_WAIT, + BURST_CAPTURE_START, + BURST_CAPTURE_WAIT, + DONE, + ERROR, + NUM_CAPTURE_STATES + } mCaptureState; + static const char* kStateNames[]; + Mutex mStateMutex; // Guards mCaptureState + Condition mStateChanged; + + typedef CaptureState (CaptureSequencer::*StateManager)(sp<Camera2Client> &client); + static const StateManager kStateManagers[]; + + CameraMetadata mCaptureRequest; + + int mTriggerId; + int mTimeoutCount; + bool mAeInPrecapture; + + int32_t mCaptureId; + int mMsgType; + + // Main internal methods + + virtual bool threadLoop(); + + CaptureState manageIdle(sp<Camera2Client> &client); + CaptureState manageStart(sp<Camera2Client> &client); + + CaptureState manageZslStart(sp<Camera2Client> &client); + 
CaptureState manageZslWaiting(sp<Camera2Client> &client); + CaptureState manageZslReprocessing(sp<Camera2Client> &client); + + CaptureState manageStandardStart(sp<Camera2Client> &client); + CaptureState manageStandardPrecaptureWait(sp<Camera2Client> &client); + CaptureState manageStandardCapture(sp<Camera2Client> &client); + CaptureState manageStandardCaptureWait(sp<Camera2Client> &client); + + CaptureState manageBurstCaptureStart(sp<Camera2Client> &client); + CaptureState manageBurstCaptureWait(sp<Camera2Client> &client); + + CaptureState manageDone(sp<Camera2Client> &client); + + // Utility methods + + status_t updateCaptureRequest(const Parameters ¶ms, + sp<Camera2Client> &client); + + // Emit Shutter/Raw callback to java, and maybe play a shutter sound + static void shutterNotifyLocked(const Parameters ¶ms, + sp<Camera2Client> client, int msgType); +}; + +}; // namespace camera2 +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp new file mode 100644 index 0000000..c34cb12 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp @@ -0,0 +1,315 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera2-FrameProcessor" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> + +#include "common/CameraDeviceBase.h" +#include "api1/Camera2Client.h" +#include "api1/client2/FrameProcessor.h" + +namespace android { +namespace camera2 { + +FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device, + wp<Camera2Client> client) : + FrameProcessorBase(device), + mClient(client), + mLastFrameNumberOfFaces(0) { + + sp<CameraDeviceBase> d = device.promote(); + mSynthesize3ANotify = !(d->willNotify3A()); +} + +FrameProcessor::~FrameProcessor() { +} + +bool FrameProcessor::processSingleFrame(CameraMetadata &frame, + const sp<CameraDeviceBase> &device) { + + sp<Camera2Client> client = mClient.promote(); + if (!client.get()) { + return false; + } + + if (processFaceDetect(frame, client) != OK) { + return false; + } + + if (mSynthesize3ANotify) { + // Ignoring missing fields for now + process3aState(frame, client); + } + + if (!FrameProcessorBase::processSingleFrame(frame, device)) { + return false; + } + + return true; +} + +status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, + const sp<Camera2Client> &client) { + status_t res = BAD_VALUE; + ATRACE_CALL(); + camera_metadata_ro_entry_t entry; + bool enableFaceDetect; + + { + SharedParameters::Lock l(client->getParameters()); + enableFaceDetect = l.mParameters.enableFaceDetect; + } + entry = frame.find(ANDROID_STATISTICS_FACE_DETECT_MODE); + + // TODO: This should be an error once implementations are compliant + if (entry.count == 0) { + return OK; + } + + uint8_t faceDetectMode = entry.data.u8[0]; + + camera_frame_metadata metadata; + Vector<camera_face_t> faces; + metadata.number_of_faces = 0; + + if (enableFaceDetect && + faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) { + + SharedParameters::Lock l(client->getParameters()); + entry = frame.find(ANDROID_STATISTICS_FACE_RECTANGLES); + if (entry.count == 0) { + // No faces this frame + /* warning: locks SharedCameraCallbacks */ + callbackFaceDetection(client, metadata); + return OK; + } + metadata.number_of_faces = entry.count / 4; + if (metadata.number_of_faces > + l.mParameters.fastInfo.maxFaces) { + ALOGE("%s: Camera %d: More faces than expected! 
(Got %d, max %d)", + __FUNCTION__, client->getCameraId(), + metadata.number_of_faces, l.mParameters.fastInfo.maxFaces); + return res; + } + const int32_t *faceRects = entry.data.i32; + + entry = frame.find(ANDROID_STATISTICS_FACE_SCORES); + if (entry.count == 0) { + ALOGE("%s: Camera %d: Unable to read face scores", + __FUNCTION__, client->getCameraId()); + return res; + } + const uint8_t *faceScores = entry.data.u8; + + const int32_t *faceLandmarks = NULL; + const int32_t *faceIds = NULL; + + if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) { + entry = frame.find(ANDROID_STATISTICS_FACE_LANDMARKS); + if (entry.count == 0) { + ALOGE("%s: Camera %d: Unable to read face landmarks", + __FUNCTION__, client->getCameraId()); + return res; + } + faceLandmarks = entry.data.i32; + + entry = frame.find(ANDROID_STATISTICS_FACE_IDS); + + if (entry.count == 0) { + ALOGE("%s: Camera %d: Unable to read face IDs", + __FUNCTION__, client->getCameraId()); + return res; + } + faceIds = entry.data.i32; + } + + faces.setCapacity(metadata.number_of_faces); + + size_t maxFaces = metadata.number_of_faces; + for (size_t i = 0; i < maxFaces; i++) { + if (faceScores[i] == 0) { + metadata.number_of_faces--; + continue; + } + if (faceScores[i] > 100) { + ALOGW("%s: Face index %d with out of range score %d", + __FUNCTION__, i, faceScores[i]); + } + + camera_face_t face; + + face.rect[0] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 0]); + face.rect[1] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 1]); + face.rect[2] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 2]); + face.rect[3] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 3]); + + face.score = faceScores[i]; + if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) { + face.id = faceIds[i]; + face.left_eye[0] = + l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]); + face.left_eye[1] = + l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]); + face.right_eye[0] = + l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]); + face.right_eye[1] = + l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]); + face.mouth[0] = + l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]); + face.mouth[1] = + l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]); + } else { + face.id = 0; + face.left_eye[0] = face.left_eye[1] = -2000; + face.right_eye[0] = face.right_eye[1] = -2000; + face.mouth[0] = face.mouth[1] = -2000; + } + faces.push_back(face); + } + + metadata.faces = faces.editArray(); + } + + /* warning: locks SharedCameraCallbacks */ + callbackFaceDetection(client, metadata); + + return OK; +} + +status_t FrameProcessor::process3aState(const CameraMetadata &frame, + const sp<Camera2Client> &client) { + + ATRACE_CALL(); + camera_metadata_ro_entry_t entry; + int mId = client->getCameraId(); + + entry = frame.find(ANDROID_REQUEST_FRAME_COUNT); + int32_t frameNumber = entry.data.i32[0]; + + // Get 3A states from result metadata + bool gotAllStates = true; + + AlgState new3aState; + + entry = frame.find(ANDROID_CONTROL_AE_STATE); + if (entry.count == 0) { + ALOGE("%s: Camera %d: No AE state provided by HAL for frame %d!", + __FUNCTION__, mId, frameNumber); + gotAllStates = false; + } else { + new3aState.aeState = + static_cast<camera_metadata_enum_android_control_ae_state>( + entry.data.u8[0]); + } + + entry = frame.find(ANDROID_CONTROL_AF_STATE); + if (entry.count == 0) { + ALOGE("%s: Camera %d: No AF state provided by HAL for frame %d!", + __FUNCTION__, mId, frameNumber); + gotAllStates = false; + } else { + 
new3aState.afState = + static_cast<camera_metadata_enum_android_control_af_state>( + entry.data.u8[0]); + } + + entry = frame.find(ANDROID_CONTROL_AWB_STATE); + if (entry.count == 0) { + ALOGE("%s: Camera %d: No AWB state provided by HAL for frame %d!", + __FUNCTION__, mId, frameNumber); + gotAllStates = false; + } else { + new3aState.awbState = + static_cast<camera_metadata_enum_android_control_awb_state>( + entry.data.u8[0]); + } + + int32_t afTriggerId = 0; + entry = frame.find(ANDROID_CONTROL_AF_TRIGGER_ID); + if (entry.count == 0) { + ALOGE("%s: Camera %d: No AF trigger ID provided by HAL for frame %d!", + __FUNCTION__, mId, frameNumber); + gotAllStates = false; + } else { + afTriggerId = entry.data.i32[0]; + } + + int32_t aeTriggerId = 0; + entry = frame.find(ANDROID_CONTROL_AE_PRECAPTURE_ID); + if (entry.count == 0) { + ALOGE("%s: Camera %d: No AE precapture trigger ID provided by HAL" + " for frame %d!", + __FUNCTION__, mId, frameNumber); + gotAllStates = false; + } else { + aeTriggerId = entry.data.i32[0]; + } + + if (!gotAllStates) return BAD_VALUE; + + if (new3aState.aeState != m3aState.aeState) { + ALOGV("%s: AE state changed from 0x%x to 0x%x", + __FUNCTION__, m3aState.aeState, new3aState.aeState); + client->notifyAutoExposure(new3aState.aeState, aeTriggerId); + } + if (new3aState.afState != m3aState.afState) { + ALOGV("%s: AF state changed from 0x%x to 0x%x", + __FUNCTION__, m3aState.afState, new3aState.afState); + client->notifyAutoFocus(new3aState.afState, afTriggerId); + } + if (new3aState.awbState != m3aState.awbState) { + ALOGV("%s: AWB state changed from 0x%x to 0x%x", + __FUNCTION__, m3aState.awbState, new3aState.awbState); + client->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId); + } + + m3aState = new3aState; + + return OK; +} + + +void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client, + const camera_frame_metadata &metadata) { + + camera_frame_metadata *metadata_ptr = + const_cast<camera_frame_metadata*>(&metadata); + + /** + * Filter out repeated 0-face callbacks, + * but not when the last frame was >0 + */ + if (metadata.number_of_faces != 0 || + mLastFrameNumberOfFaces != metadata.number_of_faces) { + + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); + if (l.mRemoteCallback != NULL) { + l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_METADATA, + NULL, + metadata_ptr); + } + } + + mLastFrameNumberOfFaces = metadata.number_of_faces; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h new file mode 100644 index 0000000..2a17d45 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
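An aside on the pattern above: process3aState() caches the previously reported AE/AF/AWB values in m3aState and only invokes the notifyAuto* hooks when a value actually changes, so the API-1 client sees one callback per 3A transition rather than one per frame. A minimal standalone sketch of that change-filtering idea, in plain C++ with hypothetical names (not part of this change):

    #include <cstdio>

    // Simplified stand-in for the metadata enum the real code uses.
    enum class AfState { Inactive, Scanning, FocusedLocked, NotFocusedLocked };

    // Cached copy of the last-reported state (the real AlgState tracks AE/AWB too).
    struct Alg3A {
        AfState af = AfState::Inactive;
    };

    // Notify only when the freshly parsed state differs from the cached one.
    template <typename Notify>
    void updateAfState(Alg3A &cached, AfState fresh, Notify notify) {
        if (fresh != cached.af) {
            notify(fresh);
            cached.af = fresh;
        }
    }

    int main() {
        Alg3A cached;
        auto notify = [](AfState s) {
            std::printf("AF state -> %d\n", static_cast<int>(s));
        };
        updateAfState(cached, AfState::Scanning, notify);       // reported
        updateAfState(cached, AfState::Scanning, notify);       // filtered out
        updateAfState(cached, AfState::FocusedLocked, notify);  // reported
        return 0;
    }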
+ */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_FRAMEPROCESSOR_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/KeyedVector.h> +#include <utils/List.h> +#include <camera/CameraMetadata.h> + +#include "common/FrameProcessorBase.h" + +struct camera_frame_metadata; + +namespace android { + +class Camera2Client; + +namespace camera2 { + +/* Output frame metadata processing thread. This thread waits for new + * frames from the device, and analyzes them as necessary. + */ +class FrameProcessor : public FrameProcessorBase { + public: + FrameProcessor(wp<CameraDeviceBase> device, wp<Camera2Client> client); + ~FrameProcessor(); + + private: + wp<Camera2Client> mClient; + + bool mSynthesize3ANotify; + + int mLastFrameNumberOfFaces; + + void processNewFrames(const sp<Camera2Client> &client); + + virtual bool processSingleFrame(CameraMetadata &frame, + const sp<CameraDeviceBase> &device); + + status_t processFaceDetect(const CameraMetadata &frame, + const sp<Camera2Client> &client); + + // Send 3A state change notifications to client based on frame metadata + status_t process3aState(const CameraMetadata &frame, + const sp<Camera2Client> &client); + + struct AlgState { + camera_metadata_enum_android_control_ae_state aeState; + camera_metadata_enum_android_control_af_state afState; + camera_metadata_enum_android_control_awb_state awbState; + + AlgState() : + aeState(ANDROID_CONTROL_AE_STATE_INACTIVE), + afState(ANDROID_CONTROL_AF_STATE_INACTIVE), + awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) { + } + } m3aState; + + // Emit FaceDetection event to java if faces changed + void callbackFaceDetection(sp<Camera2Client> client, + const camera_frame_metadata &metadata); +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp new file mode 100644 index 0000000..2f0c67d --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
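callbackFaceDetection(), declared above and implemented earlier in FrameProcessor.cpp, suppresses repeated zero-face callbacks: a frame with no faces is only reported when the previous frame still had faces, so the app receives a single "faces disappeared" event instead of a callback on every empty frame. A tiny standalone illustration of that predicate (hypothetical names, illustrative only):

    #include <cassert>

    // Mirrors the filter applied before dispatching CAMERA_MSG_PREVIEW_METADATA:
    // report when faces are present, or when the count has just dropped to zero.
    bool shouldReportFaces(int lastCount, int newCount) {
        return newCount != 0 || lastCount != newCount;
    }

    int main() {
        assert(shouldReportFaces(0, 2));    // faces appeared
        assert(shouldReportFaces(2, 0));    // faces vanished, one final callback
        assert(!shouldReportFaces(0, 0));   // still no faces, stay quiet
        return 0;
    }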
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "Camera2-JpegCompressor" + +#include <utils/Log.h> +#include <ui/GraphicBufferMapper.h> + +#include "JpegCompressor.h" + +namespace android { +namespace camera2 { + +JpegCompressor::JpegCompressor(): + Thread(false), + mIsBusy(false), + mCaptureTime(0) { +} + +JpegCompressor::~JpegCompressor() { + ALOGV("%s", __FUNCTION__); + Mutex::Autolock lock(mMutex); +} + +status_t JpegCompressor::start(Vector<CpuConsumer::LockedBuffer*> buffers, + nsecs_t captureTime) { + ALOGV("%s", __FUNCTION__); + Mutex::Autolock busyLock(mBusyMutex); + + if (mIsBusy) { + ALOGE("%s: Already processing a buffer!", __FUNCTION__); + return INVALID_OPERATION; + } + + mIsBusy = true; + + mBuffers = buffers; + mCaptureTime = captureTime; + + status_t res; + res = run("JpegCompressor"); + if (res != OK) { + ALOGE("%s: Unable to start up compression thread: %s (%d)", + __FUNCTION__, strerror(-res), res); + //delete mBuffers; // necessary? + } + return res; +} + +status_t JpegCompressor::cancel() { + ALOGV("%s", __FUNCTION__); + requestExitAndWait(); + return OK; +} + +status_t JpegCompressor::readyToRun() { + ALOGV("%s", __FUNCTION__); + return OK; +} + +bool JpegCompressor::threadLoop() { + ALOGV("%s", __FUNCTION__); + + mAuxBuffer = mBuffers[0]; // input + mJpegBuffer = mBuffers[1]; // output + + // Set up error management + mJpegErrorInfo = NULL; + JpegError error; + error.parent = this; + + mCInfo.err = jpeg_std_error(&error); + mCInfo.err->error_exit = jpegErrorHandler; + + jpeg_create_compress(&mCInfo); + if (checkError("Error initializing compression")) return false; + + // Route compressed data straight to output stream buffer + JpegDestination jpegDestMgr; + jpegDestMgr.parent = this; + jpegDestMgr.init_destination = jpegInitDestination; + jpegDestMgr.empty_output_buffer = jpegEmptyOutputBuffer; + jpegDestMgr.term_destination = jpegTermDestination; + + mCInfo.dest = &jpegDestMgr; + + // Set up compression parameters + mCInfo.image_width = mAuxBuffer->width; + mCInfo.image_height = mAuxBuffer->height; + mCInfo.input_components = 1; // 3; + mCInfo.in_color_space = JCS_GRAYSCALE; // JCS_RGB + + ALOGV("%s: image_width = %d, image_height = %d", __FUNCTION__, mCInfo.image_width, mCInfo.image_height); + + jpeg_set_defaults(&mCInfo); + if (checkError("Error configuring defaults")) return false; + + // Do compression + jpeg_start_compress(&mCInfo, TRUE); + if (checkError("Error starting compression")) return false; + + size_t rowStride = mAuxBuffer->stride;// * 3; + const size_t kChunkSize = 32; + while (mCInfo.next_scanline < mCInfo.image_height) { + JSAMPROW chunk[kChunkSize]; + for (size_t i = 0 ; i < kChunkSize; i++) { + chunk[i] = (JSAMPROW) + (mAuxBuffer->data + (i + mCInfo.next_scanline) * rowStride); + } + jpeg_write_scanlines(&mCInfo, chunk, kChunkSize); + if (checkError("Error while compressing")) return false; + if (exitPending()) { + ALOGV("%s: Cancel called, exiting early", __FUNCTION__); + cleanUp(); + return false; + } + } + + jpeg_finish_compress(&mCInfo); + if (checkError("Error while finishing compression")) return false; + + cleanUp(); + return false; +} + +bool JpegCompressor::isBusy() { + ALOGV("%s", __FUNCTION__); + Mutex::Autolock busyLock(mBusyMutex); + return mIsBusy; +} + +// old function -- TODO: update for new buffer type +bool JpegCompressor::isStreamInUse(uint32_t /*id*/) { + ALOGV("%s", __FUNCTION__); + Mutex::Autolock lock(mBusyMutex); + + if (mBuffers.size() && mIsBusy) { + for (size_t i = 0; i < mBuffers.size(); i++) { +// if ( 
mBuffers[i].streamId == (int)id ) return true; + } + } + return false; +} + +bool JpegCompressor::waitForDone(nsecs_t timeout) { + ALOGV("%s", __FUNCTION__); + Mutex::Autolock lock(mBusyMutex); + status_t res = OK; + if (mIsBusy) { + res = mDone.waitRelative(mBusyMutex, timeout); + } + return (res == OK); +} + +bool JpegCompressor::checkError(const char *msg) { + ALOGV("%s", __FUNCTION__); + if (mJpegErrorInfo) { + char errBuffer[JMSG_LENGTH_MAX]; + mJpegErrorInfo->err->format_message(mJpegErrorInfo, errBuffer); + ALOGE("%s: %s: %s", + __FUNCTION__, msg, errBuffer); + cleanUp(); + mJpegErrorInfo = NULL; + return true; + } + return false; +} + +void JpegCompressor::cleanUp() { + ALOGV("%s", __FUNCTION__); + jpeg_destroy_compress(&mCInfo); + Mutex::Autolock lock(mBusyMutex); + mIsBusy = false; + mDone.signal(); +} + +void JpegCompressor::jpegErrorHandler(j_common_ptr cinfo) { + ALOGV("%s", __FUNCTION__); + JpegError *error = static_cast<JpegError*>(cinfo->err); + error->parent->mJpegErrorInfo = cinfo; +} + +void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) { + ALOGV("%s", __FUNCTION__); + JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest); + ALOGV("%s: Setting destination to %p, size %d", + __FUNCTION__, dest->parent->mJpegBuffer->data, kMaxJpegSize); + dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer->data); + dest->free_in_buffer = kMaxJpegSize; +} + +boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr /*cinfo*/) { + ALOGV("%s", __FUNCTION__); + ALOGE("%s: JPEG destination buffer overflow!", + __FUNCTION__); + return true; +} + +void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) { + (void) cinfo; // TODO: clean up + ALOGV("%s", __FUNCTION__); + ALOGV("%s: Done writing JPEG data. %d bytes left in buffer", + __FUNCTION__, cinfo->dest->free_in_buffer); +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.h b/services/camera/libcameraservice/api1/client2/JpegCompressor.h new file mode 100644 index 0000000..945b1de --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/** + * This class simulates a hardware JPEG compressor. It receives image buffers + * in RGBA_8888 format, processes them in a worker thread, and then pushes them + * out to their destination stream. 
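The compressor's threadLoop() above routes libjpeg output through a custom jpeg_destination_mgr, so compressed bytes land directly in the output LockedBuffer rather than a stdio stream. The following self-contained sketch shows the same destination-manager technique for compressing a grayscale image into a caller-supplied buffer; it uses only the standard libjpeg API, and the buffer handling and names are illustrative rather than taken from this change:

    #include <cstddef>

    extern "C" {
    #include <jpeglib.h>
    }

    namespace {

    // Destination manager that writes into a fixed, caller-owned buffer.
    struct MemDest : jpeg_destination_mgr {
        unsigned char *buf;
        size_t capacity;
    };

    void initDestination(j_compress_ptr cinfo) {
        MemDest *dest = static_cast<MemDest *>(cinfo->dest);
        dest->next_output_byte = dest->buf;   // compress straight into the buffer
        dest->free_in_buffer = dest->capacity;
    }

    boolean emptyOutputBuffer(j_compress_ptr /*cinfo*/) {
        // Fixed-size buffer: running out of space is an overflow condition,
        // handled here the same way as jpegEmptyOutputBuffer above (keep going).
        return TRUE;
    }

    void termDestination(j_compress_ptr /*cinfo*/) {}

    }  // namespace

    // Compresses an 8-bit grayscale image; returns the number of JPEG bytes written.
    size_t compressGray(const unsigned char *pixels, int width, int height,
                        unsigned char *out, size_t outCapacity) {
        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        MemDest dest;
        dest.init_destination = initDestination;
        dest.empty_output_buffer = emptyOutputBuffer;
        dest.term_destination = termDestination;
        dest.buf = out;
        dest.capacity = outCapacity;
        cinfo.dest = &dest;

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.input_components = 1;           // single plane, like the code above
        cinfo.in_color_space = JCS_GRAYSCALE;
        jpeg_set_defaults(&cinfo);

        jpeg_start_compress(&cinfo, TRUE);
        while (cinfo.next_scanline < cinfo.image_height) {
            JSAMPROW row = const_cast<unsigned char *>(
                    pixels + cinfo.next_scanline * width);
            jpeg_write_scanlines(&cinfo, &row, 1);
        }
        jpeg_finish_compress(&cinfo);

        size_t written = outCapacity - dest.free_in_buffer;
        jpeg_destroy_compress(&cinfo);
        return written;
    }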
+ */ + +#ifndef ANDROID_SERVERS_CAMERA_JPEGCOMPRESSOR_H +#define ANDROID_SERVERS_CAMERA_JPEGCOMPRESSOR_H + +#include "utils/Thread.h" +#include "utils/Mutex.h" +#include "utils/Timers.h" +#include "utils/Vector.h" +//#include "Base.h" +#include <stdio.h> +#include <gui/CpuConsumer.h> + +extern "C" { +#include <jpeglib.h> +} + + +namespace android { +namespace camera2 { + +class JpegCompressor: private Thread, public virtual RefBase { + public: + + JpegCompressor(); + ~JpegCompressor(); + + // Start compressing COMPRESSED format buffers; JpegCompressor takes + // ownership of the Buffers vector. + status_t start(Vector<CpuConsumer::LockedBuffer*> buffers, + nsecs_t captureTime); + + status_t cancel(); + + bool isBusy(); + bool isStreamInUse(uint32_t id); + + bool waitForDone(nsecs_t timeout); + + // TODO: Measure this + static const size_t kMaxJpegSize = 300000; + + private: + Mutex mBusyMutex; + Mutex mMutex; + bool mIsBusy; + Condition mDone; + nsecs_t mCaptureTime; + + Vector<CpuConsumer::LockedBuffer*> mBuffers; + CpuConsumer::LockedBuffer *mJpegBuffer; + CpuConsumer::LockedBuffer *mAuxBuffer; + bool mFoundJpeg, mFoundAux; + + jpeg_compress_struct mCInfo; + + struct JpegError : public jpeg_error_mgr { + JpegCompressor *parent; + }; + j_common_ptr mJpegErrorInfo; + + struct JpegDestination : public jpeg_destination_mgr { + JpegCompressor *parent; + }; + + static void jpegErrorHandler(j_common_ptr cinfo); + + static void jpegInitDestination(j_compress_ptr cinfo); + static boolean jpegEmptyOutputBuffer(j_compress_ptr cinfo); + static void jpegTermDestination(j_compress_ptr cinfo); + + bool checkError(const char *msg); + void cleanUp(); + + /** + * Inherited Thread virtual overrides + */ + private: + virtual status_t readyToRun(); + virtual bool threadLoop(); +}; + +}; // namespace camera2 +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp new file mode 100644 index 0000000..b920edf --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp @@ -0,0 +1,388 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
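JpegCompressor::waitForDone() lets a caller block, with a timeout, until the worker thread signals completion; internally that is just a busy flag plus a condition variable signalled from cleanUp(). A standalone sketch of the same wait-for-done pattern using the C++ standard library instead of the Android Mutex/Condition classes (hypothetical names, illustrative only):

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Busy/done tracker mirroring the mBusyMutex / mIsBusy / mDone trio.
    class DoneSignal {
      public:
        void setBusy() {
            std::lock_guard<std::mutex> lock(mMutex);
            mBusy = true;
        }
        void markDone() {  // equivalent of cleanUp() signalling mDone
            std::lock_guard<std::mutex> lock(mMutex);
            mBusy = false;
            mDone.notify_all();
        }
        // Returns true if the worker finished within the timeout.
        bool waitForDone(std::chrono::nanoseconds timeout) {
            std::unique_lock<std::mutex> lock(mMutex);
            return mDone.wait_for(lock, timeout, [this] { return !mBusy; });
        }

      private:
        std::mutex mMutex;
        std::condition_variable mDone;
        bool mBusy = false;
    };

    int main() {
        DoneSignal signal;
        signal.setBusy();
        std::thread worker([&] {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            signal.markDone();
        });
        bool ok = signal.waitForDone(std::chrono::seconds(10));
        std::printf("JPEG worker finished in time: %s\n", ok ? "yes" : "no");
        worker.join();
        return 0;
    }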
+ */ + +#define LOG_TAG "Camera2-JpegProcessor" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <netinet/in.h> + +#include <binder/MemoryBase.h> +#include <binder/MemoryHeapBase.h> +#include <utils/Log.h> +#include <utils/Trace.h> +#include <gui/Surface.h> + +#include "common/CameraDeviceBase.h" +#include "api1/Camera2Client.h" +#include "api1/client2/Camera2Heap.h" +#include "api1/client2/CaptureSequencer.h" +#include "api1/client2/JpegProcessor.h" + +namespace android { +namespace camera2 { + +JpegProcessor::JpegProcessor( + sp<Camera2Client> client, + wp<CaptureSequencer> sequencer): + Thread(false), + mDevice(client->getCameraDevice()), + mSequencer(sequencer), + mId(client->getCameraId()), + mCaptureAvailable(false), + mCaptureStreamId(NO_STREAM) { +} + +JpegProcessor::~JpegProcessor() { + ALOGV("%s: Exit", __FUNCTION__); + deleteStream(); +} + +void JpegProcessor::onFrameAvailable() { + Mutex::Autolock l(mInputMutex); + if (!mCaptureAvailable) { + mCaptureAvailable = true; + mCaptureAvailableSignal.signal(); + } +} + +status_t JpegProcessor::updateStream(const Parameters ¶ms) { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + status_t res; + + Mutex::Autolock l(mInputMutex); + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + // Find out buffer size for JPEG + camera_metadata_ro_entry_t maxJpegSize = + params.staticInfo(ANDROID_JPEG_MAX_SIZE); + if (maxJpegSize.count == 0) { + ALOGE("%s: Camera %d: Can't find ANDROID_JPEG_MAX_SIZE!", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mCaptureConsumer == 0) { + // Create CPU buffer queue endpoint + sp<BufferQueue> bq = new BufferQueue(); + mCaptureConsumer = new CpuConsumer(bq, 1); + mCaptureConsumer->setFrameAvailableListener(this); + mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer")); + mCaptureWindow = new Surface( + mCaptureConsumer->getProducerInterface()); + // Create memory for API consumption + mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0, + "Camera2Client::CaptureHeap"); + if (mCaptureHeap->getSize() == 0) { + ALOGE("%s: Camera %d: Unable to allocate memory for capture", + __FUNCTION__, mId); + return NO_MEMORY; + } + } + + if (mCaptureStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight; + res = device->getStreamInfo(mCaptureStreamId, + ¤tWidth, ¤tHeight, 0); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying capture output stream info: " + "%s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.pictureWidth || + currentHeight != (uint32_t)params.pictureHeight) { + ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed", + __FUNCTION__, mId, mCaptureStreamId); + res = device->deleteStream(mCaptureStreamId); + if (res == -EBUSY) { + ALOGV("%s: Camera %d: Device is busy, call updateStream again " + " after it becomes idle", __FUNCTION__, mId); + return res; + } else if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for capture: %s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + mCaptureStreamId = NO_STREAM; + } + } + + if (mCaptureStreamId == NO_STREAM) { + // Create stream for HAL production + res = device->createStream(mCaptureWindow, + params.pictureWidth, params.pictureHeight, + HAL_PIXEL_FORMAT_BLOB, maxJpegSize.data.i32[0], + 
&mCaptureStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Can't create output stream for capture: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + + } + return OK; +} + +status_t JpegProcessor::deleteStream() { + ATRACE_CALL(); + + Mutex::Autolock l(mInputMutex); + + if (mCaptureStreamId != NO_STREAM) { + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + device->deleteStream(mCaptureStreamId); + + mCaptureHeap.clear(); + mCaptureWindow.clear(); + mCaptureConsumer.clear(); + + mCaptureStreamId = NO_STREAM; + } + return OK; +} + +int JpegProcessor::getStreamId() const { + Mutex::Autolock l(mInputMutex); + return mCaptureStreamId; +} + +void JpegProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const { +} + +bool JpegProcessor::threadLoop() { + status_t res; + + { + Mutex::Autolock l(mInputMutex); + while (!mCaptureAvailable) { + res = mCaptureAvailableSignal.waitRelative(mInputMutex, + kWaitDuration); + if (res == TIMED_OUT) return true; + } + mCaptureAvailable = false; + } + + do { + res = processNewCapture(); + } while (res == OK); + + return true; +} + +status_t JpegProcessor::processNewCapture() { + ATRACE_CALL(); + status_t res; + sp<Camera2Heap> captureHeap; + + CpuConsumer::LockedBuffer imgBuffer; + + res = mCaptureConsumer->lockNextBuffer(&imgBuffer); + if (res != OK) { + if (res != BAD_VALUE) { + ALOGE("%s: Camera %d: Error receiving still image buffer: " + "%s (%d)", __FUNCTION__, + mId, strerror(-res), res); + } + return res; + } + + ALOGV("%s: Camera %d: Still capture available", __FUNCTION__, + mId); + + if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) { + ALOGE("%s: Camera %d: Unexpected format for still image: " + "%x, expected %x", __FUNCTION__, mId, + imgBuffer.format, + HAL_PIXEL_FORMAT_BLOB); + mCaptureConsumer->unlockBuffer(imgBuffer); + return OK; + } + + // Find size of JPEG image + size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width); + if (jpegSize == 0) { // failed to find size, default to whole buffer + jpegSize = imgBuffer.width; + } + size_t heapSize = mCaptureHeap->getSize(); + if (jpegSize > heapSize) { + ALOGW("%s: JPEG image is larger than expected, truncating " + "(got %d, expected at most %d bytes)", + __FUNCTION__, jpegSize, heapSize); + jpegSize = heapSize; + } + + // TODO: Optimize this to avoid memcopy + sp<MemoryBase> captureBuffer = new MemoryBase(mCaptureHeap, 0, jpegSize); + void* captureMemory = mCaptureHeap->getBase(); + memcpy(captureMemory, imgBuffer.data, jpegSize); + + mCaptureConsumer->unlockBuffer(imgBuffer); + + sp<CaptureSequencer> sequencer = mSequencer.promote(); + if (sequencer != 0) { + sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer); + } + + return OK; +} + +/* + * JPEG FILE FORMAT OVERVIEW. + * http://www.jpeg.org/public/jfif.pdf + * (JPEG is the image compression algorithm, actual file format is called JFIF) + * + * "Markers" are 2-byte patterns used to distinguish parts of JFIF files. The + * first byte is always 0xFF, and the second byte is between 0x01 and 0xFE + * (inclusive). Because every marker begins with the same byte, they are + * referred to by the second byte's value. + * + * JFIF files all begin with the Start of Image (SOI) marker, which is 0xD8. + * Following it, "segment" sections begin with other markers, followed by a + * 2-byte length (in network byte order), then the segment data. 
+ * + * For our purposes we will ignore the data, and just use the length to skip to + * the next segment. This is necessary because the data inside segments are + * allowed to contain the End of Image marker (0xFF 0xD9), preventing us from + * naievely scanning until the end. + * + * After all the segments are processed, the jpeg compressed image stream begins. + * This can be considered an opaque format with one requirement: all 0xFF bytes + * in this stream must be followed with a 0x00 byte. This prevents any of the + * image data to be interpreted as a segment. The only exception to this is at + * the end of the image stream there is an End of Image (EOI) marker, which is + * 0xFF followed by a non-zero (0xD9) byte. + */ + +const uint8_t MARK = 0xFF; // First byte of marker +const uint8_t SOI = 0xD8; // Start of Image +const uint8_t EOI = 0xD9; // End of Image +const size_t MARKER_LENGTH = 2; // length of a marker + +#pragma pack(push) +#pragma pack(1) +typedef struct segment { + uint8_t marker[MARKER_LENGTH]; + uint16_t length; +} segment_t; +#pragma pack(pop) + +/* HELPER FUNCTIONS */ + +// check for Start of Image marker +bool checkJpegStart(uint8_t* buf) { + return buf[0] == MARK && buf[1] == SOI; +} +// check for End of Image marker +bool checkJpegEnd(uint8_t *buf) { + return buf[0] == MARK && buf[1] == EOI; +} +// check for arbitrary marker, returns marker type (second byte) +// returns 0 if no marker found. Note: 0x00 is not a valid marker type +uint8_t checkJpegMarker(uint8_t *buf) { + if (buf[0] == MARK && buf[1] > 0 && buf[1] < 0xFF) { + return buf[1]; + } + return 0; +} + +// Return the size of the JPEG, 0 indicates failure +size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) { + size_t size; + + // First check for JPEG transport header at the end of the buffer + uint8_t *header = jpegBuffer + (maxSize - sizeof(struct camera2_jpeg_blob)); + struct camera2_jpeg_blob *blob = (struct camera2_jpeg_blob*)(header); + if (blob->jpeg_blob_id == CAMERA2_JPEG_BLOB_ID) { + size = blob->jpeg_size; + if (size > 0 && size <= maxSize - sizeof(struct camera2_jpeg_blob)) { + // Verify SOI and EOI markers + size_t offset = size - MARKER_LENGTH; + uint8_t *end = jpegBuffer + offset; + if (checkJpegStart(jpegBuffer) && checkJpegEnd(end)) { + ALOGV("Found JPEG transport header, img size %d", size); + return size; + } else { + ALOGW("Found JPEG transport header with bad Image Start/End"); + } + } else { + ALOGW("Found JPEG transport header with bad size %d", size); + } + } + + // Check Start of Image + if ( !checkJpegStart(jpegBuffer) ) { + ALOGE("Could not find start of JPEG marker"); + return 0; + } + + // Read JFIF segment markers, skip over segment data + size = 0; + while (size <= maxSize - MARKER_LENGTH) { + segment_t *segment = (segment_t*)(jpegBuffer + size); + uint8_t type = checkJpegMarker(segment->marker); + if (type == 0) { // invalid marker, no more segments, begin JPEG data + ALOGV("JPEG stream found beginning at offset %d", size); + break; + } + if (type == EOI || size > maxSize - sizeof(segment_t)) { + ALOGE("Got premature End before JPEG data, offset %d", size); + return 0; + } + size_t length = ntohs(segment->length); + ALOGV("JFIF Segment, type %x length %x", type, length); + size += length + MARKER_LENGTH; + } + + // Find End of Image + // Scan JPEG buffer until End of Image (EOI) + bool foundEnd = false; + for ( ; size <= maxSize - MARKER_LENGTH; size++) { + if ( checkJpegEnd(jpegBuffer + size) ) { + foundEnd = true; + size += MARKER_LENGTH; + break; + } + 
} + if (!foundEnd) { + ALOGE("Could not find end of JPEG marker"); + return 0; + } + + if (size > maxSize) { + ALOGW("JPEG size %d too large, reducing to maxSize %d", size, maxSize); + size = maxSize; + } + ALOGV("Final JPEG size %d", size); + return size; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h new file mode 100644 index 0000000..b2c05df --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> +#include <gui/CpuConsumer.h> + +#include "camera/CameraMetadata.h" + +namespace android { + +class Camera2Client; +class CameraDeviceBase; +class MemoryHeapBase; + +namespace camera2 { + +class CaptureSequencer; +class Parameters; + +/*** + * Still image capture output image processing + */ +class JpegProcessor: + public Thread, public CpuConsumer::FrameAvailableListener { + public: + JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); + ~JpegProcessor(); + + // CpuConsumer listener implementation + void onFrameAvailable(); + + status_t updateStream(const Parameters ¶ms); + status_t deleteStream(); + int getStreamId() const; + + void dump(int fd, const Vector<String16>& args) const; + private: + static const nsecs_t kWaitDuration = 10000000; // 10 ms + wp<CameraDeviceBase> mDevice; + wp<CaptureSequencer> mSequencer; + int mId; + + mutable Mutex mInputMutex; + bool mCaptureAvailable; + Condition mCaptureAvailableSignal; + + enum { + NO_STREAM = -1 + }; + + int mCaptureStreamId; + sp<CpuConsumer> mCaptureConsumer; + sp<ANativeWindow> mCaptureWindow; + sp<MemoryHeapBase> mCaptureHeap; + + virtual bool threadLoop(); + + status_t processNewCapture(); + size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize); + +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp new file mode 100644 index 0000000..0459866 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -0,0 +1,2645 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
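JpegProcessor::findJpegSize() above walks the JFIF layout described in the preceding comment block: verify the SOI marker, hop over each marker-plus-big-endian-length segment, then scan the entropy-coded stream for the EOI marker. A compact standalone version of that walk, without the camera2_jpeg_blob transport-header check and with the endian conversion done by hand (illustrative only, not the committed implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    namespace {
    const uint8_t kMark = 0xFF;
    const uint8_t kSOI  = 0xD8;  // Start of Image
    const uint8_t kEOI  = 0xD9;  // End of Image

    // Segment length is stored big-endian right after the two marker bytes.
    uint16_t beLength(const uint8_t *p) {
        return static_cast<uint16_t>((p[0] << 8) | p[1]);
    }
    }  // namespace

    // Returns the JPEG size in bytes, or 0 if the buffer does not parse.
    size_t findJpegSize(const uint8_t *buf, size_t maxSize) {
        if (maxSize < 4 || buf[0] != kMark || buf[1] != kSOI) return 0;

        // Skip JFIF segments until a non-marker byte signals entropy-coded data.
        size_t off = 2;
        while (off + 4 <= maxSize) {
            if (buf[off] != kMark || buf[off + 1] == 0x00 || buf[off + 1] == 0xFF) break;
            if (buf[off + 1] == kEOI) return 0;  // EOI before any image data
            off += 2 + beLength(buf + off + 2);  // marker + length-prefixed payload
        }

        // Scan the compressed stream for the EOI marker.
        for (; off + 2 <= maxSize; off++) {
            if (buf[off] == kMark && buf[off + 1] == kEOI) return off + 2;
        }
        return 0;
    }

    int main() {
        // Tiny fabricated buffer: SOI, one empty segment, two data bytes, EOI.
        const uint8_t jpeg[] = {0xFF, 0xD8, 0xFF, 0xE0, 0x00, 0x02,
                                0x12, 0x34, 0xFF, 0xD9};
        std::printf("size = %zu\n", findJpegSize(jpeg, sizeof(jpeg)));  // prints 10
        return 0;
    }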
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Camera2-Parameters" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <utils/Vector.h> +#include <utils/SortedVector.h> + +#include <math.h> +#include <stdlib.h> +#include <cutils/properties.h> + +#include "Parameters.h" +#include "system/camera.h" + +namespace android { +namespace camera2 { + +Parameters::Parameters(int cameraId, + int cameraFacing) : + cameraId(cameraId), + cameraFacing(cameraFacing), + info(NULL) { +} + +Parameters::~Parameters() { +} + +status_t Parameters::initialize(const CameraMetadata *info) { + status_t res; + + if (info->entryCount() == 0) { + ALOGE("%s: No static information provided!", __FUNCTION__); + return BAD_VALUE; + } + Parameters::info = info; + + res = buildFastInfo(); + if (res != OK) return res; + + res = buildQuirks(); + if (res != OK) return res; + + camera_metadata_ro_entry_t availableProcessedSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, 2); + if (!availableProcessedSizes.count) return NO_INIT; + + // TODO: Pick more intelligently + previewWidth = availableProcessedSizes.data.i32[0]; + previewHeight = availableProcessedSizes.data.i32[1]; + videoWidth = previewWidth; + videoHeight = previewHeight; + + params.setPreviewSize(previewWidth, previewHeight); + params.setVideoSize(videoWidth, videoHeight); + params.set(CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO, + String8::format("%dx%d", + previewWidth, previewHeight)); + { + String8 supportedPreviewSizes; + for (size_t i=0; i < availableProcessedSizes.count; i += 2) { + if (i != 0) supportedPreviewSizes += ","; + supportedPreviewSizes += String8::format("%dx%d", + availableProcessedSizes.data.i32[i], + availableProcessedSizes.data.i32[i+1]); + } + params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES, + supportedPreviewSizes); + params.set(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES, + supportedPreviewSizes); + } + + camera_metadata_ro_entry_t availableFpsRanges = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2); + if (!availableFpsRanges.count) return NO_INIT; + + previewFpsRange[0] = availableFpsRanges.data.i32[0]; + previewFpsRange[1] = availableFpsRanges.data.i32[1]; + + params.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, + String8::format("%d,%d", + previewFpsRange[0] * kFpsToApiScale, + previewFpsRange[1] * kFpsToApiScale)); + + { + String8 supportedPreviewFpsRange; + for (size_t i=0; i < availableFpsRanges.count; i += 2) { + if (i != 0) supportedPreviewFpsRange += ","; + supportedPreviewFpsRange += String8::format("(%d,%d)", + availableFpsRanges.data.i32[i] * kFpsToApiScale, + availableFpsRanges.data.i32[i+1] * kFpsToApiScale); + } + params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, + supportedPreviewFpsRange); + } + + previewFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; + params.set(CameraParameters::KEY_PREVIEW_FORMAT, + formatEnumToString(previewFormat)); // NV21 + + previewTransform = degToTransform(0, + cameraFacing == CAMERA_FACING_FRONT); + + camera_metadata_ro_entry_t availableFormats = + 
staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + + { + String8 supportedPreviewFormats; + bool addComma = false; + for (size_t i=0; i < availableFormats.count; i++) { + if (addComma) supportedPreviewFormats += ","; + addComma = true; + switch (availableFormats.data.i32[i]) { + case HAL_PIXEL_FORMAT_YCbCr_422_SP: + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV422SP; + break; + case HAL_PIXEL_FORMAT_YCrCb_420_SP: + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV420SP; + break; + case HAL_PIXEL_FORMAT_YCbCr_422_I: + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV422I; + break; + case HAL_PIXEL_FORMAT_YV12: + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV420P; + break; + case HAL_PIXEL_FORMAT_RGB_565: + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_RGB565; + break; + case HAL_PIXEL_FORMAT_RGBA_8888: + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_RGBA8888; + break; + case HAL_PIXEL_FORMAT_YCbCr_420_888: + // Flexible YUV allows both YV12 and NV21 + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV420P; + supportedPreviewFormats += ","; + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV420SP; + break; + // Not advertizing JPEG, RAW_SENSOR, etc, for preview formats + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + case HAL_PIXEL_FORMAT_RAW_SENSOR: + case HAL_PIXEL_FORMAT_BLOB: + addComma = false; + break; + + default: + ALOGW("%s: Camera %d: Unknown preview format: %x", + __FUNCTION__, cameraId, availableFormats.data.i32[i]); + addComma = false; + break; + } + } + params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS, + supportedPreviewFormats); + } + + // PREVIEW_FRAME_RATE / SUPPORTED_PREVIEW_FRAME_RATES are deprecated, but + // still have to do something sane for them + + // NOTE: Not scaled like FPS range values are. 
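The code above publishes HAL fps ranges in both API-1 forms: KEY_PREVIEW_FPS_RANGE entries are multiplied by kFpsToApiScale (assumed here to be 1000, matching the legacy convention of expressing ranges in milli-fps, e.g. "15000,30000"), while the deprecated single KEY_PREVIEW_FRAME_RATE derived just below stays unscaled and takes the maximum of the range. A small standalone sketch of that mapping (the helper, names, and scale constant are assumptions for illustration, not taken from this change):

    #include <cstdio>
    #include <string>

    namespace {
    const int kFpsToApiScale = 1000;  // assumed: API-1 fps ranges are in milli-fps

    // Deprecated single frame rate: take the upper bound of the [min, max] range.
    int fpsFromRange(int /*min*/, int max) { return max; }
    }  // namespace

    int main() {
        const int halRange[2] = {15, 30};  // HAL reports ranges in whole fps

        std::string apiRange = std::to_string(halRange[0] * kFpsToApiScale) + "," +
                               std::to_string(halRange[1] * kFpsToApiScale);
        int legacyFps = fpsFromRange(halRange[0], halRange[1]);

        std::printf("KEY_PREVIEW_FPS_RANGE  = %s\n", apiRange.c_str());  // 15000,30000
        std::printf("KEY_PREVIEW_FRAME_RATE = %d\n", legacyFps);         // 30
        return 0;
    }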
+ previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]); + params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE, + previewFps); + + { + SortedVector<int32_t> sortedPreviewFrameRates; + + String8 supportedPreviewFrameRates; + for (size_t i=0; i < availableFpsRanges.count; i += 2) { + // from the [min, max] fps range use the max value + int fps = fpsFromRange(availableFpsRanges.data.i32[i], + availableFpsRanges.data.i32[i+1]); + + // de-dupe frame rates + if (sortedPreviewFrameRates.indexOf(fps) == NAME_NOT_FOUND) { + sortedPreviewFrameRates.add(fps); + } + else { + continue; + } + + if (sortedPreviewFrameRates.size() > 1) { + supportedPreviewFrameRates += ","; + } + + supportedPreviewFrameRates += String8::format("%d", + fps); + + ALOGV("%s: Supported preview frame rates: %s", + __FUNCTION__, supportedPreviewFrameRates.string()); + } + params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES, + supportedPreviewFrameRates); + } + + camera_metadata_ro_entry_t availableJpegSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, 2); + if (!availableJpegSizes.count) return NO_INIT; + + // TODO: Pick maximum + pictureWidth = availableJpegSizes.data.i32[0]; + pictureHeight = availableJpegSizes.data.i32[1]; + + params.setPictureSize(pictureWidth, + pictureHeight); + + { + String8 supportedPictureSizes; + for (size_t i=0; i < availableJpegSizes.count; i += 2) { + if (i != 0) supportedPictureSizes += ","; + supportedPictureSizes += String8::format("%dx%d", + availableJpegSizes.data.i32[i], + availableJpegSizes.data.i32[i+1]); + } + params.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, + supportedPictureSizes); + } + + params.setPictureFormat(CameraParameters::PIXEL_FORMAT_JPEG); + params.set(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS, + CameraParameters::PIXEL_FORMAT_JPEG); + + camera_metadata_ro_entry_t availableJpegThumbnailSizes = + staticInfo(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, 4); + if (!availableJpegThumbnailSizes.count) return NO_INIT; + + // TODO: Pick default thumbnail size sensibly + jpegThumbSize[0] = availableJpegThumbnailSizes.data.i32[0]; + jpegThumbSize[1] = availableJpegThumbnailSizes.data.i32[1]; + + params.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, + jpegThumbSize[0]); + params.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, + jpegThumbSize[1]); + + { + String8 supportedJpegThumbSizes; + for (size_t i=0; i < availableJpegThumbnailSizes.count; i += 2) { + if (i != 0) supportedJpegThumbSizes += ","; + supportedJpegThumbSizes += String8::format("%dx%d", + availableJpegThumbnailSizes.data.i32[i], + availableJpegThumbnailSizes.data.i32[i+1]); + } + params.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES, + supportedJpegThumbSizes); + } + + jpegThumbQuality = 90; + params.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY, + jpegThumbQuality); + jpegQuality = 90; + params.set(CameraParameters::KEY_JPEG_QUALITY, + jpegQuality); + jpegRotation = 0; + params.set(CameraParameters::KEY_ROTATION, + jpegRotation); + + gpsEnabled = false; + gpsCoordinates[0] = 0.0; + gpsCoordinates[1] = 0.0; + gpsCoordinates[2] = 0.0; + gpsTimestamp = 0; + gpsProcessingMethod = "unknown"; + // GPS fields in CameraParameters are not set by implementation + + wbMode = ANDROID_CONTROL_AWB_MODE_AUTO; + params.set(CameraParameters::KEY_WHITE_BALANCE, + CameraParameters::WHITE_BALANCE_AUTO); + + camera_metadata_ro_entry_t availableWhiteBalanceModes = + staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false); + if (!availableWhiteBalanceModes.count) { + 
params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE, + CameraParameters::WHITE_BALANCE_AUTO); + } else { + String8 supportedWhiteBalance; + bool addComma = false; + for (size_t i=0; i < availableWhiteBalanceModes.count; i++) { + if (addComma) supportedWhiteBalance += ","; + addComma = true; + switch (availableWhiteBalanceModes.data.u8[i]) { + case ANDROID_CONTROL_AWB_MODE_AUTO: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_AUTO; + break; + case ANDROID_CONTROL_AWB_MODE_INCANDESCENT: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_INCANDESCENT; + break; + case ANDROID_CONTROL_AWB_MODE_FLUORESCENT: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_FLUORESCENT; + break; + case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT; + break; + case ANDROID_CONTROL_AWB_MODE_DAYLIGHT: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_DAYLIGHT; + break; + case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT; + break; + case ANDROID_CONTROL_AWB_MODE_TWILIGHT: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_TWILIGHT; + break; + case ANDROID_CONTROL_AWB_MODE_SHADE: + supportedWhiteBalance += + CameraParameters::WHITE_BALANCE_SHADE; + break; + // Skipping values not mappable to v1 API + case ANDROID_CONTROL_AWB_MODE_OFF: + addComma = false; + break; + default: + ALOGW("%s: Camera %d: Unknown white balance value: %d", + __FUNCTION__, cameraId, + availableWhiteBalanceModes.data.u8[i]); + addComma = false; + break; + } + } + params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE, + supportedWhiteBalance); + } + + effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF; + params.set(CameraParameters::KEY_EFFECT, + CameraParameters::EFFECT_NONE); + + camera_metadata_ro_entry_t availableEffects = + staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS, 0, 0, false); + if (!availableEffects.count) { + params.set(CameraParameters::KEY_SUPPORTED_EFFECTS, + CameraParameters::EFFECT_NONE); + } else { + String8 supportedEffects; + bool addComma = false; + for (size_t i=0; i < availableEffects.count; i++) { + if (addComma) supportedEffects += ","; + addComma = true; + switch (availableEffects.data.u8[i]) { + case ANDROID_CONTROL_EFFECT_MODE_OFF: + supportedEffects += + CameraParameters::EFFECT_NONE; + break; + case ANDROID_CONTROL_EFFECT_MODE_MONO: + supportedEffects += + CameraParameters::EFFECT_MONO; + break; + case ANDROID_CONTROL_EFFECT_MODE_NEGATIVE: + supportedEffects += + CameraParameters::EFFECT_NEGATIVE; + break; + case ANDROID_CONTROL_EFFECT_MODE_SOLARIZE: + supportedEffects += + CameraParameters::EFFECT_SOLARIZE; + break; + case ANDROID_CONTROL_EFFECT_MODE_SEPIA: + supportedEffects += + CameraParameters::EFFECT_SEPIA; + break; + case ANDROID_CONTROL_EFFECT_MODE_POSTERIZE: + supportedEffects += + CameraParameters::EFFECT_POSTERIZE; + break; + case ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD: + supportedEffects += + CameraParameters::EFFECT_WHITEBOARD; + break; + case ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD: + supportedEffects += + CameraParameters::EFFECT_BLACKBOARD; + break; + case ANDROID_CONTROL_EFFECT_MODE_AQUA: + supportedEffects += + CameraParameters::EFFECT_AQUA; + break; + default: + ALOGW("%s: Camera %d: Unknown effect value: %d", + __FUNCTION__, cameraId, availableEffects.data.u8[i]); + addComma = false; + break; + } + } + params.set(CameraParameters::KEY_SUPPORTED_EFFECTS, supportedEffects); + } + + antibandingMode 
= ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO; + params.set(CameraParameters::KEY_ANTIBANDING, + CameraParameters::ANTIBANDING_AUTO); + + camera_metadata_ro_entry_t availableAntibandingModes = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, 0, 0, false); + if (!availableAntibandingModes.count) { + params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING, + CameraParameters::ANTIBANDING_OFF); + } else { + String8 supportedAntibanding; + bool addComma = false; + for (size_t i=0; i < availableAntibandingModes.count; i++) { + if (addComma) supportedAntibanding += ","; + addComma = true; + switch (availableAntibandingModes.data.u8[i]) { + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF: + supportedAntibanding += + CameraParameters::ANTIBANDING_OFF; + break; + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ: + supportedAntibanding += + CameraParameters::ANTIBANDING_50HZ; + break; + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ: + supportedAntibanding += + CameraParameters::ANTIBANDING_60HZ; + break; + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO: + supportedAntibanding += + CameraParameters::ANTIBANDING_AUTO; + break; + default: + ALOGW("%s: Camera %d: Unknown antibanding value: %d", + __FUNCTION__, cameraId, + availableAntibandingModes.data.u8[i]); + addComma = false; + break; + } + } + params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING, + supportedAntibanding); + } + + sceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + params.set(CameraParameters::KEY_SCENE_MODE, + CameraParameters::SCENE_MODE_AUTO); + + camera_metadata_ro_entry_t availableSceneModes = + staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false); + if (!availableSceneModes.count) { + params.remove(CameraParameters::KEY_SCENE_MODE); + } else { + String8 supportedSceneModes(CameraParameters::SCENE_MODE_AUTO); + bool addComma = true; + bool noSceneModes = false; + for (size_t i=0; i < availableSceneModes.count; i++) { + if (addComma) supportedSceneModes += ","; + addComma = true; + switch (availableSceneModes.data.u8[i]) { + case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: + noSceneModes = true; + break; + case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY: + // Not in old API + addComma = false; + break; + case ANDROID_CONTROL_SCENE_MODE_ACTION: + supportedSceneModes += + CameraParameters::SCENE_MODE_ACTION; + break; + case ANDROID_CONTROL_SCENE_MODE_PORTRAIT: + supportedSceneModes += + CameraParameters::SCENE_MODE_PORTRAIT; + break; + case ANDROID_CONTROL_SCENE_MODE_LANDSCAPE: + supportedSceneModes += + CameraParameters::SCENE_MODE_LANDSCAPE; + break; + case ANDROID_CONTROL_SCENE_MODE_NIGHT: + supportedSceneModes += + CameraParameters::SCENE_MODE_NIGHT; + break; + case ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT: + supportedSceneModes += + CameraParameters::SCENE_MODE_NIGHT_PORTRAIT; + break; + case ANDROID_CONTROL_SCENE_MODE_THEATRE: + supportedSceneModes += + CameraParameters::SCENE_MODE_THEATRE; + break; + case ANDROID_CONTROL_SCENE_MODE_BEACH: + supportedSceneModes += + CameraParameters::SCENE_MODE_BEACH; + break; + case ANDROID_CONTROL_SCENE_MODE_SNOW: + supportedSceneModes += + CameraParameters::SCENE_MODE_SNOW; + break; + case ANDROID_CONTROL_SCENE_MODE_SUNSET: + supportedSceneModes += + CameraParameters::SCENE_MODE_SUNSET; + break; + case ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO: + supportedSceneModes += + CameraParameters::SCENE_MODE_STEADYPHOTO; + break; + case ANDROID_CONTROL_SCENE_MODE_FIREWORKS: + supportedSceneModes += + CameraParameters::SCENE_MODE_FIREWORKS; + break; + case ANDROID_CONTROL_SCENE_MODE_SPORTS: + 
supportedSceneModes += + CameraParameters::SCENE_MODE_SPORTS; + break; + case ANDROID_CONTROL_SCENE_MODE_PARTY: + supportedSceneModes += + CameraParameters::SCENE_MODE_PARTY; + break; + case ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT: + supportedSceneModes += + CameraParameters::SCENE_MODE_CANDLELIGHT; + break; + case ANDROID_CONTROL_SCENE_MODE_BARCODE: + supportedSceneModes += + CameraParameters::SCENE_MODE_BARCODE; + break; + default: + ALOGW("%s: Camera %d: Unknown scene mode value: %d", + __FUNCTION__, cameraId, + availableSceneModes.data.u8[i]); + addComma = false; + break; + } + } + if (!noSceneModes) { + params.set(CameraParameters::KEY_SUPPORTED_SCENE_MODES, + supportedSceneModes); + } else { + params.remove(CameraParameters::KEY_SCENE_MODE); + } + } + + bool isFlashAvailable = false; + camera_metadata_ro_entry_t flashAvailable = + staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 0, 1, false); + if (flashAvailable.count) { + isFlashAvailable = flashAvailable.data.u8[0]; + } + + camera_metadata_ro_entry_t availableAeModes = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false); + + if (isFlashAvailable) { + flashMode = Parameters::FLASH_MODE_OFF; + params.set(CameraParameters::KEY_FLASH_MODE, + CameraParameters::FLASH_MODE_OFF); + + String8 supportedFlashModes(CameraParameters::FLASH_MODE_OFF); + supportedFlashModes = supportedFlashModes + + "," + CameraParameters::FLASH_MODE_AUTO + + "," + CameraParameters::FLASH_MODE_ON + + "," + CameraParameters::FLASH_MODE_TORCH; + for (size_t i=0; i < availableAeModes.count; i++) { + if (availableAeModes.data.u8[i] == + ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) { + supportedFlashModes = supportedFlashModes + "," + + CameraParameters::FLASH_MODE_RED_EYE; + break; + } + } + params.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES, + supportedFlashModes); + } else { + flashMode = Parameters::FLASH_MODE_OFF; + params.set(CameraParameters::KEY_FLASH_MODE, + CameraParameters::FLASH_MODE_OFF); + params.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES, + CameraParameters::FLASH_MODE_OFF); + } + + camera_metadata_ro_entry_t minFocusDistance = + staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 1, false); + + camera_metadata_ro_entry_t availableAfModes = + staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES, 0, 0, false); + + if (!minFocusDistance.count || minFocusDistance.data.f[0] == 0) { + // Fixed-focus lens + focusMode = Parameters::FOCUS_MODE_FIXED; + params.set(CameraParameters::KEY_FOCUS_MODE, + CameraParameters::FOCUS_MODE_FIXED); + params.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES, + CameraParameters::FOCUS_MODE_FIXED); + } else { + focusMode = Parameters::FOCUS_MODE_AUTO; + params.set(CameraParameters::KEY_FOCUS_MODE, + CameraParameters::FOCUS_MODE_AUTO); + String8 supportedFocusModes(CameraParameters::FOCUS_MODE_INFINITY); + bool addComma = true; + + for (size_t i=0; i < availableAfModes.count; i++) { + if (addComma) supportedFocusModes += ","; + addComma = true; + switch (availableAfModes.data.u8[i]) { + case ANDROID_CONTROL_AF_MODE_AUTO: + supportedFocusModes += + CameraParameters::FOCUS_MODE_AUTO; + break; + case ANDROID_CONTROL_AF_MODE_MACRO: + supportedFocusModes += + CameraParameters::FOCUS_MODE_MACRO; + break; + case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO: + supportedFocusModes += + CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO; + break; + case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE: + supportedFocusModes += + CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE; + break; + case ANDROID_CONTROL_AF_MODE_EDOF: + supportedFocusModes += 
+ CameraParameters::FOCUS_MODE_EDOF; + break; + // Not supported in old API + case ANDROID_CONTROL_AF_MODE_OFF: + addComma = false; + break; + default: + ALOGW("%s: Camera %d: Unknown AF mode value: %d", + __FUNCTION__, cameraId, availableAfModes.data.u8[i]); + addComma = false; + break; + } + } + params.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES, + supportedFocusModes); + } + focusState = ANDROID_CONTROL_AF_STATE_INACTIVE; + shadowFocusMode = FOCUS_MODE_INVALID; + + camera_metadata_ro_entry_t max3aRegions = + staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1); + if (!max3aRegions.count) return NO_INIT; + + int32_t maxNumFocusAreas = 0; + if (focusMode != Parameters::FOCUS_MODE_FIXED) { + maxNumFocusAreas = max3aRegions.data.i32[0]; + } + params.set(CameraParameters::KEY_MAX_NUM_FOCUS_AREAS, maxNumFocusAreas); + params.set(CameraParameters::KEY_FOCUS_AREAS, + "(0,0,0,0,0)"); + focusingAreas.clear(); + focusingAreas.add(Parameters::Area(0,0,0,0,0)); + + camera_metadata_ro_entry_t availableFocalLengths = + staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false); + if (!availableFocalLengths.count) return NO_INIT; + + float minFocalLength = availableFocalLengths.data.f[0]; + params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength); + + float horizFov, vertFov; + res = calculatePictureFovs(&horizFov, &vertFov); + if (res != OK) { + ALOGE("%s: Can't calculate field of views!", __FUNCTION__); + return res; + } + + params.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, horizFov); + params.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, vertFov); + + exposureCompensation = 0; + params.set(CameraParameters::KEY_EXPOSURE_COMPENSATION, + exposureCompensation); + + camera_metadata_ro_entry_t exposureCompensationRange = + staticInfo(ANDROID_CONTROL_AE_COMPENSATION_RANGE, 2, 2); + if (!exposureCompensationRange.count) return NO_INIT; + + params.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION, + exposureCompensationRange.data.i32[1]); + params.set(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION, + exposureCompensationRange.data.i32[0]); + + camera_metadata_ro_entry_t exposureCompensationStep = + staticInfo(ANDROID_CONTROL_AE_COMPENSATION_STEP, 1, 1); + if (!exposureCompensationStep.count) return NO_INIT; + + params.setFloat(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP, + (float)exposureCompensationStep.data.r[0].numerator / + exposureCompensationStep.data.r[0].denominator); + + autoExposureLock = false; + params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, + CameraParameters::FALSE); + params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED, + CameraParameters::TRUE); + + autoWhiteBalanceLock = false; + params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, + CameraParameters::FALSE); + params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED, + CameraParameters::TRUE); + + meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0)); + params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS, + max3aRegions.data.i32[0]); + params.set(CameraParameters::KEY_METERING_AREAS, + "(0,0,0,0,0)"); + + zoom = 0; + params.set(CameraParameters::KEY_ZOOM, zoom); + params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1); + + camera_metadata_ro_entry_t maxDigitalZoom = + staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, /*minCount*/1, /*maxCount*/1); + if (!maxDigitalZoom.count) return NO_INIT; + + { + String8 zoomRatios; + float zoom = 1.f; + float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) / + (NUM_ZOOM_STEPS-1); + bool addComma = false; + for (size_t 
i=0; i < NUM_ZOOM_STEPS; i++) { + if (addComma) zoomRatios += ","; + addComma = true; + zoomRatios += String8::format("%d", static_cast<int>(zoom * 100)); + zoom += zoomIncrement; + } + params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios); + } + + params.set(CameraParameters::KEY_ZOOM_SUPPORTED, + CameraParameters::TRUE); + params.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED, + CameraParameters::FALSE); + + params.set(CameraParameters::KEY_FOCUS_DISTANCES, + "Infinity,Infinity,Infinity"); + + params.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW, + fastInfo.maxFaces); + params.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW, + 0); + + params.set(CameraParameters::KEY_VIDEO_FRAME_FORMAT, + CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE); + + recordingHint = false; + params.set(CameraParameters::KEY_RECORDING_HINT, + CameraParameters::FALSE); + + params.set(CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED, + CameraParameters::TRUE); + + videoStabilization = false; + params.set(CameraParameters::KEY_VIDEO_STABILIZATION, + CameraParameters::FALSE); + + camera_metadata_ro_entry_t availableVideoStabilizationModes = + staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0, + false); + + if (availableVideoStabilizationModes.count > 1) { + params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED, + CameraParameters::TRUE); + } else { + params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED, + CameraParameters::FALSE); + } + + // Set up initial state for non-Camera.Parameters state variables + + storeMetadataInBuffers = true; + playShutterSound = true; + enableFaceDetect = false; + + enableFocusMoveMessages = false; + afTriggerCounter = 1; + currentAfTriggerId = -1; + afInMotion = false; + + precaptureTriggerCounter = 1; + + previewCallbackFlags = 0; + previewCallbackOneShot = false; + previewCallbackSurface = false; + + camera_metadata_ro_entry_t supportedHardwareLevel = + staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, 0, 0, false); + if (!supportedHardwareLevel.count || (supportedHardwareLevel.data.u8[0] == + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED)) { + ALOGI("Camera %d: ZSL mode disabled for limited mode HALs", cameraId); + zslMode = false; + } else { + char value[PROPERTY_VALUE_MAX]; + property_get("camera.disable_zsl_mode", value, "0"); + if (!strcmp(value,"1")) { + ALOGI("Camera %d: Disabling ZSL mode", cameraId); + zslMode = false; + } else { + zslMode = true; + } + } + + lightFx = LIGHTFX_NONE; + + state = STOPPED; + + paramsFlattened = params.flatten(); + + return OK; +} + +String8 Parameters::get() const { + return paramsFlattened; +} + +status_t Parameters::buildFastInfo() { + + camera_metadata_ro_entry_t activeArraySize = + staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4); + if (!activeArraySize.count) return NO_INIT; + int32_t arrayWidth; + int32_t arrayHeight; + if (activeArraySize.count == 2) { + ALOGW("%s: Camera %d: activeArraySize is missing xmin/ymin!", + __FUNCTION__, cameraId); + arrayWidth = activeArraySize.data.i32[0]; + arrayHeight = activeArraySize.data.i32[1]; + } else if (activeArraySize.count == 4) { + arrayWidth = activeArraySize.data.i32[2]; + arrayHeight = activeArraySize.data.i32[3]; + } else return NO_INIT; + + camera_metadata_ro_entry_t availableFaceDetectModes = + staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0, + false); + + uint8_t bestFaceDetectMode = + ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + for (size_t i = 0 ; i < availableFaceDetectModes.count; i++) { + switch 
(availableFaceDetectModes.data.u8[i]) { + case ANDROID_STATISTICS_FACE_DETECT_MODE_OFF: + break; + case ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE: + if (bestFaceDetectMode != + ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) { + bestFaceDetectMode = + ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE; + } + break; + case ANDROID_STATISTICS_FACE_DETECT_MODE_FULL: + bestFaceDetectMode = + ANDROID_STATISTICS_FACE_DETECT_MODE_FULL; + break; + default: + ALOGE("%s: Camera %d: Unknown face detect mode %d:", + __FUNCTION__, cameraId, + availableFaceDetectModes.data.u8[i]); + return NO_INIT; + } + } + + int32_t maxFaces = 0; + camera_metadata_ro_entry_t maxFacesDetected = + staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 0, 1, false); + if (maxFacesDetected.count) { + maxFaces = maxFacesDetected.data.i32[0]; + } + + camera_metadata_ro_entry_t availableSceneModes = + staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false); + camera_metadata_ro_entry_t sceneModeOverrides = + staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, 0, 0, false); + camera_metadata_ro_entry_t minFocusDistance = + staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, false); + bool fixedLens = minFocusDistance.count == 0 || + minFocusDistance.data.f[0] == 0; + + camera_metadata_ro_entry_t availableFocalLengths = + staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS); + if (!availableFocalLengths.count) return NO_INIT; + + camera_metadata_ro_entry_t availableFormats = + staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + if (!availableFormats.count) return NO_INIT; + + + if (sceneModeOverrides.count > 0) { + // sceneModeOverrides is defined to have 3 entries for each scene mode, + // which are AE, AWB, and AF override modes the HAL wants for that scene + // mode. + const size_t kModesPerSceneMode = 3; + if (sceneModeOverrides.count != + availableSceneModes.count * kModesPerSceneMode) { + ALOGE("%s: Camera %d: Scene mode override list is an " + "unexpected size: %d (expected %d)", __FUNCTION__, + cameraId, sceneModeOverrides.count, + availableSceneModes.count); + return NO_INIT; + } + for (size_t i = 0; i < availableSceneModes.count; i++) { + DeviceInfo::OverrideModes modes; + uint8_t aeMode = + sceneModeOverrides.data.u8[i * kModesPerSceneMode + 0]; + switch(aeMode) { + case ANDROID_CONTROL_AE_MODE_ON: + modes.flashMode = FLASH_MODE_OFF; + break; + case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH: + modes.flashMode = FLASH_MODE_AUTO; + break; + case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH: + modes.flashMode = FLASH_MODE_ON; + break; + case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE: + modes.flashMode = FLASH_MODE_RED_EYE; + break; + default: + ALOGE("%s: Unknown override AE mode: %d", __FUNCTION__, + aeMode); + modes.flashMode = FLASH_MODE_INVALID; + break; + } + modes.wbMode = + sceneModeOverrides.data.u8[i * kModesPerSceneMode + 1]; + uint8_t afMode = + sceneModeOverrides.data.u8[i * kModesPerSceneMode + 2]; + switch(afMode) { + case ANDROID_CONTROL_AF_MODE_OFF: + modes.focusMode = fixedLens ? 
+ FOCUS_MODE_FIXED : FOCUS_MODE_INFINITY; + break; + case ANDROID_CONTROL_AF_MODE_AUTO: + case ANDROID_CONTROL_AF_MODE_MACRO: + case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO: + case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE: + case ANDROID_CONTROL_AF_MODE_EDOF: + modes.focusMode = static_cast<focusMode_t>(afMode); + break; + default: + ALOGE("%s: Unknown override AF mode: %d", __FUNCTION__, + afMode); + modes.focusMode = FOCUS_MODE_INVALID; + break; + } + fastInfo.sceneModeOverrides.add(availableSceneModes.data.u8[i], + modes); + } + } + + fastInfo.arrayWidth = arrayWidth; + fastInfo.arrayHeight = arrayHeight; + fastInfo.bestFaceDetectMode = bestFaceDetectMode; + fastInfo.maxFaces = maxFaces; + + // Find smallest (widest-angle) focal length to use as basis of still + // picture FOV reporting. + fastInfo.minFocalLength = availableFocalLengths.data.f[0]; + for (size_t i = 1; i < availableFocalLengths.count; i++) { + if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) { + fastInfo.minFocalLength = availableFocalLengths.data.f[i]; + } + } + + // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888 + fastInfo.useFlexibleYuv = false; + for (size_t i = 0; i < availableFormats.count; i++) { + if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) { + fastInfo.useFlexibleYuv = true; + break; + } + } + ALOGV("Camera %d: Flexible YUV %s supported", + cameraId, fastInfo.useFlexibleYuv ? "is" : "is not"); + + return OK; +} + +status_t Parameters::buildQuirks() { + camera_metadata_ro_entry_t entry; + entry = info->find(ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO); + quirks.triggerAfWithAuto = (entry.count != 0 && entry.data.u8[0] == 1); + ALOGV_IF(quirks.triggerAfWithAuto, "Camera %d: Quirk triggerAfWithAuto enabled", + cameraId); + + entry = info->find(ANDROID_QUIRKS_USE_ZSL_FORMAT); + quirks.useZslFormat = (entry.count != 0 && entry.data.u8[0] == 1); + ALOGV_IF(quirks.useZslFormat, "Camera %d: Quirk useZslFormat enabled", + cameraId); + + entry = info->find(ANDROID_QUIRKS_METERING_CROP_REGION); + quirks.meteringCropRegion = (entry.count != 0 && entry.data.u8[0] == 1); + ALOGV_IF(quirks.meteringCropRegion, "Camera %d: Quirk meteringCropRegion" + " enabled", cameraId); + + return OK; +} + +camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag, + size_t minCount, size_t maxCount, bool required) const { + camera_metadata_ro_entry_t entry = info->find(tag); + + if (CC_UNLIKELY( entry.count == 0 ) && required) { + const char* tagSection = get_camera_metadata_section_name(tag); + if (tagSection == NULL) tagSection = "<unknown>"; + const char* tagName = get_camera_metadata_tag_name(tag); + if (tagName == NULL) tagName = "<unknown>"; + + ALOGE("Error finding static metadata entry '%s.%s' (%x)", + tagSection, tagName, tag); + } else if (CC_UNLIKELY( + (minCount != 0 && entry.count < minCount) || + (maxCount != 0 && entry.count > maxCount) ) ) { + const char* tagSection = get_camera_metadata_section_name(tag); + if (tagSection == NULL) tagSection = "<unknown>"; + const char* tagName = get_camera_metadata_tag_name(tag); + if (tagName == NULL) tagName = "<unknown>"; + ALOGE("Malformed static metadata entry '%s.%s' (%x):" + "Expected between %d and %d values, but got %d values", + tagSection, tagName, tag, minCount, maxCount, entry.count); + } + + return entry; +} + +status_t Parameters::set(const String8& paramString) { + status_t res; + + CameraParameters newParams(paramString); + + // TODO: Currently ignoring any changes to supposedly read-only parameters + // such as supported 
preview sizes, etc. Should probably produce an error if + // they're changed. + + /** Extract and verify new parameters */ + + size_t i; + + Parameters validatedParams(*this); + + // PREVIEW_SIZE + newParams.getPreviewSize(&validatedParams.previewWidth, + &validatedParams.previewHeight); + + if (validatedParams.previewWidth != previewWidth || + validatedParams.previewHeight != previewHeight) { + if (state >= PREVIEW) { + ALOGE("%s: Preview size cannot be updated when preview " + "is active! (Currently %d x %d, requested %d x %d", + __FUNCTION__, + previewWidth, previewHeight, + validatedParams.previewWidth, validatedParams.previewHeight); + return BAD_VALUE; + } + camera_metadata_ro_entry_t availablePreviewSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES); + for (i = 0; i < availablePreviewSizes.count; i += 2 ) { + if ((availablePreviewSizes.data.i32[i] == + validatedParams.previewWidth) && + (availablePreviewSizes.data.i32[i+1] == + validatedParams.previewHeight)) break; + } + if (i == availablePreviewSizes.count) { + ALOGE("%s: Requested preview size %d x %d is not supported", + __FUNCTION__, validatedParams.previewWidth, + validatedParams.previewHeight); + return BAD_VALUE; + } + } + + // RECORDING_HINT (always supported) + validatedParams.recordingHint = boolFromString( + newParams.get(CameraParameters::KEY_RECORDING_HINT) ); + bool recordingHintChanged = validatedParams.recordingHint != recordingHint; + ALOGV_IF(recordingHintChanged, "%s: Recording hint changed to %d", + __FUNCTION__, recordingHintChanged); + + // PREVIEW_FPS_RANGE + bool fpsRangeChanged = false; + newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0], + &validatedParams.previewFpsRange[1]); + validatedParams.previewFpsRange[0] /= kFpsToApiScale; + validatedParams.previewFpsRange[1] /= kFpsToApiScale; + + if (validatedParams.previewFpsRange[0] != previewFpsRange[0] || + validatedParams.previewFpsRange[1] != previewFpsRange[1]) { + fpsRangeChanged = true; + camera_metadata_ro_entry_t availablePreviewFpsRanges = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2); + for (i = 0; i < availablePreviewFpsRanges.count; i += 2) { + if ((availablePreviewFpsRanges.data.i32[i] == + validatedParams.previewFpsRange[0]) && + (availablePreviewFpsRanges.data.i32[i+1] == + validatedParams.previewFpsRange[1]) ) { + break; + } + } + if (i == availablePreviewFpsRanges.count) { + ALOGE("%s: Requested preview FPS range %d - %d is not supported", + __FUNCTION__, validatedParams.previewFpsRange[0], + validatedParams.previewFpsRange[1]); + return BAD_VALUE; + } + validatedParams.previewFps = + fpsFromRange(validatedParams.previewFpsRange[0], + validatedParams.previewFpsRange[1]); + newParams.setPreviewFrameRate(validatedParams.previewFps); + } + + // PREVIEW_FORMAT + validatedParams.previewFormat = + formatStringToEnum(newParams.getPreviewFormat()); + if (validatedParams.previewFormat != previewFormat) { + if (state >= PREVIEW) { + ALOGE("%s: Preview format cannot be updated when preview " + "is active!", __FUNCTION__); + return BAD_VALUE; + } + camera_metadata_ro_entry_t availableFormats = + staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + // If using flexible YUV, always support NV21/YV12. Otherwise, check + // HAL's list. + if (! 
(fastInfo.useFlexibleYuv && + (validatedParams.previewFormat == + HAL_PIXEL_FORMAT_YCrCb_420_SP || + validatedParams.previewFormat == + HAL_PIXEL_FORMAT_YV12) ) ) { + // Not using flexible YUV format, so check explicitly + for (i = 0; i < availableFormats.count; i++) { + if (availableFormats.data.i32[i] == + validatedParams.previewFormat) break; + } + if (i == availableFormats.count) { + ALOGE("%s: Requested preview format %s (0x%x) is not supported", + __FUNCTION__, newParams.getPreviewFormat(), + validatedParams.previewFormat); + return BAD_VALUE; + } + } + } + + // PREVIEW_FRAME_RATE + // Deprecated, only use if the preview fps range is unchanged this time. + // The single-value FPS is the same as the minimum of the range. + if (!fpsRangeChanged) { + validatedParams.previewFps = newParams.getPreviewFrameRate(); + if (validatedParams.previewFps != previewFps || recordingHintChanged) { + camera_metadata_ro_entry_t availableFrameRates = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES); + /** + * If recording hint is set, find the range that encompasses + * previewFps with the largest min index. + * + * If recording hint is not set, find the range with previewFps + * with the smallest min index. + * + * Either way, in case of multiple ranges, break the tie by + * selecting the smaller range. + */ + int targetFps = validatedParams.previewFps; + // all ranges which have targetFps + Vector<Range> candidateRanges; + for (i = 0; i < availableFrameRates.count; i+=2) { + Range r = { + availableFrameRates.data.i32[i], + availableFrameRates.data.i32[i+1] + }; + + if (r.min <= targetFps && targetFps <= r.max) { + candidateRanges.push(r); + } + } + if (candidateRanges.isEmpty()) { + ALOGE("%s: Requested preview frame rate %d is not supported", + __FUNCTION__, validatedParams.previewFps); + return BAD_VALUE; + } + // most applicable range with targetFps + Range bestRange = candidateRanges[0]; + for (i = 1; i < candidateRanges.size(); ++i) { + Range r = candidateRanges[i]; + + // Find by largest minIndex in recording mode + if (validatedParams.recordingHint) { + if (r.min > bestRange.min) { + bestRange = r; + } + else if (r.min == bestRange.min && r.max < bestRange.max) { + bestRange = r; + } + } + // Find by smallest minIndex in preview mode + else { + if (r.min < bestRange.min) { + bestRange = r; + } + else if (r.min == bestRange.min && r.max < bestRange.max) { + bestRange = r; + } + } + } + + validatedParams.previewFpsRange[0] = + bestRange.min; + validatedParams.previewFpsRange[1] = + bestRange.max; + + ALOGV("%s: New preview FPS range: %d, %d, recordingHint = %d", + __FUNCTION__, + validatedParams.previewFpsRange[0], + validatedParams.previewFpsRange[1], + validatedParams.recordingHint); + } + newParams.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, + String8::format("%d,%d", + validatedParams.previewFpsRange[0] * kFpsToApiScale, + validatedParams.previewFpsRange[1] * kFpsToApiScale)); + + } + + // PICTURE_SIZE + newParams.getPictureSize(&validatedParams.pictureWidth, + &validatedParams.pictureHeight); + if (validatedParams.pictureWidth == pictureWidth || + validatedParams.pictureHeight == pictureHeight) { + camera_metadata_ro_entry_t availablePictureSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); + for (i = 0; i < availablePictureSizes.count; i+=2) { + if ((availablePictureSizes.data.i32[i] == + validatedParams.pictureWidth) && + (availablePictureSizes.data.i32[i+1] == + validatedParams.pictureHeight)) break; + } + if (i == availablePictureSizes.count) { + ALOGE("%s: Requested 
picture size %d x %d is not supported", + __FUNCTION__, validatedParams.pictureWidth, + validatedParams.pictureHeight); + return BAD_VALUE; + } + } + + // JPEG_THUMBNAIL_WIDTH/HEIGHT + validatedParams.jpegThumbSize[0] = + newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH); + validatedParams.jpegThumbSize[1] = + newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT); + if (validatedParams.jpegThumbSize[0] != jpegThumbSize[0] || + validatedParams.jpegThumbSize[1] != jpegThumbSize[1]) { + camera_metadata_ro_entry_t availableJpegThumbSizes = + staticInfo(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES); + for (i = 0; i < availableJpegThumbSizes.count; i+=2) { + if ((availableJpegThumbSizes.data.i32[i] == + validatedParams.jpegThumbSize[0]) && + (availableJpegThumbSizes.data.i32[i+1] == + validatedParams.jpegThumbSize[1])) break; + } + if (i == availableJpegThumbSizes.count) { + ALOGE("%s: Requested JPEG thumbnail size %d x %d is not supported", + __FUNCTION__, validatedParams.jpegThumbSize[0], + validatedParams.jpegThumbSize[1]); + return BAD_VALUE; + } + } + + // JPEG_THUMBNAIL_QUALITY + int quality = newParams.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY); + // also makes sure quality fits in uint8_t + if (quality < 0 || quality > 100) { + ALOGE("%s: Requested JPEG thumbnail quality %d is not supported", + __FUNCTION__, quality); + return BAD_VALUE; + } + validatedParams.jpegThumbQuality = quality; + + // JPEG_QUALITY + quality = newParams.getInt(CameraParameters::KEY_JPEG_QUALITY); + // also makes sure quality fits in uint8_t + if (quality < 0 || quality > 100) { + ALOGE("%s: Requested JPEG quality %d is not supported", + __FUNCTION__, quality); + return BAD_VALUE; + } + validatedParams.jpegQuality = quality; + + // ROTATION + validatedParams.jpegRotation = + newParams.getInt(CameraParameters::KEY_ROTATION); + if (validatedParams.jpegRotation != 0 && + validatedParams.jpegRotation != 90 && + validatedParams.jpegRotation != 180 && + validatedParams.jpegRotation != 270) { + ALOGE("%s: Requested picture rotation angle %d is not supported", + __FUNCTION__, validatedParams.jpegRotation); + return BAD_VALUE; + } + + // GPS + + const char *gpsLatStr = + newParams.get(CameraParameters::KEY_GPS_LATITUDE); + if (gpsLatStr != NULL) { + const char *gpsLongStr = + newParams.get(CameraParameters::KEY_GPS_LONGITUDE); + const char *gpsAltitudeStr = + newParams.get(CameraParameters::KEY_GPS_ALTITUDE); + const char *gpsTimeStr = + newParams.get(CameraParameters::KEY_GPS_TIMESTAMP); + const char *gpsProcMethodStr = + newParams.get(CameraParameters::KEY_GPS_PROCESSING_METHOD); + if (gpsLongStr == NULL || + gpsAltitudeStr == NULL || + gpsTimeStr == NULL || + gpsProcMethodStr == NULL) { + ALOGE("%s: Incomplete set of GPS parameters provided", + __FUNCTION__); + return BAD_VALUE; + } + char *endPtr; + errno = 0; + validatedParams.gpsCoordinates[0] = strtod(gpsLatStr, &endPtr); + if (errno || endPtr == gpsLatStr) { + ALOGE("%s: Malformed GPS latitude: %s", __FUNCTION__, gpsLatStr); + return BAD_VALUE; + } + errno = 0; + validatedParams.gpsCoordinates[1] = strtod(gpsLongStr, &endPtr); + if (errno || endPtr == gpsLongStr) { + ALOGE("%s: Malformed GPS longitude: %s", __FUNCTION__, gpsLongStr); + return BAD_VALUE; + } + errno = 0; + validatedParams.gpsCoordinates[2] = strtod(gpsAltitudeStr, &endPtr); + if (errno || endPtr == gpsAltitudeStr) { + ALOGE("%s: Malformed GPS altitude: %s", __FUNCTION__, + gpsAltitudeStr); + return BAD_VALUE; + } + errno = 0; + validatedParams.gpsTimestamp = 
strtoll(gpsTimeStr, &endPtr, 10); + if (errno || endPtr == gpsTimeStr) { + ALOGE("%s: Malformed GPS timestamp: %s", __FUNCTION__, gpsTimeStr); + return BAD_VALUE; + } + validatedParams.gpsProcessingMethod = gpsProcMethodStr; + + validatedParams.gpsEnabled = true; + } else { + validatedParams.gpsEnabled = false; + } + + // EFFECT + validatedParams.effectMode = effectModeStringToEnum( + newParams.get(CameraParameters::KEY_EFFECT) ); + if (validatedParams.effectMode != effectMode) { + camera_metadata_ro_entry_t availableEffectModes = + staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS); + for (i = 0; i < availableEffectModes.count; i++) { + if (validatedParams.effectMode == availableEffectModes.data.u8[i]) break; + } + if (i == availableEffectModes.count) { + ALOGE("%s: Requested effect mode \"%s\" is not supported", + __FUNCTION__, + newParams.get(CameraParameters::KEY_EFFECT) ); + return BAD_VALUE; + } + } + + // ANTIBANDING + validatedParams.antibandingMode = abModeStringToEnum( + newParams.get(CameraParameters::KEY_ANTIBANDING) ); + if (validatedParams.antibandingMode != antibandingMode) { + camera_metadata_ro_entry_t availableAbModes = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES); + for (i = 0; i < availableAbModes.count; i++) { + if (validatedParams.antibandingMode == availableAbModes.data.u8[i]) + break; + } + if (i == availableAbModes.count) { + ALOGE("%s: Requested antibanding mode \"%s\" is not supported", + __FUNCTION__, + newParams.get(CameraParameters::KEY_ANTIBANDING)); + return BAD_VALUE; + } + } + + // SCENE_MODE + validatedParams.sceneMode = sceneModeStringToEnum( + newParams.get(CameraParameters::KEY_SCENE_MODE) ); + if (validatedParams.sceneMode != sceneMode && + validatedParams.sceneMode != + ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) { + camera_metadata_ro_entry_t availableSceneModes = + staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES); + for (i = 0; i < availableSceneModes.count; i++) { + if (validatedParams.sceneMode == availableSceneModes.data.u8[i]) + break; + } + if (i == availableSceneModes.count) { + ALOGE("%s: Requested scene mode \"%s\" is not supported", + __FUNCTION__, + newParams.get(CameraParameters::KEY_SCENE_MODE)); + return BAD_VALUE; + } + } + bool sceneModeSet = + validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + + // FLASH_MODE + if (sceneModeSet) { + validatedParams.flashMode = + fastInfo.sceneModeOverrides. 
+ valueFor(validatedParams.sceneMode).flashMode; + } else { + validatedParams.flashMode = FLASH_MODE_INVALID; + } + if (validatedParams.flashMode == FLASH_MODE_INVALID) { + validatedParams.flashMode = flashModeStringToEnum( + newParams.get(CameraParameters::KEY_FLASH_MODE) ); + } + + if (validatedParams.flashMode != flashMode) { + camera_metadata_ro_entry_t flashAvailable = + staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1); + if (!flashAvailable.data.u8[0] && + validatedParams.flashMode != Parameters::FLASH_MODE_OFF) { + ALOGE("%s: Requested flash mode \"%s\" is not supported: " + "No flash on device", __FUNCTION__, + newParams.get(CameraParameters::KEY_FLASH_MODE)); + return BAD_VALUE; + } else if (validatedParams.flashMode == Parameters::FLASH_MODE_RED_EYE) { + camera_metadata_ro_entry_t availableAeModes = + staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES); + for (i = 0; i < availableAeModes.count; i++) { + if (validatedParams.flashMode == availableAeModes.data.u8[i]) + break; + } + if (i == availableAeModes.count) { + ALOGE("%s: Requested flash mode \"%s\" is not supported", + __FUNCTION__, + newParams.get(CameraParameters::KEY_FLASH_MODE)); + return BAD_VALUE; + } + } else if (validatedParams.flashMode == -1) { + ALOGE("%s: Requested flash mode \"%s\" is unknown", + __FUNCTION__, + newParams.get(CameraParameters::KEY_FLASH_MODE)); + return BAD_VALUE; + } + // Update in case of override + newParams.set(CameraParameters::KEY_FLASH_MODE, + flashModeEnumToString(validatedParams.flashMode)); + } + + // WHITE_BALANCE + if (sceneModeSet) { + validatedParams.wbMode = + fastInfo.sceneModeOverrides. + valueFor(validatedParams.sceneMode).wbMode; + } else { + validatedParams.wbMode = ANDROID_CONTROL_AWB_MODE_OFF; + } + if (validatedParams.wbMode == ANDROID_CONTROL_AWB_MODE_OFF) { + validatedParams.wbMode = wbModeStringToEnum( + newParams.get(CameraParameters::KEY_WHITE_BALANCE) ); + } + if (validatedParams.wbMode != wbMode) { + camera_metadata_ro_entry_t availableWbModes = + staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false); + for (i = 0; i < availableWbModes.count; i++) { + if (validatedParams.wbMode == availableWbModes.data.u8[i]) break; + } + if (i == availableWbModes.count) { + ALOGE("%s: Requested white balance mode %s is not supported", + __FUNCTION__, + newParams.get(CameraParameters::KEY_WHITE_BALANCE)); + return BAD_VALUE; + } + // Update in case of override + newParams.set(CameraParameters::KEY_WHITE_BALANCE, + wbModeEnumToString(validatedParams.wbMode)); + } + + // FOCUS_MODE + if (sceneModeSet) { + validatedParams.focusMode = + fastInfo.sceneModeOverrides. 
+ valueFor(validatedParams.sceneMode).focusMode; + } else { + validatedParams.focusMode = FOCUS_MODE_INVALID; + } + if (validatedParams.focusMode == FOCUS_MODE_INVALID) { + validatedParams.focusMode = focusModeStringToEnum( + newParams.get(CameraParameters::KEY_FOCUS_MODE) ); + } + if (validatedParams.focusMode != focusMode) { + validatedParams.currentAfTriggerId = -1; + if (validatedParams.focusMode != Parameters::FOCUS_MODE_FIXED) { + camera_metadata_ro_entry_t minFocusDistance = + staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, + false); + if (minFocusDistance.count && minFocusDistance.data.f[0] == 0) { + ALOGE("%s: Requested focus mode \"%s\" is not available: " + "fixed focus lens", + __FUNCTION__, + newParams.get(CameraParameters::KEY_FOCUS_MODE)); + return BAD_VALUE; + } else if (validatedParams.focusMode != + Parameters::FOCUS_MODE_INFINITY) { + camera_metadata_ro_entry_t availableFocusModes = + staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES); + for (i = 0; i < availableFocusModes.count; i++) { + if (validatedParams.focusMode == + availableFocusModes.data.u8[i]) break; + } + if (i == availableFocusModes.count) { + ALOGE("%s: Requested focus mode \"%s\" is not supported", + __FUNCTION__, + newParams.get(CameraParameters::KEY_FOCUS_MODE)); + return BAD_VALUE; + } + } + } + validatedParams.focusState = ANDROID_CONTROL_AF_STATE_INACTIVE; + // Always reset shadow focus mode to avoid reverting settings + validatedParams.shadowFocusMode = FOCUS_MODE_INVALID; + // Update in case of override + newParams.set(CameraParameters::KEY_FOCUS_MODE, + focusModeEnumToString(validatedParams.focusMode)); + } else { + validatedParams.currentAfTriggerId = currentAfTriggerId; + } + + // FOCUS_AREAS + res = parseAreas(newParams.get(CameraParameters::KEY_FOCUS_AREAS), + &validatedParams.focusingAreas); + size_t max3aRegions = + (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1).data.i32[0]; + if (res == OK) res = validateAreas(validatedParams.focusingAreas, + max3aRegions, AREA_KIND_FOCUS); + if (res != OK) { + ALOGE("%s: Requested focus areas are malformed: %s", + __FUNCTION__, newParams.get(CameraParameters::KEY_FOCUS_AREAS)); + return BAD_VALUE; + } + + // EXPOSURE_COMPENSATION + validatedParams.exposureCompensation = + newParams.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION); + camera_metadata_ro_entry_t exposureCompensationRange = + staticInfo(ANDROID_CONTROL_AE_COMPENSATION_RANGE); + if ((validatedParams.exposureCompensation < + exposureCompensationRange.data.i32[0]) || + (validatedParams.exposureCompensation > + exposureCompensationRange.data.i32[1])) { + ALOGE("%s: Requested exposure compensation index is out of bounds: %d", + __FUNCTION__, validatedParams.exposureCompensation); + return BAD_VALUE; + } + + // AUTO_EXPOSURE_LOCK (always supported) + validatedParams.autoExposureLock = boolFromString( + newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK)); + + // AUTO_WHITEBALANCE_LOCK (always supported) + validatedParams.autoWhiteBalanceLock = boolFromString( + newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK)); + + // METERING_AREAS + res = parseAreas(newParams.get(CameraParameters::KEY_METERING_AREAS), + &validatedParams.meteringAreas); + if (res == OK) { + res = validateAreas(validatedParams.meteringAreas, max3aRegions, + AREA_KIND_METERING); + } + if (res != OK) { + ALOGE("%s: Requested metering areas are malformed: %s", + __FUNCTION__, + newParams.get(CameraParameters::KEY_METERING_AREAS)); + return BAD_VALUE; + } + + // ZOOM + validatedParams.zoom = 
newParams.getInt(CameraParameters::KEY_ZOOM); + if (validatedParams.zoom < 0 + || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) { + ALOGE("%s: Requested zoom level %d is not supported", + __FUNCTION__, validatedParams.zoom); + return BAD_VALUE; + } + + // VIDEO_SIZE + newParams.getVideoSize(&validatedParams.videoWidth, + &validatedParams.videoHeight); + if (validatedParams.videoWidth != videoWidth || + validatedParams.videoHeight != videoHeight) { + if (state == RECORD) { + ALOGE("%s: Video size cannot be updated when recording is active!", + __FUNCTION__); + return BAD_VALUE; + } + camera_metadata_ro_entry_t availableVideoSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES); + for (i = 0; i < availableVideoSizes.count; i += 2 ) { + if ((availableVideoSizes.data.i32[i] == + validatedParams.videoWidth) && + (availableVideoSizes.data.i32[i+1] == + validatedParams.videoHeight)) break; + } + if (i == availableVideoSizes.count) { + ALOGE("%s: Requested video size %d x %d is not supported", + __FUNCTION__, validatedParams.videoWidth, + validatedParams.videoHeight); + return BAD_VALUE; + } + } + + // VIDEO_STABILIZATION + validatedParams.videoStabilization = boolFromString( + newParams.get(CameraParameters::KEY_VIDEO_STABILIZATION) ); + camera_metadata_ro_entry_t availableVideoStabilizationModes = + staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0, + false); + if (validatedParams.videoStabilization && + availableVideoStabilizationModes.count == 1) { + ALOGE("%s: Video stabilization not supported", __FUNCTION__); + } + + // LIGHTFX + validatedParams.lightFx = lightFxStringToEnum( + newParams.get(CameraParameters::KEY_LIGHTFX)); + + /** Update internal parameters */ + + *this = validatedParams; + + /** Update external parameters calculated from the internal ones */ + + // HORIZONTAL/VERTICAL FIELD OF VIEW + float horizFov, vertFov; + res = calculatePictureFovs(&horizFov, &vertFov); + if (res != OK) { + ALOGE("%s: Can't calculate FOVs", __FUNCTION__); + // continue so parameters are at least consistent + } + newParams.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, + horizFov); + newParams.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, + vertFov); + ALOGV("Current still picture FOV: %f x %f deg", horizFov, vertFov); + + // Need to flatten again in case of overrides + paramsFlattened = newParams.flatten(); + params = newParams; + + return OK; +} + +status_t Parameters::updateRequest(CameraMetadata *request) const { + ATRACE_CALL(); + status_t res; + + /** + * Mixin default important security values + * - android.led.transmit = defaulted ON + */ + camera_metadata_ro_entry_t entry = staticInfo(ANDROID_LED_AVAILABLE_LEDS, + /*minimumCount*/0, + /*maximumCount*/0, + /*required*/false); + for(size_t i = 0; i < entry.count; ++i) { + uint8_t led = entry.data.u8[i]; + + switch(led) { + // Transmit LED is unconditionally on when using + // the android.hardware.Camera API + case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: { + uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON; + res = request->update(ANDROID_LED_TRANSMIT, + &transmitDefault, 1); + if (res != OK) return res; + break; + } + } + } + + /** + * Construct metadata from parameters + */ + + uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL; + res = request->update(ANDROID_REQUEST_METADATA_MODE, + &metadataMode, 1); + if (res != OK) return res; + + res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + previewFpsRange, 2); + if (res != OK) return res; + + uint8_t reqWbLock = autoWhiteBalanceLock ? 
+ ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF; + res = request->update(ANDROID_CONTROL_AWB_LOCK, + &reqWbLock, 1); + + res = request->update(ANDROID_CONTROL_EFFECT_MODE, + &effectMode, 1); + if (res != OK) return res; + res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, + &antibandingMode, 1); + if (res != OK) return res; + + // android.hardware.Camera requires that when face detect is enabled, the + // camera is in a face-priority mode. HAL2 splits this into separate parts + // (face detection statistics and face priority scene mode). Map from other + // to the other. + bool sceneModeActive = + sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO; + if (enableFaceDetect || sceneModeActive) { + reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE; + } + res = request->update(ANDROID_CONTROL_MODE, + &reqControlMode, 1); + if (res != OK) return res; + + uint8_t reqSceneMode = + sceneModeActive ? sceneMode : + enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY : + (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + res = request->update(ANDROID_CONTROL_SCENE_MODE, + &reqSceneMode, 1); + if (res != OK) return res; + + uint8_t reqFlashMode = ANDROID_FLASH_MODE_OFF; + uint8_t reqAeMode = ANDROID_CONTROL_AE_MODE_OFF; + switch (flashMode) { + case Parameters::FLASH_MODE_OFF: + reqAeMode = ANDROID_CONTROL_AE_MODE_ON; break; + case Parameters::FLASH_MODE_AUTO: + reqAeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH; break; + case Parameters::FLASH_MODE_ON: + reqAeMode = ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH; break; + case Parameters::FLASH_MODE_TORCH: + reqAeMode = ANDROID_CONTROL_AE_MODE_ON; + reqFlashMode = ANDROID_FLASH_MODE_TORCH; + break; + case Parameters::FLASH_MODE_RED_EYE: + reqAeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE; break; + default: + ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__, + cameraId, flashMode); + return BAD_VALUE; + } + res = request->update(ANDROID_FLASH_MODE, + &reqFlashMode, 1); + if (res != OK) return res; + res = request->update(ANDROID_CONTROL_AE_MODE, + &reqAeMode, 1); + if (res != OK) return res; + + uint8_t reqAeLock = autoExposureLock ? 
+ ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF; + res = request->update(ANDROID_CONTROL_AE_LOCK, + &reqAeLock, 1); + if (res != OK) return res; + + res = request->update(ANDROID_CONTROL_AWB_MODE, + &wbMode, 1); + if (res != OK) return res; + + float reqFocusDistance = 0; // infinity focus in diopters + uint8_t reqFocusMode = ANDROID_CONTROL_AF_MODE_OFF; + switch (focusMode) { + case Parameters::FOCUS_MODE_AUTO: + case Parameters::FOCUS_MODE_MACRO: + case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO: + case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE: + case Parameters::FOCUS_MODE_EDOF: + reqFocusMode = focusMode; + break; + case Parameters::FOCUS_MODE_INFINITY: + case Parameters::FOCUS_MODE_FIXED: + reqFocusMode = ANDROID_CONTROL_AF_MODE_OFF; + break; + default: + ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__, + cameraId, focusMode); + return BAD_VALUE; + } + res = request->update(ANDROID_LENS_FOCUS_DISTANCE, + &reqFocusDistance, 1); + if (res != OK) return res; + res = request->update(ANDROID_CONTROL_AF_MODE, + &reqFocusMode, 1); + if (res != OK) return res; + + size_t reqFocusingAreasSize = focusingAreas.size() * 5; + int32_t *reqFocusingAreas = new int32_t[reqFocusingAreasSize]; + for (size_t i = 0; i < reqFocusingAreasSize; i += 5) { + if (focusingAreas[i].weight != 0) { + reqFocusingAreas[i + 0] = + normalizedXToArray(focusingAreas[i].left); + reqFocusingAreas[i + 1] = + normalizedYToArray(focusingAreas[i].top); + reqFocusingAreas[i + 2] = + normalizedXToArray(focusingAreas[i].right); + reqFocusingAreas[i + 3] = + normalizedYToArray(focusingAreas[i].bottom); + } else { + reqFocusingAreas[i + 0] = 0; + reqFocusingAreas[i + 1] = 0; + reqFocusingAreas[i + 2] = 0; + reqFocusingAreas[i + 3] = 0; + } + reqFocusingAreas[i + 4] = focusingAreas[i].weight; + } + res = request->update(ANDROID_CONTROL_AF_REGIONS, + reqFocusingAreas, reqFocusingAreasSize); + if (res != OK) return res; + delete[] reqFocusingAreas; + + res = request->update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + &exposureCompensation, 1); + if (res != OK) return res; + + size_t reqMeteringAreasSize = meteringAreas.size() * 5; + int32_t *reqMeteringAreas = new int32_t[reqMeteringAreasSize]; + for (size_t i = 0; i < reqMeteringAreasSize; i += 5) { + if (meteringAreas[i].weight != 0) { + reqMeteringAreas[i + 0] = + normalizedXToArray(meteringAreas[i].left); + reqMeteringAreas[i + 1] = + normalizedYToArray(meteringAreas[i].top); + reqMeteringAreas[i + 2] = + normalizedXToArray(meteringAreas[i].right); + reqMeteringAreas[i + 3] = + normalizedYToArray(meteringAreas[i].bottom); + } else { + reqMeteringAreas[i + 0] = 0; + reqMeteringAreas[i + 1] = 0; + reqMeteringAreas[i + 2] = 0; + reqMeteringAreas[i + 3] = 0; + } + reqMeteringAreas[i + 4] = meteringAreas[i].weight; + } + res = request->update(ANDROID_CONTROL_AE_REGIONS, + reqMeteringAreas, reqMeteringAreasSize); + if (res != OK) return res; + + delete[] reqMeteringAreas; + + /* don't include jpeg thumbnail size - it's valid for + it to be set to (0,0), meaning 'no thumbnail' */ + CropRegion crop = calculateCropRegion( (CropRegion::Outputs)( + CropRegion::OUTPUT_PREVIEW | + CropRegion::OUTPUT_VIDEO | + CropRegion::OUTPUT_PICTURE )); + int32_t reqCropRegion[4] = { + static_cast<int32_t>(crop.left), + static_cast<int32_t>(crop.top), + static_cast<int32_t>(crop.width), + static_cast<int32_t>(crop.height) + }; + res = request->update(ANDROID_SCALER_CROP_REGION, + reqCropRegion, 4); + if (res != OK) return res; + + uint8_t reqVstabMode = videoStabilization ? 
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON : + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF; + res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, + &reqVstabMode, 1); + if (res != OK) return res; + + uint8_t reqFaceDetectMode = enableFaceDetect ? + fastInfo.bestFaceDetectMode : + (uint8_t)ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + res = request->update(ANDROID_STATISTICS_FACE_DETECT_MODE, + &reqFaceDetectMode, 1); + if (res != OK) return res; + + return OK; +} + +status_t Parameters::updateRequestJpeg(CameraMetadata *request) const { + status_t res; + + res = request->update(ANDROID_JPEG_THUMBNAIL_SIZE, + jpegThumbSize, 2); + if (res != OK) return res; + res = request->update(ANDROID_JPEG_THUMBNAIL_QUALITY, + &jpegThumbQuality, 1); + if (res != OK) return res; + res = request->update(ANDROID_JPEG_QUALITY, + &jpegQuality, 1); + if (res != OK) return res; + res = request->update( + ANDROID_JPEG_ORIENTATION, + &jpegRotation, 1); + if (res != OK) return res; + + if (gpsEnabled) { + res = request->update( + ANDROID_JPEG_GPS_COORDINATES, + gpsCoordinates, 3); + if (res != OK) return res; + res = request->update( + ANDROID_JPEG_GPS_TIMESTAMP, + &gpsTimestamp, 1); + if (res != OK) return res; + res = request->update( + ANDROID_JPEG_GPS_PROCESSING_METHOD, + gpsProcessingMethod); + if (res != OK) return res; + } else { + res = request->erase(ANDROID_JPEG_GPS_COORDINATES); + if (res != OK) return res; + res = request->erase(ANDROID_JPEG_GPS_TIMESTAMP); + if (res != OK) return res; + res = request->erase(ANDROID_JPEG_GPS_PROCESSING_METHOD); + if (res != OK) return res; + } + return OK; +} + + +const char* Parameters::getStateName(State state) { +#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break; + switch(state) { + CASE_ENUM_TO_CHAR(DISCONNECTED) + CASE_ENUM_TO_CHAR(STOPPED) + CASE_ENUM_TO_CHAR(WAITING_FOR_PREVIEW_WINDOW) + CASE_ENUM_TO_CHAR(PREVIEW) + CASE_ENUM_TO_CHAR(RECORD) + CASE_ENUM_TO_CHAR(STILL_CAPTURE) + CASE_ENUM_TO_CHAR(VIDEO_SNAPSHOT) + default: + return "Unknown state!"; + break; + } +#undef CASE_ENUM_TO_CHAR +} + +int Parameters::formatStringToEnum(const char *format) { + return + !format ? + HAL_PIXEL_FORMAT_YCrCb_420_SP : + !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV422SP) ? + HAL_PIXEL_FORMAT_YCbCr_422_SP : // NV16 + !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV420SP) ? + HAL_PIXEL_FORMAT_YCrCb_420_SP : // NV21 + !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV422I) ? + HAL_PIXEL_FORMAT_YCbCr_422_I : // YUY2 + !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV420P) ? + HAL_PIXEL_FORMAT_YV12 : // YV12 + !strcmp(format, CameraParameters::PIXEL_FORMAT_RGB565) ? + HAL_PIXEL_FORMAT_RGB_565 : // RGB565 + !strcmp(format, CameraParameters::PIXEL_FORMAT_RGBA8888) ? + HAL_PIXEL_FORMAT_RGBA_8888 : // RGB8888 + !strcmp(format, CameraParameters::PIXEL_FORMAT_BAYER_RGGB) ? 
+ HAL_PIXEL_FORMAT_RAW_SENSOR : // Raw sensor data + -1; +} + +const char* Parameters::formatEnumToString(int format) { + const char *fmt; + switch(format) { + case HAL_PIXEL_FORMAT_YCbCr_422_SP: // NV16 + fmt = CameraParameters::PIXEL_FORMAT_YUV422SP; + break; + case HAL_PIXEL_FORMAT_YCrCb_420_SP: // NV21 + fmt = CameraParameters::PIXEL_FORMAT_YUV420SP; + break; + case HAL_PIXEL_FORMAT_YCbCr_422_I: // YUY2 + fmt = CameraParameters::PIXEL_FORMAT_YUV422I; + break; + case HAL_PIXEL_FORMAT_YV12: // YV12 + fmt = CameraParameters::PIXEL_FORMAT_YUV420P; + break; + case HAL_PIXEL_FORMAT_RGB_565: // RGB565 + fmt = CameraParameters::PIXEL_FORMAT_RGB565; + break; + case HAL_PIXEL_FORMAT_RGBA_8888: // RGBA8888 + fmt = CameraParameters::PIXEL_FORMAT_RGBA8888; + break; + case HAL_PIXEL_FORMAT_RAW_SENSOR: + ALOGW("Raw sensor preview format requested."); + fmt = CameraParameters::PIXEL_FORMAT_BAYER_RGGB; + break; + default: + ALOGE("%s: Unknown preview format: %x", + __FUNCTION__, format); + fmt = NULL; + break; + } + return fmt; +} + +int Parameters::wbModeStringToEnum(const char *wbMode) { + return + !wbMode ? + ANDROID_CONTROL_AWB_MODE_AUTO : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_AUTO) ? + ANDROID_CONTROL_AWB_MODE_AUTO : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_INCANDESCENT) ? + ANDROID_CONTROL_AWB_MODE_INCANDESCENT : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_FLUORESCENT) ? + ANDROID_CONTROL_AWB_MODE_FLUORESCENT : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT) ? + ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_DAYLIGHT) ? + ANDROID_CONTROL_AWB_MODE_DAYLIGHT : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT) ? + ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_TWILIGHT) ? + ANDROID_CONTROL_AWB_MODE_TWILIGHT : + !strcmp(wbMode, CameraParameters::WHITE_BALANCE_SHADE) ? + ANDROID_CONTROL_AWB_MODE_SHADE : + -1; +} + +const char* Parameters::wbModeEnumToString(uint8_t wbMode) { + switch (wbMode) { + case ANDROID_CONTROL_AWB_MODE_AUTO: + return CameraParameters::WHITE_BALANCE_AUTO; + case ANDROID_CONTROL_AWB_MODE_INCANDESCENT: + return CameraParameters::WHITE_BALANCE_INCANDESCENT; + case ANDROID_CONTROL_AWB_MODE_FLUORESCENT: + return CameraParameters::WHITE_BALANCE_FLUORESCENT; + case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT: + return CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT; + case ANDROID_CONTROL_AWB_MODE_DAYLIGHT: + return CameraParameters::WHITE_BALANCE_DAYLIGHT; + case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT: + return CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT; + case ANDROID_CONTROL_AWB_MODE_TWILIGHT: + return CameraParameters::WHITE_BALANCE_TWILIGHT; + case ANDROID_CONTROL_AWB_MODE_SHADE: + return CameraParameters::WHITE_BALANCE_SHADE; + default: + ALOGE("%s: Unknown AWB mode enum: %d", + __FUNCTION__, wbMode); + return "unknown"; + } +} + +int Parameters::effectModeStringToEnum(const char *effectMode) { + return + !effectMode ? + ANDROID_CONTROL_EFFECT_MODE_OFF : + !strcmp(effectMode, CameraParameters::EFFECT_NONE) ? + ANDROID_CONTROL_EFFECT_MODE_OFF : + !strcmp(effectMode, CameraParameters::EFFECT_MONO) ? + ANDROID_CONTROL_EFFECT_MODE_MONO : + !strcmp(effectMode, CameraParameters::EFFECT_NEGATIVE) ? + ANDROID_CONTROL_EFFECT_MODE_NEGATIVE : + !strcmp(effectMode, CameraParameters::EFFECT_SOLARIZE) ? + ANDROID_CONTROL_EFFECT_MODE_SOLARIZE : + !strcmp(effectMode, CameraParameters::EFFECT_SEPIA) ? 
+ ANDROID_CONTROL_EFFECT_MODE_SEPIA : + !strcmp(effectMode, CameraParameters::EFFECT_POSTERIZE) ? + ANDROID_CONTROL_EFFECT_MODE_POSTERIZE : + !strcmp(effectMode, CameraParameters::EFFECT_WHITEBOARD) ? + ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD : + !strcmp(effectMode, CameraParameters::EFFECT_BLACKBOARD) ? + ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD : + !strcmp(effectMode, CameraParameters::EFFECT_AQUA) ? + ANDROID_CONTROL_EFFECT_MODE_AQUA : + -1; +} + +int Parameters::abModeStringToEnum(const char *abMode) { + return + !abMode ? + ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO : + !strcmp(abMode, CameraParameters::ANTIBANDING_AUTO) ? + ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO : + !strcmp(abMode, CameraParameters::ANTIBANDING_OFF) ? + ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF : + !strcmp(abMode, CameraParameters::ANTIBANDING_50HZ) ? + ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ : + !strcmp(abMode, CameraParameters::ANTIBANDING_60HZ) ? + ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ : + -1; +} + +int Parameters::sceneModeStringToEnum(const char *sceneMode) { + return + !sceneMode ? + ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ? + ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ? + ANDROID_CONTROL_SCENE_MODE_ACTION : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ? + ANDROID_CONTROL_SCENE_MODE_PORTRAIT : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_LANDSCAPE) ? + ANDROID_CONTROL_SCENE_MODE_LANDSCAPE : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_NIGHT) ? + ANDROID_CONTROL_SCENE_MODE_NIGHT : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_NIGHT_PORTRAIT) ? + ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_THEATRE) ? + ANDROID_CONTROL_SCENE_MODE_THEATRE : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_BEACH) ? + ANDROID_CONTROL_SCENE_MODE_BEACH : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_SNOW) ? + ANDROID_CONTROL_SCENE_MODE_SNOW : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_SUNSET) ? + ANDROID_CONTROL_SCENE_MODE_SUNSET : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_STEADYPHOTO) ? + ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_FIREWORKS) ? + ANDROID_CONTROL_SCENE_MODE_FIREWORKS : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_SPORTS) ? + ANDROID_CONTROL_SCENE_MODE_SPORTS : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_PARTY) ? + ANDROID_CONTROL_SCENE_MODE_PARTY : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_CANDLELIGHT) ? + ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT : + !strcmp(sceneMode, CameraParameters::SCENE_MODE_BARCODE) ? + ANDROID_CONTROL_SCENE_MODE_BARCODE: + -1; +} + +Parameters::Parameters::flashMode_t Parameters::flashModeStringToEnum( + const char *flashMode) { + return + !flashMode ? + Parameters::FLASH_MODE_INVALID : + !strcmp(flashMode, CameraParameters::FLASH_MODE_OFF) ? + Parameters::FLASH_MODE_OFF : + !strcmp(flashMode, CameraParameters::FLASH_MODE_AUTO) ? + Parameters::FLASH_MODE_AUTO : + !strcmp(flashMode, CameraParameters::FLASH_MODE_ON) ? + Parameters::FLASH_MODE_ON : + !strcmp(flashMode, CameraParameters::FLASH_MODE_RED_EYE) ? + Parameters::FLASH_MODE_RED_EYE : + !strcmp(flashMode, CameraParameters::FLASH_MODE_TORCH) ? 
+ Parameters::FLASH_MODE_TORCH : + Parameters::FLASH_MODE_INVALID; +} + +const char *Parameters::flashModeEnumToString(flashMode_t flashMode) { + switch (flashMode) { + case FLASH_MODE_OFF: + return CameraParameters::FLASH_MODE_OFF; + case FLASH_MODE_AUTO: + return CameraParameters::FLASH_MODE_AUTO; + case FLASH_MODE_ON: + return CameraParameters::FLASH_MODE_ON; + case FLASH_MODE_RED_EYE: + return CameraParameters::FLASH_MODE_RED_EYE; + case FLASH_MODE_TORCH: + return CameraParameters::FLASH_MODE_TORCH; + default: + ALOGE("%s: Unknown flash mode enum %d", + __FUNCTION__, flashMode); + return "unknown"; + } +} + +Parameters::Parameters::focusMode_t Parameters::focusModeStringToEnum( + const char *focusMode) { + return + !focusMode ? + Parameters::FOCUS_MODE_INVALID : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_AUTO) ? + Parameters::FOCUS_MODE_AUTO : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_INFINITY) ? + Parameters::FOCUS_MODE_INFINITY : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_MACRO) ? + Parameters::FOCUS_MODE_MACRO : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_FIXED) ? + Parameters::FOCUS_MODE_FIXED : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_EDOF) ? + Parameters::FOCUS_MODE_EDOF : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO) ? + Parameters::FOCUS_MODE_CONTINUOUS_VIDEO : + !strcmp(focusMode, CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE) ? + Parameters::FOCUS_MODE_CONTINUOUS_PICTURE : + Parameters::FOCUS_MODE_INVALID; +} + +const char *Parameters::focusModeEnumToString(focusMode_t focusMode) { + switch (focusMode) { + case FOCUS_MODE_AUTO: + return CameraParameters::FOCUS_MODE_AUTO; + case FOCUS_MODE_MACRO: + return CameraParameters::FOCUS_MODE_MACRO; + case FOCUS_MODE_CONTINUOUS_VIDEO: + return CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO; + case FOCUS_MODE_CONTINUOUS_PICTURE: + return CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE; + case FOCUS_MODE_EDOF: + return CameraParameters::FOCUS_MODE_EDOF; + case FOCUS_MODE_INFINITY: + return CameraParameters::FOCUS_MODE_INFINITY; + case FOCUS_MODE_FIXED: + return CameraParameters::FOCUS_MODE_FIXED; + default: + ALOGE("%s: Unknown focus mode enum: %d", + __FUNCTION__, focusMode); + return "unknown"; + } +} + +Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum( + const char *lightFxMode) { + return + !lightFxMode ? + Parameters::LIGHTFX_NONE : + !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ? + Parameters::LIGHTFX_LOWLIGHT : + !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ? 
+ Parameters::LIGHTFX_HDR : + Parameters::LIGHTFX_NONE; +} + +status_t Parameters::parseAreas(const char *areasCStr, + Vector<Parameters::Area> *areas) { + static const size_t NUM_FIELDS = 5; + areas->clear(); + if (areasCStr == NULL) { + // If no key exists, use default (0,0,0,0,0) + areas->push(); + return OK; + } + String8 areasStr(areasCStr); + ssize_t areaStart = areasStr.find("(", 0) + 1; + while (areaStart != 0) { + const char* area = areasStr.string() + areaStart; + char *numEnd; + int vals[NUM_FIELDS]; + for (size_t i = 0; i < NUM_FIELDS; i++) { + errno = 0; + vals[i] = strtol(area, &numEnd, 10); + if (errno || numEnd == area) return BAD_VALUE; + area = numEnd + 1; + } + areas->push(Parameters::Area( + vals[0], vals[1], vals[2], vals[3], vals[4]) ); + areaStart = areasStr.find("(", areaStart) + 1; + } + return OK; +} + +status_t Parameters::validateAreas(const Vector<Parameters::Area> &areas, + size_t maxRegions, + AreaKind areaKind) const { + // Definition of valid area can be found in + // include/camera/CameraParameters.h + if (areas.size() == 0) return BAD_VALUE; + if (areas.size() == 1) { + if (areas[0].left == 0 && + areas[0].top == 0 && + areas[0].right == 0 && + areas[0].bottom == 0 && + areas[0].weight == 0) { + // Single (0,0,0,0,0) entry is always valid (== driver decides) + return OK; + } + } + + // fixed focus can only set (0,0,0,0,0) focus area + if (areaKind == AREA_KIND_FOCUS && focusMode == FOCUS_MODE_FIXED) { + return BAD_VALUE; + } + + if (areas.size() > maxRegions) { + ALOGE("%s: Too many areas requested: %d", + __FUNCTION__, areas.size()); + return BAD_VALUE; + } + + for (Vector<Parameters::Area>::const_iterator a = areas.begin(); + a != areas.end(); a++) { + if (a->weight < 1 || a->weight > 1000) return BAD_VALUE; + if (a->left < -1000 || a->left > 1000) return BAD_VALUE; + if (a->top < -1000 || a->top > 1000) return BAD_VALUE; + if (a->right < -1000 || a->right > 1000) return BAD_VALUE; + if (a->bottom < -1000 || a->bottom > 1000) return BAD_VALUE; + if (a->left >= a->right) return BAD_VALUE; + if (a->top >= a->bottom) return BAD_VALUE; + } + return OK; +} + +bool Parameters::boolFromString(const char *boolStr) { + return !boolStr ? false : + !strcmp(boolStr, CameraParameters::TRUE) ? 
true : + false; +} + +int Parameters::degToTransform(int degrees, bool mirror) { + if (!mirror) { + if (degrees == 0) return 0; + else if (degrees == 90) return HAL_TRANSFORM_ROT_90; + else if (degrees == 180) return HAL_TRANSFORM_ROT_180; + else if (degrees == 270) return HAL_TRANSFORM_ROT_270; + } else { // Do mirror (horizontal flip) + if (degrees == 0) { // FLIP_H and ROT_0 + return HAL_TRANSFORM_FLIP_H; + } else if (degrees == 90) { // FLIP_H and ROT_90 + return HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90; + } else if (degrees == 180) { // FLIP_H and ROT_180 + return HAL_TRANSFORM_FLIP_V; + } else if (degrees == 270) { // FLIP_H and ROT_270 + return HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90; + } + } + ALOGE("%s: Bad input: %d", __FUNCTION__, degrees); + return -1; +} + +int Parameters::cropXToArray(int x) const { + ALOG_ASSERT(x >= 0, "Crop-relative X coordinate = '%d' is out of bounds" + "(lower = 0)", x); + + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + ALOG_ASSERT(x < previewCrop.width, "Crop-relative X coordinate = '%d' " + "is out of bounds (upper = %f)", x, previewCrop.width); + + int ret = x + previewCrop.left; + + ALOG_ASSERT( (ret >= 0 && ret < fastInfo.arrayWidth), + "Calculated pixel array value X = '%d' is out of bounds (upper = %d)", + ret, fastInfo.arrayWidth); + return ret; +} + +int Parameters::cropYToArray(int y) const { + ALOG_ASSERT(y >= 0, "Crop-relative Y coordinate = '%d' is out of bounds " + "(lower = 0)", y); + + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + ALOG_ASSERT(y < previewCrop.height, "Crop-relative Y coordinate = '%d' is " + "out of bounds (upper = %f)", y, previewCrop.height); + + int ret = y + previewCrop.top; + + ALOG_ASSERT( (ret >= 0 && ret < fastInfo.arrayHeight), + "Calculated pixel array value Y = '%d' is out of bounds (upper = %d)", + ret, fastInfo.arrayHeight); + + return ret; + +} + +int Parameters::normalizedXToCrop(int x) const { + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + return (x + 1000) * (previewCrop.width - 1) / 2000; +} + +int Parameters::normalizedYToCrop(int y) const { + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + return (y + 1000) * (previewCrop.height - 1) / 2000; +} + +int Parameters::arrayXToCrop(int x) const { + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + return x - previewCrop.left; +} + +int Parameters::arrayYToCrop(int y) const { + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + return y - previewCrop.top; +} + +int Parameters::cropXToNormalized(int x) const { + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + return x * 2000 / (previewCrop.width - 1) - 1000; +} + +int Parameters::cropYToNormalized(int y) const { + CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); + return y * 2000 / (previewCrop.height - 1) - 1000; +} + +int Parameters::arrayXToNormalized(int width) const { + int ret = cropXToNormalized(arrayXToCrop(width)); + + ALOG_ASSERT(ret >= -1000, "Calculated normalized value out of " + "lower bounds %d", ret); + ALOG_ASSERT(ret <= 1000, "Calculated normalized value out of " + "upper bounds %d", ret); + + // Work-around for HAL pre-scaling the coordinates themselves + if (quirks.meteringCropRegion) { + return width * 2000 / (fastInfo.arrayWidth - 1) - 1000; + } + + return ret; +} + +int Parameters::arrayYToNormalized(int height) const { + int ret = 
cropYToNormalized(arrayYToCrop(height)); + + ALOG_ASSERT(ret >= -1000, "Calculated normalized value out of lower bounds" + " %d", ret); + ALOG_ASSERT(ret <= 1000, "Calculated normalized value out of upper bounds" + " %d", ret); + + // Work-around for HAL pre-scaling the coordinates themselves + if (quirks.meteringCropRegion) { + return height * 2000 / (fastInfo.arrayHeight - 1) - 1000; + } + + return ret; +} + +int Parameters::normalizedXToArray(int x) const { + + // Work-around for HAL pre-scaling the coordinates themselves + if (quirks.meteringCropRegion) { + return (x + 1000) * (fastInfo.arrayWidth - 1) / 2000; + } + + return cropXToArray(normalizedXToCrop(x)); +} + +int Parameters::normalizedYToArray(int y) const { + // Work-around for HAL pre-scaling the coordinates themselves + if (quirks.meteringCropRegion) { + return (y + 1000) * (fastInfo.arrayHeight - 1) / 2000; + } + + return cropYToArray(normalizedYToCrop(y)); +} + +Parameters::CropRegion Parameters::calculateCropRegion( + Parameters::CropRegion::Outputs outputs) const { + + float zoomLeft, zoomTop, zoomWidth, zoomHeight; + + // Need to convert zoom index into a crop rectangle. The rectangle is + // chosen to maximize its area on the sensor + + camera_metadata_ro_entry_t maxDigitalZoom = + staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM); + // For each zoom step by how many pixels more do we change the zoom + float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) / + (NUM_ZOOM_STEPS-1); + // The desired activeAreaWidth/cropAreaWidth ratio (or height if h>w) + // via interpolating zoom step into a zoom ratio + float zoomRatio = 1 + zoomIncrement * zoom; + ALOG_ASSERT( (zoomRatio >= 1.f && zoomRatio <= maxDigitalZoom.data.f[0]), + "Zoom ratio calculated out of bounds. Expected 1 - %f, actual: %f", + maxDigitalZoom.data.f[0], zoomRatio); + + ALOGV("Zoom maxDigital=%f, increment=%f, ratio=%f, previewWidth=%d, " + "previewHeight=%d, activeWidth=%d, activeHeight=%d", + maxDigitalZoom.data.f[0], zoomIncrement, zoomRatio, previewWidth, + previewHeight, fastInfo.arrayWidth, fastInfo.arrayHeight); + + /* + * Assumption: On the HAL side each stream buffer calculates its crop + * rectangle as follows: + * cropRect = (zoomLeft, zoomRight, + * zoomWidth, zoomHeight * zoomWidth / outputWidth); + * + * Note that if zoomWidth > bufferWidth, the new cropHeight > zoomHeight + * (we can then get into trouble if the cropHeight > arrayHeight). + * By selecting the zoomRatio based on the smallest outputRatio, we + * guarantee this will never happen. 
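+ *
+ * Illustration of the above (hypothetical sizes, not from any particular
+ * HAL): with a 4000x3000 active array, a 4:3 picture stream, and a 1:1
+ * thumbnail stream, building the crop at 4:3 (4000x3000) would force the
+ * 1:1 stream to derive a 4000x4000 crop, exceeding the 3000-pixel array
+ * height. Building it at the smallest output ratio instead (1:1, so
+ * 3000x3000 at zoomRatio = 1) lets the 4:3 stream derive a 3000x2250
+ * crop that stays within the array bounds.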
+ */ + + // Enumerate all possible output sizes, select the one with the smallest + // aspect ratio + float minOutputWidth, minOutputHeight, minOutputRatio; + { + float outputSizes[][2] = { + { static_cast<float>(previewWidth), + static_cast<float>(previewHeight) }, + { static_cast<float>(videoWidth), + static_cast<float>(videoHeight) }, + { static_cast<float>(jpegThumbSize[0]), + static_cast<float>(jpegThumbSize[1]) }, + { static_cast<float>(pictureWidth), + static_cast<float>(pictureHeight) }, + }; + + minOutputWidth = outputSizes[0][0]; + minOutputHeight = outputSizes[0][1]; + minOutputRatio = minOutputWidth / minOutputHeight; + for (unsigned int i = 0; + i < sizeof(outputSizes) / sizeof(outputSizes[0]); + ++i) { + + // skip over outputs we don't want to consider for the crop region + if ( !((1 << i) & outputs) ) { + continue; + } + + float outputWidth = outputSizes[i][0]; + float outputHeight = outputSizes[i][1]; + float outputRatio = outputWidth / outputHeight; + + if (minOutputRatio > outputRatio) { + minOutputRatio = outputRatio; + minOutputWidth = outputWidth; + minOutputHeight = outputHeight; + } + + // and then use this output ratio instead of preview output ratio + ALOGV("Enumerating output ratio %f = %f / %f, min is %f", + outputRatio, outputWidth, outputHeight, minOutputRatio); + } + } + + /* Ensure that the width/height never go out of bounds + * by scaling across a diffent dimension if an out-of-bounds + * possibility exists. + * + * e.g. if the previewratio < arrayratio and e.g. zoomratio = 1.0, then by + * calculating the zoomWidth from zoomHeight we'll actually get a + * zoomheight > arrayheight + */ + float arrayRatio = 1.f * fastInfo.arrayWidth / fastInfo.arrayHeight; + if (minOutputRatio >= arrayRatio) { + // Adjust the height based on the width + zoomWidth = fastInfo.arrayWidth / zoomRatio; + zoomHeight = zoomWidth * + minOutputHeight / minOutputWidth; + + } else { + // Adjust the width based on the height + zoomHeight = fastInfo.arrayHeight / zoomRatio; + zoomWidth = zoomHeight * + minOutputWidth / minOutputHeight; + } + // centering the zoom area within the active area + zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2; + zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2; + + ALOGV("Crop region calculated (x=%d,y=%d,w=%f,h=%f) for zoom=%d", + (int32_t)zoomLeft, (int32_t)zoomTop, zoomWidth, zoomHeight, this->zoom); + + + CropRegion crop = { zoomLeft, zoomTop, zoomWidth, zoomHeight }; + return crop; +} + +status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov) + const { + camera_metadata_ro_entry_t sensorSize = + staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2); + if (!sensorSize.count) return NO_INIT; + + float arrayAspect = static_cast<float>(fastInfo.arrayWidth) / + fastInfo.arrayHeight; + float stillAspect = static_cast<float>(pictureWidth) / pictureHeight; + ALOGV("Array aspect: %f, still aspect: %f", arrayAspect, stillAspect); + + // The crop factors from the full sensor array to the still picture crop + // region + float horizCropFactor = 1.f; + float vertCropFactor = 1.f; + + /** + * Need to calculate the still image field of view based on the total pixel + * array field of view, and the relative aspect ratios of the pixel array + * and output streams. + * + * Special treatment for quirky definition of crop region and relative + * stream cropping. 
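+ *
+ * In the simple (non-quirk) case below, for instance (hypothetical
+ * sizes): a 4:3 pixel array paired with a 16:9 still size is cropped
+ * only vertically, giving vertCropFactor = (4/3) / (16/9) = 0.75 with
+ * horizCropFactor = 1.0, so only the vertical FOV shrinks. These crop
+ * factors scale the physical sensor dimensions before the
+ * 2 * arctan(d / 2f) angle-of-view formula is applied.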
+ */ + if (quirks.meteringCropRegion) { + // Use max of preview and video as first crop + float previewAspect = static_cast<float>(previewWidth) / previewHeight; + float videoAspect = static_cast<float>(videoWidth) / videoHeight; + if (videoAspect > previewAspect) { + previewAspect = videoAspect; + } + // First crop sensor to preview aspect ratio + if (arrayAspect < previewAspect) { + vertCropFactor = arrayAspect / previewAspect; + } else { + horizCropFactor = previewAspect / arrayAspect; + } + // Second crop to still aspect ratio + if (stillAspect < previewAspect) { + horizCropFactor *= stillAspect / previewAspect; + } else { + vertCropFactor *= previewAspect / stillAspect; + } + } else { + /** + * Crop are just a function of just the still/array relative aspect + * ratios. Since each stream will maximize its area within the crop + * region, and for FOV we assume a full-sensor crop region, we only ever + * crop the FOV either vertically or horizontally, never both. + */ + horizCropFactor = (arrayAspect > stillAspect) ? + (stillAspect / arrayAspect) : 1.f; + vertCropFactor = (arrayAspect < stillAspect) ? + (arrayAspect / stillAspect) : 1.f; + } + ALOGV("Horiz crop factor: %f, vert crop fact: %f", + horizCropFactor, vertCropFactor); + /** + * Basic field of view formula is: + * angle of view = 2 * arctangent ( d / 2f ) + * where d is the physical sensor dimension of interest, and f is + * the focal length. This only applies to rectilinear sensors, for focusing + * at distances >> f, etc. + */ + if (horizFov != NULL) { + *horizFov = 180 / M_PI * 2 * + atanf(horizCropFactor * sensorSize.data.f[0] / + (2 * fastInfo.minFocalLength)); + } + if (vertFov != NULL) { + *vertFov = 180 / M_PI * 2 * + atanf(vertCropFactor * sensorSize.data.f[1] / + (2 * fastInfo.minFocalLength)); + } + return OK; +} + +int32_t Parameters::fpsFromRange(int32_t /*min*/, int32_t max) const { + return max; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h new file mode 100644 index 0000000..464830c --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/Parameters.h @@ -0,0 +1,372 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2PARAMETERS_H +#define ANDROID_SERVERS_CAMERA_CAMERA2PARAMETERS_H + +#include <system/graphics.h> + +#include <utils/Errors.h> +#include <utils/Mutex.h> +#include <utils/String8.h> +#include <utils/Vector.h> +#include <utils/KeyedVector.h> +#include <camera/CameraParameters.h> +#include <camera/CameraMetadata.h> + +namespace android { +namespace camera2 { + +/** + * Current camera state; this is the full state of the Camera under the old + * camera API (contents of the CameraParameters object in a more-efficient + * format, plus other state). The enum values are mostly based off the + * corresponding camera2 enums, not the camera1 strings. 
A few are defined here + * if they don't cleanly map to camera2 values. + */ +struct Parameters { + /** + * Parameters and other state + */ + int cameraId; + int cameraFacing; + + int previewWidth, previewHeight; + int32_t previewFpsRange[2]; + int previewFps; // deprecated, here only for tracking changes + int previewFormat; + + int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION + + int pictureWidth, pictureHeight; + + int32_t jpegThumbSize[2]; + uint8_t jpegQuality, jpegThumbQuality; + int32_t jpegRotation; + + bool gpsEnabled; + double gpsCoordinates[3]; + int64_t gpsTimestamp; + String8 gpsProcessingMethod; + + uint8_t wbMode; + uint8_t effectMode; + uint8_t antibandingMode; + uint8_t sceneMode; + + enum flashMode_t { + FLASH_MODE_OFF = 0, + FLASH_MODE_AUTO, + FLASH_MODE_ON, + FLASH_MODE_TORCH, + FLASH_MODE_RED_EYE = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE, + FLASH_MODE_INVALID = -1 + } flashMode; + + enum focusMode_t { + FOCUS_MODE_AUTO = ANDROID_CONTROL_AF_MODE_AUTO, + FOCUS_MODE_MACRO = ANDROID_CONTROL_AF_MODE_MACRO, + FOCUS_MODE_CONTINUOUS_VIDEO = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO, + FOCUS_MODE_CONTINUOUS_PICTURE = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE, + FOCUS_MODE_EDOF = ANDROID_CONTROL_AF_MODE_EDOF, + FOCUS_MODE_INFINITY, + FOCUS_MODE_FIXED, + FOCUS_MODE_INVALID = -1 + } focusMode; + + uint8_t focusState; // Latest focus state from HAL + + // For use with triggerAfWithAuto quirk + focusMode_t shadowFocusMode; + + struct Area { + int left, top, right, bottom; + int weight; + Area() {} + Area(int left, int top, int right, int bottom, int weight): + left(left), top(top), right(right), bottom(bottom), + weight(weight) {} + bool isEmpty() const { + return (left == 0) && (top == 0) && (right == 0) && (bottom == 0); + } + }; + Vector<Area> focusingAreas; + + int32_t exposureCompensation; + bool autoExposureLock; + bool autoWhiteBalanceLock; + + Vector<Area> meteringAreas; + + int zoom; + + int videoWidth, videoHeight; + + bool recordingHint; + bool videoStabilization; + + enum lightFxMode_t { + LIGHTFX_NONE = 0, + LIGHTFX_LOWLIGHT, + LIGHTFX_HDR + } lightFx; + + CameraParameters params; + String8 paramsFlattened; + + // These parameters are also part of the camera API-visible state, but not + // directly listed in Camera.Parameters + bool storeMetadataInBuffers; + bool playShutterSound; + bool enableFaceDetect; + + bool enableFocusMoveMessages; + int afTriggerCounter; + int currentAfTriggerId; + bool afInMotion; + + int precaptureTriggerCounter; + + uint32_t previewCallbackFlags; + bool previewCallbackOneShot; + bool previewCallbackSurface; + + bool zslMode; + + // Overall camera state + enum State { + DISCONNECTED, + STOPPED, + WAITING_FOR_PREVIEW_WINDOW, + PREVIEW, + RECORD, + STILL_CAPTURE, + VIDEO_SNAPSHOT + } state; + + // Number of zoom steps to simulate + static const unsigned int NUM_ZOOM_STEPS = 100; + + // Full static camera info, object owned by someone else, such as + // Camera2Device. + const CameraMetadata *info; + + // Fast-access static device information; this is a subset of the + // information available through the staticInfo() method, used for + // frequently-accessed values or values that have to be calculated from the + // static information. 
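+    // These values are filled in by buildFastInfo() (declared below), so
+    // frequent callers such as calculateCropRegion() and
+    // calculatePictureFovs() can read them without re-querying the
+    // CameraMetadata each time.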
+ struct DeviceInfo { + int32_t arrayWidth; + int32_t arrayHeight; + uint8_t bestFaceDetectMode; + int32_t maxFaces; + struct OverrideModes { + flashMode_t flashMode; + uint8_t wbMode; + focusMode_t focusMode; + OverrideModes(): + flashMode(FLASH_MODE_INVALID), + wbMode(ANDROID_CONTROL_AWB_MODE_OFF), + focusMode(FOCUS_MODE_INVALID) { + } + }; + DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides; + float minFocalLength; + bool useFlexibleYuv; + } fastInfo; + + // Quirks information; these are short-lived flags to enable workarounds for + // incomplete HAL implementations + struct Quirks { + bool triggerAfWithAuto; + bool useZslFormat; + bool meteringCropRegion; + } quirks; + + /** + * Parameter manipulation and setup methods + */ + + Parameters(int cameraId, int cameraFacing); + ~Parameters(); + + // Sets up default parameters + status_t initialize(const CameraMetadata *info); + + // Build fast-access device static info from static info + status_t buildFastInfo(); + // Query for quirks from static info + status_t buildQuirks(); + + // Get entry from camera static characteristics information. min/maxCount + // are used for error checking the number of values in the entry. 0 for + // max/minCount means to do no bounds check in that direction. In case of + // error, the entry data pointer is null and the count is 0. + camera_metadata_ro_entry_t staticInfo(uint32_t tag, + size_t minCount=0, size_t maxCount=0, bool required=true) const; + + // Validate and update camera parameters based on new settings + status_t set(const String8 ¶mString); + + // Retrieve the current settings + String8 get() const; + + // Update passed-in request for common parameters + status_t updateRequest(CameraMetadata *request) const; + + // Add/update JPEG entries in metadata + status_t updateRequestJpeg(CameraMetadata *request) const; + + // Calculate the crop region rectangle based on current stream sizes + struct CropRegion { + float left; + float top; + float width; + float height; + + enum Outputs { + OUTPUT_PREVIEW = 0x01, + OUTPUT_VIDEO = 0x02, + OUTPUT_JPEG_THUMBNAIL = 0x04, + OUTPUT_PICTURE = 0x08, + }; + }; + CropRegion calculateCropRegion(CropRegion::Outputs outputs) const; + + // Calculate the field of view of the high-resolution JPEG capture + status_t calculatePictureFovs(float *horizFov, float *vertFov) const; + + // Static methods for debugging and converting between camera1 and camera2 + // parameters + + static const char *getStateName(State state); + + static int formatStringToEnum(const char *format); + static const char *formatEnumToString(int format); + + static int wbModeStringToEnum(const char *wbMode); + static const char* wbModeEnumToString(uint8_t wbMode); + static int effectModeStringToEnum(const char *effectMode); + static int abModeStringToEnum(const char *abMode); + static int sceneModeStringToEnum(const char *sceneMode); + static flashMode_t flashModeStringToEnum(const char *flashMode); + static const char* flashModeEnumToString(flashMode_t flashMode); + static focusMode_t focusModeStringToEnum(const char *focusMode); + static const char* focusModeEnumToString(focusMode_t focusMode); + static lightFxMode_t lightFxStringToEnum(const char *lightFxMode); + + static status_t parseAreas(const char *areasCStr, + Vector<Area> *areas); + + enum AreaKind + { + AREA_KIND_FOCUS, + AREA_KIND_METERING + }; + status_t validateAreas(const Vector<Area> &areas, + size_t maxRegions, + AreaKind areaKind) const; + static bool boolFromString(const char *boolStr); + + // Map from camera orientation 
+ facing to gralloc transform enum + static int degToTransform(int degrees, bool mirror); + + // API specifies FPS ranges are done in fixed point integer, with LSB = 0.001. + // Note that this doesn't apply to the (deprecated) single FPS value. + static const int kFpsToApiScale = 1000; + + // Transform between (-1000,-1000)-(1000,1000) normalized coords from camera + // API and HAL2 (0,0)-(activePixelArray.width/height) coordinates + int arrayXToNormalized(int width) const; + int arrayYToNormalized(int height) const; + int normalizedXToArray(int x) const; + int normalizedYToArray(int y) const; + + struct Range { + int min; + int max; + }; + + int32_t fpsFromRange(int32_t min, int32_t max) const; + +private: + + // Convert between HAL2 sensor array coordinates and + // viewfinder crop-region relative array coordinates + int cropXToArray(int x) const; + int cropYToArray(int y) const; + int arrayXToCrop(int x) const; + int arrayYToCrop(int y) const; + + // Convert between viewfinder crop-region relative array coordinates + // and camera API (-1000,1000)-(1000,1000) normalized coords + int cropXToNormalized(int x) const; + int cropYToNormalized(int y) const; + int normalizedXToCrop(int x) const; + int normalizedYToCrop(int y) const; +}; + +// This class encapsulates the Parameters class so that it can only be accessed +// by constructing a Lock object, which locks the SharedParameter's mutex. +class SharedParameters { + public: + SharedParameters(int cameraId, int cameraFacing): + mParameters(cameraId, cameraFacing) { + } + + template<typename S, typename P> + class BaseLock { + public: + BaseLock(S &p): + mParameters(p.mParameters), + mSharedParameters(p) { + mSharedParameters.mLock.lock(); + } + + ~BaseLock() { + mSharedParameters.mLock.unlock(); + } + P &mParameters; + private: + // Disallow copying, default construction + BaseLock(); + BaseLock(const BaseLock &); + BaseLock &operator=(const BaseLock &); + S &mSharedParameters; + }; + typedef BaseLock<SharedParameters, Parameters> Lock; + typedef BaseLock<const SharedParameters, const Parameters> ReadLock; + + // Access static info, read-only and immutable, so no lock needed + camera_metadata_ro_entry_t staticInfo(uint32_t tag, + size_t minCount=0, size_t maxCount=0) const { + return mParameters.staticInfo(tag, minCount, maxCount); + } + + // Only use for dumping or other debugging + const Parameters &unsafeAccess() { + return mParameters; + } + private: + Parameters mParameters; + mutable Mutex mLock; +}; + + +}; // namespace camera2 +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp new file mode 100644 index 0000000..7e98016 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp @@ -0,0 +1,880 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera2-StreamingProcessor" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 +//#define LOG_NNDEBUG 0 // Per-frame verbose logging + +#ifdef LOG_NNDEBUG +#define ALOGVV(...) ALOGV(__VA_ARGS__) +#else +#define ALOGVV(...) ((void)0) +#endif + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <gui/Surface.h> +#include <media/hardware/MetadataBufferType.h> + +#include "common/CameraDeviceBase.h" +#include "api1/Camera2Client.h" +#include "api1/client2/StreamingProcessor.h" +#include "api1/client2/Camera2Heap.h" + +namespace android { +namespace camera2 { + +StreamingProcessor::StreamingProcessor(sp<Camera2Client> client): + mClient(client), + mDevice(client->getCameraDevice()), + mId(client->getCameraId()), + mActiveRequest(NONE), + mPaused(false), + mPreviewRequestId(Camera2Client::kPreviewRequestIdStart), + mPreviewStreamId(NO_STREAM), + mRecordingRequestId(Camera2Client::kRecordingRequestIdStart), + mRecordingStreamId(NO_STREAM), + mRecordingFrameAvailable(false), + mRecordingHeapCount(kDefaultRecordingHeapCount), + mRecordingHeapFree(kDefaultRecordingHeapCount) +{ +} + +StreamingProcessor::~StreamingProcessor() { + deletePreviewStream(); + deleteRecordingStream(); +} + +status_t StreamingProcessor::setPreviewWindow(sp<ANativeWindow> window) { + ATRACE_CALL(); + status_t res; + + res = deletePreviewStream(); + if (res != OK) return res; + + Mutex::Autolock m(mMutex); + + mPreviewWindow = window; + + return OK; +} + +bool StreamingProcessor::haveValidPreviewWindow() const { + Mutex::Autolock m(mMutex); + return mPreviewWindow != 0; +} + +status_t StreamingProcessor::updatePreviewRequest(const Parameters ¶ms) { + ATRACE_CALL(); + status_t res; + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + Mutex::Autolock m(mMutex); + if (mPreviewRequest.entryCount() == 0) { + res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + &mPreviewRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to create default preview request: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + return res; + } + } + + res = params.updateRequest(&mPreviewRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update common entries of preview " + "request: %s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + + res = mPreviewRequest.update(ANDROID_REQUEST_ID, + &mPreviewRequestId, 1); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update request id for preview: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + return OK; +} + +status_t StreamingProcessor::updatePreviewStream(const Parameters ¶ms) { + ATRACE_CALL(); + Mutex::Autolock m(mMutex); + + status_t res; + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mPreviewStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight; + res = device->getStreamInfo(mPreviewStreamId, + ¤tWidth, ¤tHeight, 0); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying preview stream info: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.previewWidth || + currentHeight != (uint32_t)params.previewHeight) { + ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d", + __FUNCTION__, mId, 
currentWidth, currentHeight, + params.previewWidth, params.previewHeight); + res = device->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Camera %d: Error waiting for preview to drain: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + return res; + } + res = device->deleteStream(mPreviewStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for preview: %s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + mPreviewStreamId = NO_STREAM; + } + } + + if (mPreviewStreamId == NO_STREAM) { + res = device->createStream(mPreviewWindow, + params.previewWidth, params.previewHeight, + CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, + &mPreviewStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + } + + res = device->setStreamTransform(mPreviewStreamId, + params.previewTransform); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set preview stream transform: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + return res; + } + + return OK; +} + +status_t StreamingProcessor::deletePreviewStream() { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock m(mMutex); + + if (mPreviewStreamId != NO_STREAM) { + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + ALOGV("%s: for cameraId %d on streamId %d", + __FUNCTION__, mId, mPreviewStreamId); + + res = device->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Error waiting for preview to drain: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + res = device->deleteStream(mPreviewStreamId); + if (res != OK) { + ALOGE("%s: Unable to delete old preview stream: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + mPreviewStreamId = NO_STREAM; + } + return OK; +} + +int StreamingProcessor::getPreviewStreamId() const { + Mutex::Autolock m(mMutex); + return mPreviewStreamId; +} + +status_t StreamingProcessor::setRecordingBufferCount(size_t count) { + ATRACE_CALL(); + // Make sure we can support this many buffer slots + if (count > BufferQueue::NUM_BUFFER_SLOTS) { + ALOGE("%s: Camera %d: Too many recording buffers requested: %d, max %d", + __FUNCTION__, mId, count, BufferQueue::NUM_BUFFER_SLOTS); + return BAD_VALUE; + } + + Mutex::Autolock m(mMutex); + + ALOGV("%s: Camera %d: New recording buffer count from encoder: %d", + __FUNCTION__, mId, count); + + // Need to re-size consumer and heap + if (mRecordingHeapCount != count) { + ALOGV("%s: Camera %d: Resetting recording heap and consumer", + __FUNCTION__, mId); + + if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) { + ALOGE("%s: Camera %d: Setting recording buffer count when " + "recording stream is already active!", __FUNCTION__, + mId); + return INVALID_OPERATION; + } + + releaseAllRecordingFramesLocked(); + + if (mRecordingHeap != 0) { + mRecordingHeap.clear(); + } + mRecordingHeapCount = count; + mRecordingHeapFree = count; + + mRecordingConsumer.clear(); + } + + return OK; +} + +status_t StreamingProcessor::updateRecordingRequest(const Parameters ¶ms) { + ATRACE_CALL(); + status_t res; + Mutex::Autolock m(mMutex); + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mRecordingRequest.entryCount() == 0) { + res = 
device->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD, + &mRecordingRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to create default recording request:" + " %s (%d)", __FUNCTION__, mId, strerror(-res), res); + return res; + } + } + + res = params.updateRequest(&mRecordingRequest); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update common entries of recording " + "request: %s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + + res = mRecordingRequest.update(ANDROID_REQUEST_ID, + &mRecordingRequestId, 1); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update request id for request: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + return OK; +} + +status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { + ATRACE_CALL(); + status_t res; + Mutex::Autolock m(mMutex); + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + bool newConsumer = false; + if (mRecordingConsumer == 0) { + ALOGV("%s: Camera %d: Creating recording consumer with %d + 1 " + "consumer-side buffers", __FUNCTION__, mId, mRecordingHeapCount); + // Create CPU buffer queue endpoint. We need one more buffer here so that we can + // always acquire and free a buffer when the heap is full; otherwise the consumer + // will have buffers in flight we'll never clear out. + sp<BufferQueue> bq = new BufferQueue(); + mRecordingConsumer = new BufferItemConsumer(bq, + GRALLOC_USAGE_HW_VIDEO_ENCODER, + mRecordingHeapCount + 1); + mRecordingConsumer->setFrameAvailableListener(this); + mRecordingConsumer->setName(String8("Camera2-RecordingConsumer")); + mRecordingWindow = new Surface( + mRecordingConsumer->getProducerInterface()); + newConsumer = true; + // Allocate memory later, since we don't know buffer size until receipt + } + + if (mRecordingStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight; + res = device->getStreamInfo(mRecordingStreamId, + ¤tWidth, ¤tHeight, 0); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying recording output stream info: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.videoWidth || + currentHeight != (uint32_t)params.videoHeight || newConsumer) { + // TODO: Should wait to be sure previous recording has finished + res = device->deleteStream(mRecordingStreamId); + + if (res == -EBUSY) { + ALOGV("%s: Camera %d: Device is busy, call " + "updateRecordingStream after it becomes idle", + __FUNCTION__, mId); + return res; + } else if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for recording: %s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + mRecordingStreamId = NO_STREAM; + } + } + + if (mRecordingStreamId == NO_STREAM) { + mRecordingFrameCount = 0; + res = device->createStream(mRecordingWindow, + params.videoWidth, params.videoHeight, + CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Can't create output stream for recording: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + } + + return OK; +} + +status_t StreamingProcessor::deleteRecordingStream() { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock m(mMutex); + + if (mRecordingStreamId != NO_STREAM) { + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: 
Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + res = device->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Error waiting for HAL to drain: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + res = device->deleteStream(mRecordingStreamId); + if (res != OK) { + ALOGE("%s: Unable to delete recording stream: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + mRecordingStreamId = NO_STREAM; + } + return OK; +} + +int StreamingProcessor::getRecordingStreamId() const { + return mRecordingStreamId; +} + +status_t StreamingProcessor::startStream(StreamType type, + const Vector<uint8_t> &outputStreams) { + ATRACE_CALL(); + status_t res; + + if (type == NONE) return INVALID_OPERATION; + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + ALOGV("%s: Camera %d: type = %d", __FUNCTION__, mId, type); + + Mutex::Autolock m(mMutex); + + // If a recording stream is being started up, free up any + // outstanding buffers left from the previous recording session. + // There should never be any, so if there are, warn about it. + if (isStreamActive(outputStreams, mRecordingStreamId)) { + releaseAllRecordingFramesLocked(); + } + + ALOGV("%s: Camera %d: %s started, recording heap has %d free of %d", + __FUNCTION__, mId, (type == PREVIEW) ? "preview" : "recording", + mRecordingHeapFree, mRecordingHeapCount); + + CameraMetadata &request = (type == PREVIEW) ? + mPreviewRequest : mRecordingRequest; + + res = request.update( + ANDROID_REQUEST_OUTPUT_STREAMS, + outputStreams); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + res = request.sort(); + if (res != OK) { + ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + res = device->setStreamingRequest(request); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set preview request to start preview: " + "%s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + mActiveRequest = type; + mPaused = false; + mActiveStreamIds = outputStreams; + return OK; +} + +status_t StreamingProcessor::togglePauseStream(bool pause) { + ATRACE_CALL(); + status_t res; + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + ALOGV("%s: Camera %d: toggling pause to %d", __FUNCTION__, mId, pause); + + Mutex::Autolock m(mMutex); + + if (mActiveRequest == NONE) { + ALOGE("%s: Camera %d: Can't toggle pause, streaming was not started", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mPaused == pause) { + return OK; + } + + if (pause) { + res = device->clearStreamingRequest(); + if (res != OK) { + ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + } else { + CameraMetadata &request = + (mActiveRequest == PREVIEW) ? 
mPreviewRequest + : mRecordingRequest; + res = device->setStreamingRequest(request); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set preview request to resume: " + "%s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + } + + mPaused = pause; + return OK; +} + +status_t StreamingProcessor::stopStream() { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock m(mMutex); + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + res = device->clearStreamingRequest(); + if (res != OK) { + ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + mActiveRequest = NONE; + mActiveStreamIds.clear(); + mPaused = false; + + return OK; +} + +int32_t StreamingProcessor::getActiveRequestId() const { + Mutex::Autolock m(mMutex); + switch (mActiveRequest) { + case NONE: + return 0; + case PREVIEW: + return mPreviewRequestId; + case RECORD: + return mRecordingRequestId; + default: + ALOGE("%s: Unexpected mode %d", __FUNCTION__, mActiveRequest); + return 0; + } +} + +status_t StreamingProcessor::incrementStreamingIds() { + ATRACE_CALL(); + Mutex::Autolock m(mMutex); + + mPreviewRequestId++; + if (mPreviewRequestId >= Camera2Client::kPreviewRequestIdEnd) { + mPreviewRequestId = Camera2Client::kPreviewRequestIdStart; + } + mRecordingRequestId++; + if (mRecordingRequestId >= Camera2Client::kRecordingRequestIdEnd) { + mRecordingRequestId = Camera2Client::kRecordingRequestIdStart; + } + return OK; +} + +void StreamingProcessor::onFrameAvailable() { + ATRACE_CALL(); + Mutex::Autolock l(mMutex); + if (!mRecordingFrameAvailable) { + mRecordingFrameAvailable = true; + mRecordingFrameAvailableSignal.signal(); + } + +} + +bool StreamingProcessor::threadLoop() { + status_t res; + + { + Mutex::Autolock l(mMutex); + while (!mRecordingFrameAvailable) { + res = mRecordingFrameAvailableSignal.waitRelative( + mMutex, kWaitDuration); + if (res == TIMED_OUT) return true; + } + mRecordingFrameAvailable = false; + } + + do { + res = processRecordingFrame(); + } while (res == OK); + + return true; +} + +status_t StreamingProcessor::processRecordingFrame() { + ATRACE_CALL(); + status_t res; + sp<Camera2Heap> recordingHeap; + size_t heapIdx = 0; + nsecs_t timestamp; + + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + // Discard frames during shutdown + BufferItemConsumer::BufferItem imgBuffer; + res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0); + if (res != OK) { + if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) { + ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + } + return res; + } + mRecordingConsumer->releaseBuffer(imgBuffer); + return OK; + } + + { + /* acquire SharedParameters before mMutex so we don't dead lock + with Camera2Client code calling into StreamingProcessor */ + SharedParameters::Lock l(client->getParameters()); + Mutex::Autolock m(mMutex); + BufferItemConsumer::BufferItem imgBuffer; + res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0); + if (res != OK) { + if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) { + ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + } + return res; + } + timestamp = imgBuffer.mTimestamp; + + mRecordingFrameCount++; + ALOGVV("OnRecordingFrame: Frame %d", mRecordingFrameCount); + + if (l.mParameters.state != Parameters::RECORD 
&& + l.mParameters.state != Parameters::VIDEO_SNAPSHOT) { + ALOGV("%s: Camera %d: Discarding recording image buffers " + "received after recording done", __FUNCTION__, + mId); + mRecordingConsumer->releaseBuffer(imgBuffer); + return INVALID_OPERATION; + } + + if (mRecordingHeap == 0) { + const size_t bufferSize = 4 + sizeof(buffer_handle_t); + ALOGV("%s: Camera %d: Creating recording heap with %d buffers of " + "size %d bytes", __FUNCTION__, mId, + mRecordingHeapCount, bufferSize); + + mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount, + "Camera2Client::RecordingHeap"); + if (mRecordingHeap->mHeap->getSize() == 0) { + ALOGE("%s: Camera %d: Unable to allocate memory for recording", + __FUNCTION__, mId); + mRecordingConsumer->releaseBuffer(imgBuffer); + return NO_MEMORY; + } + for (size_t i = 0; i < mRecordingBuffers.size(); i++) { + if (mRecordingBuffers[i].mBuf != + BufferItemConsumer::INVALID_BUFFER_SLOT) { + ALOGE("%s: Camera %d: Non-empty recording buffers list!", + __FUNCTION__, mId); + } + } + mRecordingBuffers.clear(); + mRecordingBuffers.setCapacity(mRecordingHeapCount); + mRecordingBuffers.insertAt(0, mRecordingHeapCount); + + mRecordingHeapHead = 0; + mRecordingHeapFree = mRecordingHeapCount; + } + + if ( mRecordingHeapFree == 0) { + ALOGE("%s: Camera %d: No free recording buffers, dropping frame", + __FUNCTION__, mId); + mRecordingConsumer->releaseBuffer(imgBuffer); + return NO_MEMORY; + } + + heapIdx = mRecordingHeapHead; + mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount; + mRecordingHeapFree--; + + ALOGVV("%s: Camera %d: Timestamp %lld", + __FUNCTION__, mId, timestamp); + + ssize_t offset; + size_t size; + sp<IMemoryHeap> heap = + mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset, + &size); + + uint8_t *data = (uint8_t*)heap->getBase() + offset; + uint32_t type = kMetadataBufferTypeGrallocSource; + *((uint32_t*)data) = type; + *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle; + ALOGVV("%s: Camera %d: Sending out buffer_handle_t %p", + __FUNCTION__, mId, + imgBuffer.mGraphicBuffer->handle); + mRecordingBuffers.replaceAt(imgBuffer, heapIdx); + recordingHeap = mRecordingHeap; + } + + // Call outside locked parameters to allow re-entrancy from notification + Camera2Client::SharedCameraCallbacks::Lock l(client->mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->dataCallbackTimestamp(timestamp, + CAMERA_MSG_VIDEO_FRAME, + recordingHeap->mBuffers[heapIdx]); + } else { + ALOGW("%s: Camera %d: Remote callback gone", __FUNCTION__, mId); + } + + return OK; +} + +void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock m(mMutex); + // Make sure this is for the current heap + ssize_t offset; + size_t size; + sp<IMemoryHeap> heap = mem->getMemory(&offset, &size); + if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) { + ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release " + "(got %x, expected %x)", __FUNCTION__, mId, + heap->getHeapID(), mRecordingHeap->mHeap->getHeapID()); + return; + } + uint8_t *data = (uint8_t*)heap->getBase() + offset; + uint32_t type = *(uint32_t*)data; + if (type != kMetadataBufferTypeGrallocSource) { + ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)", + __FUNCTION__, mId, type, + kMetadataBufferTypeGrallocSource); + return; + } + + // Release the buffer back to the recording queue + + buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4); + + size_t itemIndex; + for 
(itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) { + const BufferItemConsumer::BufferItem item = + mRecordingBuffers[itemIndex]; + if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT && + item.mGraphicBuffer->handle == imgHandle) { + break; + } + } + if (itemIndex == mRecordingBuffers.size()) { + ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of " + "outstanding buffers", __FUNCTION__, mId, + imgHandle); + return; + } + + ALOGVV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__, + mId, imgHandle); + + res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to free recording frame " + "(buffer_handle_t: %p): %s (%d)", __FUNCTION__, + mId, imgHandle, strerror(-res), res); + return; + } + mRecordingBuffers.replaceAt(itemIndex); + + mRecordingHeapFree++; + ALOGV_IF(mRecordingHeapFree == mRecordingHeapCount, + "%s: Camera %d: All %d recording buffers returned", + __FUNCTION__, mId, mRecordingHeapCount); +} + +void StreamingProcessor::releaseAllRecordingFramesLocked() { + ATRACE_CALL(); + status_t res; + + if (mRecordingConsumer == 0) { + return; + } + + ALOGV("%s: Camera %d: Releasing all recording buffers", __FUNCTION__, + mId); + + size_t releasedCount = 0; + for (size_t itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) { + const BufferItemConsumer::BufferItem item = + mRecordingBuffers[itemIndex]; + if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT) { + res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to free recording frame " + "(buffer_handle_t: %p): %s (%d)", __FUNCTION__, + mId, item.mGraphicBuffer->handle, strerror(-res), res); + } + mRecordingBuffers.replaceAt(itemIndex); + releasedCount++; + } + } + + if (releasedCount > 0) { + ALOGW("%s: Camera %d: Force-freed %d outstanding buffers " + "from previous recording session", __FUNCTION__, mId, releasedCount); + ALOGE_IF(releasedCount != mRecordingHeapCount - mRecordingHeapFree, + "%s: Camera %d: Force-freed %d buffers, but expected %d", + __FUNCTION__, mId, releasedCount, mRecordingHeapCount - mRecordingHeapFree); + } + + mRecordingHeapHead = 0; + mRecordingHeapFree = mRecordingHeapCount; +} + +bool StreamingProcessor::isStreamActive(const Vector<uint8_t> &streams, + uint8_t recordingStreamId) { + for (size_t i = 0; i < streams.size(); i++) { + if (streams[i] == recordingStreamId) { + return true; + } + } + return false; +} + + +status_t StreamingProcessor::dump(int fd, const Vector<String16>& /*args*/) { + String8 result; + + result.append(" Current requests:\n"); + if (mPreviewRequest.entryCount() != 0) { + result.append(" Preview request:\n"); + write(fd, result.string(), result.size()); + mPreviewRequest.dump(fd, 2, 6); + result.clear(); + } else { + result.append(" Preview request: undefined\n"); + } + + if (mRecordingRequest.entryCount() != 0) { + result = " Recording request:\n"; + write(fd, result.string(), result.size()); + mRecordingRequest.dump(fd, 2, 6); + result.clear(); + } else { + result = " Recording request: undefined\n"; + } + + const char* streamTypeString[] = { + "none", "preview", "record" + }; + result.append(String8::format(" Active request: %s (paused: %s)\n", + streamTypeString[mActiveRequest], + mPaused ? 
"yes" : "no")); + + write(fd, result.string(), result.size()); + + return OK; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h new file mode 100644 index 0000000..d879b83 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H + +#include <utils/Mutex.h> +#include <utils/String16.h> +#include <gui/BufferItemConsumer.h> + +#include "camera/CameraMetadata.h" + +namespace android { + +class Camera2Client; +class CameraDeviceBase; +class IMemory; + +namespace camera2 { + +class Parameters; +class Camera2Heap; + +/** + * Management and processing for preview and recording streams + */ +class StreamingProcessor: + public Thread, public BufferItemConsumer::FrameAvailableListener { + public: + StreamingProcessor(sp<Camera2Client> client); + ~StreamingProcessor(); + + status_t setPreviewWindow(sp<ANativeWindow> window); + + bool haveValidPreviewWindow() const; + + status_t updatePreviewRequest(const Parameters ¶ms); + status_t updatePreviewStream(const Parameters ¶ms); + status_t deletePreviewStream(); + int getPreviewStreamId() const; + + status_t setRecordingBufferCount(size_t count); + status_t updateRecordingRequest(const Parameters ¶ms); + status_t updateRecordingStream(const Parameters ¶ms); + status_t deleteRecordingStream(); + int getRecordingStreamId() const; + + enum StreamType { + NONE, + PREVIEW, + RECORD + }; + status_t startStream(StreamType type, + const Vector<uint8_t> &outputStreams); + + // Toggle between paused and unpaused. Stream must be started first. + status_t togglePauseStream(bool pause); + + status_t stopStream(); + + // Returns the request ID for the currently streaming request + // Returns 0 if there is no active request. 
+ status_t getActiveRequestId() const; + status_t incrementStreamingIds(); + + // Callback for new recording frames from HAL + virtual void onFrameAvailable(); + // Callback from stagefright which returns used recording frames + void releaseRecordingFrame(const sp<IMemory>& mem); + + status_t dump(int fd, const Vector<String16>& args); + + private: + mutable Mutex mMutex; + + enum { + NO_STREAM = -1 + }; + + wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; + int mId; + + StreamType mActiveRequest; + bool mPaused; + + Vector<uint8_t> mActiveStreamIds; + + // Preview-related members + int32_t mPreviewRequestId; + int mPreviewStreamId; + CameraMetadata mPreviewRequest; + sp<ANativeWindow> mPreviewWindow; + + // Recording-related members + static const nsecs_t kWaitDuration = 50000000; // 50 ms + + int32_t mRecordingRequestId; + int mRecordingStreamId; + int mRecordingFrameCount; + sp<BufferItemConsumer> mRecordingConsumer; + sp<ANativeWindow> mRecordingWindow; + CameraMetadata mRecordingRequest; + sp<camera2::Camera2Heap> mRecordingHeap; + + bool mRecordingFrameAvailable; + Condition mRecordingFrameAvailableSignal; + + static const size_t kDefaultRecordingHeapCount = 8; + size_t mRecordingHeapCount; + Vector<BufferItemConsumer::BufferItem> mRecordingBuffers; + size_t mRecordingHeapHead, mRecordingHeapFree; + + virtual bool threadLoop(); + + status_t processRecordingFrame(); + + // Unilaterally free any buffers still outstanding to stagefright + void releaseAllRecordingFramesLocked(); + + // Determine if the specified stream is currently in use + static bool isStreamActive(const Vector<uint8_t> &streams, + uint8_t recordingStreamId); +}; + + +}; // namespace camera2 +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp new file mode 100644 index 0000000..11a2cbb --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp @@ -0,0 +1,556 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Camera2-ZslProcessor" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 +//#define LOG_NNDEBUG 0 + +#ifdef LOG_NNDEBUG +#define ALOGVV(...) ALOGV(__VA_ARGS__) +#else +#define ALOGVV(...) 
((void)0) +#endif + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <gui/Surface.h> + +#include "common/CameraDeviceBase.h" +#include "api1/Camera2Client.h" +#include "api1/client2/CaptureSequencer.h" +#include "api1/client2/ZslProcessor.h" + +namespace android { +namespace camera2 { + +ZslProcessor::ZslProcessor( + sp<Camera2Client> client, + wp<CaptureSequencer> sequencer): + Thread(false), + mState(RUNNING), + mClient(client), + mDevice(client->getCameraDevice()), + mSequencer(sequencer), + mId(client->getCameraId()), + mZslBufferAvailable(false), + mZslStreamId(NO_STREAM), + mZslReprocessStreamId(NO_STREAM), + mFrameListHead(0), + mZslQueueHead(0), + mZslQueueTail(0) { + mZslQueue.insertAt(0, kZslBufferDepth); + mFrameList.insertAt(0, kFrameListDepth); + sp<CaptureSequencer> captureSequencer = mSequencer.promote(); + if (captureSequencer != 0) captureSequencer->setZslProcessor(this); +} + +ZslProcessor::~ZslProcessor() { + ALOGV("%s: Exit", __FUNCTION__); + deleteStream(); +} + +void ZslProcessor::onFrameAvailable() { + Mutex::Autolock l(mInputMutex); + if (!mZslBufferAvailable) { + mZslBufferAvailable = true; + mZslBufferAvailableSignal.signal(); + } +} + +void ZslProcessor::onFrameAvailable(int32_t /*frameId*/, + const CameraMetadata &frame) { + Mutex::Autolock l(mInputMutex); + camera_metadata_ro_entry_t entry; + entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + nsecs_t timestamp = entry.data.i64[0]; + (void)timestamp; + ALOGVV("Got preview frame for timestamp %lld", timestamp); + + if (mState != RUNNING) return; + + mFrameList.editItemAt(mFrameListHead) = frame; + mFrameListHead = (mFrameListHead + 1) % kFrameListDepth; + + findMatchesLocked(); +} + +void ZslProcessor::onBufferReleased(buffer_handle_t *handle) { + Mutex::Autolock l(mInputMutex); + + // Verify that the buffer is in our queue + size_t i = 0; + for (; i < mZslQueue.size(); i++) { + if (&(mZslQueue[i].buffer.mGraphicBuffer->handle) == handle) break; + } + if (i == mZslQueue.size()) { + ALOGW("%s: Released buffer %p not found in queue", + __FUNCTION__, handle); + } + + // Erase entire ZSL queue since we've now completed the capture and preview + // is stopped. 
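+    // Clearing also accompanies the transition back to RUNNING below, so
+    // processNewZslBuffer() stops discarding newly arriving ZSL buffers.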
+ clearZslQueueLocked(); + + mState = RUNNING; +} + +status_t ZslProcessor::updateStream(const Parameters ¶ms) { + ATRACE_CALL(); + ALOGV("%s: Configuring ZSL streams", __FUNCTION__); + status_t res; + + Mutex::Autolock l(mInputMutex); + + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mZslConsumer == 0) { + // Create CPU buffer queue endpoint + sp<BufferQueue> bq = new BufferQueue(); + mZslConsumer = new BufferItemConsumer(bq, + GRALLOC_USAGE_HW_CAMERA_ZSL, + kZslBufferDepth); + mZslConsumer->setFrameAvailableListener(this); + mZslConsumer->setName(String8("Camera2Client::ZslConsumer")); + mZslWindow = new Surface( + mZslConsumer->getProducerInterface()); + } + + if (mZslStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight; + res = device->getStreamInfo(mZslStreamId, + ¤tWidth, ¤tHeight, 0); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying capture output stream info: " + "%s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.fastInfo.arrayWidth || + currentHeight != (uint32_t)params.fastInfo.arrayHeight) { + res = device->deleteReprocessStream(mZslReprocessStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old reprocess stream " + "for ZSL: %s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed", + __FUNCTION__, mId, mZslStreamId); + res = device->deleteStream(mZslStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for ZSL: %s (%d)", __FUNCTION__, + mId, strerror(-res), res); + return res; + } + mZslStreamId = NO_STREAM; + } + } + + if (mZslStreamId == NO_STREAM) { + // Create stream for HAL production + // TODO: Sort out better way to select resolution for ZSL + int streamType = params.quirks.useZslFormat ? 
+ (int)CAMERA2_HAL_PIXEL_FORMAT_ZSL : + (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED; + res = device->createStream(mZslWindow, + params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, + streamType, 0, + &mZslStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Can't create output stream for ZSL: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + res = device->createReprocessStreamFromStream(mZslStreamId, + &mZslReprocessStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: " + "%s (%d)", __FUNCTION__, mId, + strerror(-res), res); + return res; + } + } + client->registerFrameListener(Camera2Client::kPreviewRequestIdStart, + Camera2Client::kPreviewRequestIdEnd, + this); + + return OK; +} + +status_t ZslProcessor::deleteStream() { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock l(mInputMutex); + + if (mZslStreamId != NO_STREAM) { + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + clearZslQueueLocked(); + + res = device->deleteReprocessStream(mZslReprocessStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Cannot delete ZSL reprocessing stream %d: " + "%s (%d)", __FUNCTION__, mId, + mZslReprocessStreamId, strerror(-res), res); + return res; + } + + mZslReprocessStreamId = NO_STREAM; + res = device->deleteStream(mZslStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: " + "%s (%d)", __FUNCTION__, mId, + mZslStreamId, strerror(-res), res); + return res; + } + + mZslWindow.clear(); + mZslConsumer.clear(); + + mZslStreamId = NO_STREAM; + } + return OK; +} + +int ZslProcessor::getStreamId() const { + Mutex::Autolock l(mInputMutex); + return mZslStreamId; +} + +status_t ZslProcessor::pushToReprocess(int32_t requestId) { + ALOGV("%s: Send in reprocess request with id %d", + __FUNCTION__, requestId); + Mutex::Autolock l(mInputMutex); + status_t res; + sp<Camera2Client> client = mClient.promote(); + + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + IF_ALOGV() { + dumpZslQueue(-1); + } + + if (mZslQueueTail != mZslQueueHead) { + CameraMetadata request; + size_t index = mZslQueueTail; + while (index != mZslQueueHead) { + if (!mZslQueue[index].frame.isEmpty()) { + request = mZslQueue[index].frame; + break; + } + index = (index + 1) % kZslBufferDepth; + } + if (index == mZslQueueHead) { + ALOGV("%s: ZSL queue has no valid frames to send yet.", + __FUNCTION__); + return NOT_ENOUGH_DATA; + } + // Verify that the frame is reasonable for reprocessing + + camera_metadata_entry_t entry; + entry = request.find(ANDROID_CONTROL_AE_STATE); + if (entry.count == 0) { + ALOGE("%s: ZSL queue frame has no AE state field!", + __FUNCTION__); + return BAD_VALUE; + } + if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED && + entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) { + ALOGV("%s: ZSL queue frame AE state is %d, need full capture", + __FUNCTION__, entry.data.u8[0]); + return NOT_ENOUGH_DATA; + } + + buffer_handle_t *handle = + &(mZslQueue[index].buffer.mGraphicBuffer->handle); + + uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS; + res = request.update(ANDROID_REQUEST_TYPE, + &requestType, 1); + uint8_t inputStreams[1] = + { static_cast<uint8_t>(mZslReprocessStreamId) }; + if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS, + inputStreams, 1); + uint8_t outputStreams[1] = + { 
static_cast<uint8_t>(client->getCaptureStreamId()) }; + if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS, + outputStreams, 1); + res = request.update(ANDROID_REQUEST_ID, + &requestId, 1); + + if (res != OK ) { + ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__); + return INVALID_OPERATION; + } + + res = client->stopStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: " + "%s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return INVALID_OPERATION; + } + // TODO: have push-and-clear be atomic + res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId, + handle, this); + if (res != OK) { + ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + // Update JPEG settings + { + SharedParameters::Lock l(client->getParameters()); + res = l.mParameters.updateRequestJpeg(&request); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL " + "capture request: %s (%d)", __FUNCTION__, + mId, + strerror(-res), res); + return res; + } + } + + mLatestCapturedRequest = request; + res = client->getCameraDevice()->capture(request); + if (res != OK ) { + ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + mState = LOCKED; + } else { + ALOGV("%s: No ZSL buffers yet", __FUNCTION__); + return NOT_ENOUGH_DATA; + } + return OK; +} + +status_t ZslProcessor::clearZslQueue() { + Mutex::Autolock l(mInputMutex); + // If in middle of capture, can't clear out queue + if (mState == LOCKED) return OK; + + return clearZslQueueLocked(); +} + +status_t ZslProcessor::clearZslQueueLocked() { + for (size_t i = 0; i < mZslQueue.size(); i++) { + if (mZslQueue[i].buffer.mTimestamp != 0) { + mZslConsumer->releaseBuffer(mZslQueue[i].buffer); + } + mZslQueue.replaceAt(i); + } + mZslQueueHead = 0; + mZslQueueTail = 0; + return OK; +} + +void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const { + Mutex::Autolock l(mInputMutex); + if (!mLatestCapturedRequest.isEmpty()) { + String8 result(" Latest ZSL capture request:\n"); + write(fd, result.string(), result.size()); + mLatestCapturedRequest.dump(fd, 2, 6); + } else { + String8 result(" Latest ZSL capture request: none yet\n"); + write(fd, result.string(), result.size()); + } + dumpZslQueue(fd); +} + +bool ZslProcessor::threadLoop() { + status_t res; + + { + Mutex::Autolock l(mInputMutex); + while (!mZslBufferAvailable) { + res = mZslBufferAvailableSignal.waitRelative(mInputMutex, + kWaitDuration); + if (res == TIMED_OUT) return true; + } + mZslBufferAvailable = false; + } + + do { + res = processNewZslBuffer(); + } while (res == OK); + + return true; +} + +status_t ZslProcessor::processNewZslBuffer() { + ATRACE_CALL(); + status_t res; + sp<BufferItemConsumer> zslConsumer; + { + Mutex::Autolock l(mInputMutex); + if (mZslConsumer == 0) return OK; + zslConsumer = mZslConsumer; + } + ALOGVV("Trying to get next buffer"); + BufferItemConsumer::BufferItem item; + res = zslConsumer->acquireBuffer(&item, 0); + if (res != OK) { + if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) { + ALOGE("%s: Camera %d: Error receiving ZSL image buffer: " + "%s (%d)", __FUNCTION__, + mId, strerror(-res), res); + } else { + ALOGVV(" No buffer"); + } + return res; + } + + Mutex::Autolock l(mInputMutex); + + if (mState == LOCKED) { + ALOGVV("In capture, discarding new ZSL buffers"); + zslConsumer->releaseBuffer(item); + return OK; + } + + 
ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail); + + if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) { + ALOGVV("Releasing oldest buffer"); + zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer); + mZslQueue.replaceAt(mZslQueueTail); + mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth; + } + + ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead); + + queueHead.buffer = item; + queueHead.frame.release(); + + mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth; + + ALOGVV(" Acquired buffer, timestamp %lld", queueHead.buffer.mTimestamp); + + findMatchesLocked(); + + return OK; +} + +void ZslProcessor::findMatchesLocked() { + ALOGVV("Scanning"); + for (size_t i = 0; i < mZslQueue.size(); i++) { + ZslPair &queueEntry = mZslQueue.editItemAt(i); + nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp; + IF_ALOGV() { + camera_metadata_entry_t entry; + nsecs_t frameTimestamp = 0; + if (!queueEntry.frame.isEmpty()) { + entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP); + frameTimestamp = entry.data.i64[0]; + } + ALOGVV(" %d: b: %lld\tf: %lld", i, + bufferTimestamp, frameTimestamp ); + } + if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) { + // Have buffer, no matching frame. Look for one + for (size_t j = 0; j < mFrameList.size(); j++) { + bool match = false; + CameraMetadata &frame = mFrameList.editItemAt(j); + if (!frame.isEmpty()) { + camera_metadata_entry_t entry; + entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count == 0) { + ALOGE("%s: Can't find timestamp in frame!", + __FUNCTION__); + continue; + } + nsecs_t frameTimestamp = entry.data.i64[0]; + if (bufferTimestamp == frameTimestamp) { + ALOGVV("%s: Found match %lld", __FUNCTION__, + frameTimestamp); + match = true; + } else { + int64_t delta = abs(bufferTimestamp - frameTimestamp); + if ( delta < 1000000) { + ALOGVV("%s: Found close match %lld (delta %lld)", + __FUNCTION__, bufferTimestamp, delta); + match = true; + } + } + } + if (match) { + queueEntry.frame.acquire(frame); + break; + } + } + } + } +} + +void ZslProcessor::dumpZslQueue(int fd) const { + String8 header("ZSL queue contents:"); + String8 indent(" "); + ALOGV("%s", header.string()); + if (fd != -1) { + header = indent + header + "\n"; + write(fd, header.string(), header.size()); + } + for (size_t i = 0; i < mZslQueue.size(); i++) { + const ZslPair &queueEntry = mZslQueue[i]; + nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp; + camera_metadata_ro_entry_t entry; + nsecs_t frameTimestamp = 0; + int frameAeState = -1; + if (!queueEntry.frame.isEmpty()) { + entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count > 0) frameTimestamp = entry.data.i64[0]; + entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE); + if (entry.count > 0) frameAeState = entry.data.u8[0]; + } + String8 result = + String8::format(" %d: b: %lld\tf: %lld, AE state: %d", i, + bufferTimestamp, frameTimestamp, frameAeState); + ALOGV("%s", result.string()); + if (fd != -1) { + result = indent + result + "\n"; + write(fd, result.string(), result.size()); + } + + } +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h new file mode 100644 index 0000000..5fb178f --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> +#include <gui/BufferItemConsumer.h> +#include <camera/CameraMetadata.h> + +#include "common/CameraDeviceBase.h" +#include "api1/client2/ZslProcessorInterface.h" +#include "api1/client2/FrameProcessor.h" + +namespace android { + +class Camera2Client; + +namespace camera2 { + +class CaptureSequencer; +class Parameters; + +/*** + * ZSL queue processing + */ +class ZslProcessor: + virtual public Thread, + virtual public BufferItemConsumer::FrameAvailableListener, + virtual public FrameProcessor::FilteredListener, + virtual public CameraDeviceBase::BufferReleasedListener, + public ZslProcessorInterface { + public: + ZslProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); + ~ZslProcessor(); + + // From mZslConsumer + virtual void onFrameAvailable(); + // From FrameProcessor + virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame); + + virtual void onBufferReleased(buffer_handle_t *handle); + + /** + **************************************** + * ZslProcessorInterface implementation * + **************************************** + */ + + status_t updateStream(const Parameters ¶ms); + status_t deleteStream(); + int getStreamId() const; + + status_t pushToReprocess(int32_t requestId); + status_t clearZslQueue(); + + void dump(int fd, const Vector<String16>& args) const; + private: + static const nsecs_t kWaitDuration = 10000000; // 10 ms + + enum { + RUNNING, + LOCKED + } mState; + + wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; + wp<CaptureSequencer> mSequencer; + int mId; + + mutable Mutex mInputMutex; + bool mZslBufferAvailable; + Condition mZslBufferAvailableSignal; + + enum { + NO_STREAM = -1 + }; + + int mZslStreamId; + int mZslReprocessStreamId; + sp<BufferItemConsumer> mZslConsumer; + sp<ANativeWindow> mZslWindow; + + struct ZslPair { + BufferItemConsumer::BufferItem buffer; + CameraMetadata frame; + }; + + static const size_t kZslBufferDepth = 4; + static const size_t kFrameListDepth = kZslBufferDepth * 2; + Vector<CameraMetadata> mFrameList; + size_t mFrameListHead; + + ZslPair mNextPair; + + Vector<ZslPair> mZslQueue; + size_t mZslQueueHead; + size_t mZslQueueTail; + + CameraMetadata mLatestCapturedRequest; + + virtual bool threadLoop(); + + status_t processNewZslBuffer(); + + // Match up entries from frame list to buffers in ZSL queue + void findMatchesLocked(); + + status_t clearZslQueueLocked(); + + void dumpZslQueue(int id) const; +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp new file mode 100644 index 0000000..7c4da50 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp @@ -0,0 +1,482 @@ +/* 
+ * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Camera2-ZslProcessor3" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 +//#define LOG_NNDEBUG 0 + +#ifdef LOG_NNDEBUG +#define ALOGVV(...) ALOGV(__VA_ARGS__) +#else +#define ALOGVV(...) ((void)0) +#endif + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <gui/Surface.h> + +#include "common/CameraDeviceBase.h" +#include "api1/Camera2Client.h" +#include "api1/client2/CaptureSequencer.h" +#include "api1/client2/ZslProcessor3.h" +#include "device3/Camera3Device.h" + +namespace android { +namespace camera2 { + +ZslProcessor3::ZslProcessor3( + sp<Camera2Client> client, + wp<CaptureSequencer> sequencer): + Thread(false), + mState(RUNNING), + mClient(client), + mSequencer(sequencer), + mId(client->getCameraId()), + mZslStreamId(NO_STREAM), + mFrameListHead(0), + mZslQueueHead(0), + mZslQueueTail(0) { + mZslQueue.insertAt(0, kZslBufferDepth); + mFrameList.insertAt(0, kFrameListDepth); + sp<CaptureSequencer> captureSequencer = mSequencer.promote(); + if (captureSequencer != 0) captureSequencer->setZslProcessor(this); +} + +ZslProcessor3::~ZslProcessor3() { + ALOGV("%s: Exit", __FUNCTION__); + deleteStream(); +} + +void ZslProcessor3::onFrameAvailable(int32_t /*frameId*/, + const CameraMetadata &frame) { + Mutex::Autolock l(mInputMutex); + camera_metadata_ro_entry_t entry; + entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + nsecs_t timestamp = entry.data.i64[0]; + (void)timestamp; + ALOGVV("Got preview metadata for timestamp %lld", timestamp); + + if (mState != RUNNING) return; + + mFrameList.editItemAt(mFrameListHead) = frame; + mFrameListHead = (mFrameListHead + 1) % kFrameListDepth; +} + +status_t ZslProcessor3::updateStream(const Parameters ¶ms) { + ATRACE_CALL(); + ALOGV("%s: Configuring ZSL streams", __FUNCTION__); + status_t res; + + Mutex::Autolock l(mInputMutex); + + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + sp<Camera3Device> device = + static_cast<Camera3Device*>(client->getCameraDevice().get()); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mZslStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight; + res = device->getStreamInfo(mZslStreamId, + ¤tWidth, ¤tHeight, 0); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying capture output stream info: " + "%s (%d)", __FUNCTION__, + client->getCameraId(), strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.fastInfo.arrayWidth || + currentHeight != (uint32_t)params.fastInfo.arrayHeight) { + ALOGV("%s: Camera %d: Deleting stream %d since the buffer " + "dimensions changed", + __FUNCTION__, client->getCameraId(), mZslStreamId); + res = device->deleteStream(mZslStreamId); + if (res == -EBUSY) { + 
ALOGV("%s: Camera %d: Device is busy, call updateStream again " + " after it becomes idle", __FUNCTION__, mId); + return res; + } else if(res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for ZSL: %s (%d)", __FUNCTION__, + client->getCameraId(), strerror(-res), res); + return res; + } + mZslStreamId = NO_STREAM; + } + } + + if (mZslStreamId == NO_STREAM) { + // Create stream for HAL production + // TODO: Sort out better way to select resolution for ZSL + + // Note that format specified internally in Camera3ZslStream + res = device->createZslStream( + params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, + kZslBufferDepth, + &mZslStreamId, + &mZslStream); + if (res != OK) { + ALOGE("%s: Camera %d: Can't create ZSL stream: " + "%s (%d)", __FUNCTION__, client->getCameraId(), + strerror(-res), res); + return res; + } + } + client->registerFrameListener(Camera2Client::kPreviewRequestIdStart, + Camera2Client::kPreviewRequestIdEnd, + this); + + return OK; +} + +status_t ZslProcessor3::deleteStream() { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock l(mInputMutex); + + if (mZslStreamId != NO_STREAM) { + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + sp<Camera3Device> device = + reinterpret_cast<Camera3Device*>(client->getCameraDevice().get()); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + res = device->deleteStream(mZslStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: " + "%s (%d)", __FUNCTION__, client->getCameraId(), + mZslStreamId, strerror(-res), res); + return res; + } + + mZslStreamId = NO_STREAM; + } + return OK; +} + +int ZslProcessor3::getStreamId() const { + Mutex::Autolock l(mInputMutex); + return mZslStreamId; +} + +status_t ZslProcessor3::pushToReprocess(int32_t requestId) { + ALOGV("%s: Send in reprocess request with id %d", + __FUNCTION__, requestId); + Mutex::Autolock l(mInputMutex); + status_t res; + sp<Camera2Client> client = mClient.promote(); + + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + IF_ALOGV() { + dumpZslQueue(-1); + } + + size_t metadataIdx; + nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx); + + if (candidateTimestamp == -1) { + ALOGE("%s: Could not find good candidate for ZSL reprocessing", + __FUNCTION__); + return NOT_ENOUGH_DATA; + } + + res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp, + /*actualTimestamp*/NULL); + + if (res == mZslStream->NO_BUFFER_AVAILABLE) { + ALOGV("%s: No ZSL buffers yet", __FUNCTION__); + return NOT_ENOUGH_DATA; + } else if (res != OK) { + ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + { + CameraMetadata request = mFrameList[metadataIdx]; + + // Verify that the frame is reasonable for reprocessing + + camera_metadata_entry_t entry; + entry = request.find(ANDROID_CONTROL_AE_STATE); + if (entry.count == 0) { + ALOGE("%s: ZSL queue frame has no AE state field!", + __FUNCTION__); + return BAD_VALUE; + } + if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED && + entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) { + ALOGV("%s: ZSL queue frame AE state is %d, need full capture", + __FUNCTION__, entry.data.u8[0]); + return NOT_ENOUGH_DATA; + } + + uint8_t requestType = 
ANDROID_REQUEST_TYPE_REPROCESS; + res = request.update(ANDROID_REQUEST_TYPE, + &requestType, 1); + uint8_t inputStreams[1] = + { static_cast<uint8_t>(mZslStreamId) }; + if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS, + inputStreams, 1); + // TODO: Shouldn't we also update the latest preview frame? + uint8_t outputStreams[1] = + { static_cast<uint8_t>(client->getCaptureStreamId()) }; + if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS, + outputStreams, 1); + res = request.update(ANDROID_REQUEST_ID, + &requestId, 1); + + if (res != OK ) { + ALOGE("%s: Unable to update frame to a reprocess request", + __FUNCTION__); + return INVALID_OPERATION; + } + + res = client->stopStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: " + "%s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return INVALID_OPERATION; + } + + // Update JPEG settings + { + SharedParameters::Lock l(client->getParameters()); + res = l.mParameters.updateRequestJpeg(&request); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL " + "capture request: %s (%d)", __FUNCTION__, + client->getCameraId(), + strerror(-res), res); + return res; + } + } + + mLatestCapturedRequest = request; + res = client->getCameraDevice()->capture(request); + if (res != OK ) { + ALOGE("%s: Unable to send ZSL reprocess request to capture: %s" + " (%d)", __FUNCTION__, strerror(-res), res); + return res; + } + + mState = LOCKED; + } + + return OK; +} + +status_t ZslProcessor3::clearZslQueue() { + Mutex::Autolock l(mInputMutex); + // If in middle of capture, can't clear out queue + if (mState == LOCKED) return OK; + + return clearZslQueueLocked(); +} + +status_t ZslProcessor3::clearZslQueueLocked() { + if (mZslStream != 0) { + return mZslStream->clearInputRingBuffer(); + } + return OK; +} + +void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const { + Mutex::Autolock l(mInputMutex); + if (!mLatestCapturedRequest.isEmpty()) { + String8 result(" Latest ZSL capture request:\n"); + write(fd, result.string(), result.size()); + mLatestCapturedRequest.dump(fd, 2, 6); + } else { + String8 result(" Latest ZSL capture request: none yet\n"); + write(fd, result.string(), result.size()); + } + dumpZslQueue(fd); +} + +bool ZslProcessor3::threadLoop() { + // TODO: remove dependency on thread. For now, shut thread down right + // away. 
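+    // Frame metadata arrives through onFrameAvailable() and buffer events through the
+    // Camera3StreamBufferListener callbacks, so this processor does not need a polling
+    // loop of its own; returning false ends the thread after a single iteration.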
+ return false; +} + +void ZslProcessor3::dumpZslQueue(int fd) const { + String8 header("ZSL queue contents:"); + String8 indent(" "); + ALOGV("%s", header.string()); + if (fd != -1) { + header = indent + header + "\n"; + write(fd, header.string(), header.size()); + } + for (size_t i = 0; i < mZslQueue.size(); i++) { + const ZslPair &queueEntry = mZslQueue[i]; + nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp; + camera_metadata_ro_entry_t entry; + nsecs_t frameTimestamp = 0; + int frameAeState = -1; + if (!queueEntry.frame.isEmpty()) { + entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count > 0) frameTimestamp = entry.data.i64[0]; + entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE); + if (entry.count > 0) frameAeState = entry.data.u8[0]; + } + String8 result = + String8::format(" %d: b: %lld\tf: %lld, AE state: %d", i, + bufferTimestamp, frameTimestamp, frameAeState); + ALOGV("%s", result.string()); + if (fd != -1) { + result = indent + result + "\n"; + write(fd, result.string(), result.size()); + } + + } +} + +nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const { + /** + * Find the smallest timestamp we know about so far + * - ensure that aeState is either converged or locked + */ + + size_t idx = 0; + nsecs_t minTimestamp = -1; + + size_t emptyCount = mFrameList.size(); + + for (size_t j = 0; j < mFrameList.size(); j++) { + const CameraMetadata &frame = mFrameList[j]; + if (!frame.isEmpty()) { + + emptyCount--; + + camera_metadata_ro_entry_t entry; + entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count == 0) { + ALOGE("%s: Can't find timestamp in frame!", + __FUNCTION__); + continue; + } + nsecs_t frameTimestamp = entry.data.i64[0]; + if (minTimestamp > frameTimestamp || minTimestamp == -1) { + + entry = frame.find(ANDROID_CONTROL_AE_STATE); + + if (entry.count == 0) { + /** + * This is most likely a HAL bug. The aeState field is + * mandatory, so it should always be in a metadata packet. + */ + ALOGW("%s: ZSL queue frame has no AE state field!", + __FUNCTION__); + continue; + } + if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED && + entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) { + ALOGVV("%s: ZSL queue frame AE state is %d, need " + "full capture", __FUNCTION__, entry.data.u8[0]); + continue; + } + + minTimestamp = frameTimestamp; + idx = j; + } + + ALOGVV("%s: Saw timestamp %lld", __FUNCTION__, frameTimestamp); + } + } + + if (emptyCount == mFrameList.size()) { + /** + * This could be mildly bad and means our ZSL was triggered before + * there were any frames yet received by the camera framework. + * + * This is a fairly corner case which can happen under: + * + a user presses the shutter button real fast when the camera starts + * (startPreview followed immediately by takePicture). + * + burst capture case (hitting shutter button as fast possible) + * + * If this happens in steady case (preview running for a while, call + * a single takePicture) then this might be a fwk bug. 
+ */ + ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__); + } + + ALOGV("%s: Candidate timestamp %lld (idx %d), empty frames: %d", + __FUNCTION__, minTimestamp, idx, emptyCount); + + if (metadataIdx) { + *metadataIdx = idx; + } + + return minTimestamp; +} + +void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) { + // Intentionally left empty + // Although theoretically we could use this to get better dump info +} + +void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) { + Mutex::Autolock l(mInputMutex); + + // ignore output buffers + if (bufferInfo.mOutput) { + return; + } + + // TODO: Verify that the buffer is in our queue by looking at timestamp + // theoretically unnecessary unless we change the following assumptions: + // -- only 1 buffer reprocessed at a time (which is the case now) + + // Erase entire ZSL queue since we've now completed the capture and preview + // is stopped. + // + // We need to guarantee that if we do two back-to-back captures, + // the second won't use a buffer that's older/the same as the first, which + // is theoretically possible if we don't clear out the queue and the + // selection criteria is something like 'newest'. Clearing out the queue + // on a completed capture ensures we'll only use new data. + ALOGV("%s: Memory optimization, clearing ZSL queue", + __FUNCTION__); + clearZslQueueLocked(); + + // Required so we accept more ZSL requests + mState = RUNNING; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h new file mode 100644 index 0000000..35b85f5 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> +#include <gui/BufferItemConsumer.h> +#include <camera/CameraMetadata.h> + +#include "api1/client2/FrameProcessor.h" +#include "api1/client2/ZslProcessorInterface.h" +#include "device3/Camera3ZslStream.h" + +namespace android { + +class Camera2Client; + +namespace camera2 { + +class CaptureSequencer; +class Parameters; + +/*** + * ZSL queue processing + */ +class ZslProcessor3 : + public ZslProcessorInterface, + public camera3::Camera3StreamBufferListener, + virtual public Thread, + virtual public FrameProcessor::FilteredListener { + public: + ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); + ~ZslProcessor3(); + + // From FrameProcessor + virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame); + + /** + **************************************** + * ZslProcessorInterface implementation * + **************************************** + */ + + virtual status_t updateStream(const Parameters ¶ms); + virtual status_t deleteStream(); + virtual int getStreamId() const; + + virtual status_t pushToReprocess(int32_t requestId); + virtual status_t clearZslQueue(); + + void dump(int fd, const Vector<String16>& args) const; + + protected: + /** + ********************************************** + * Camera3StreamBufferListener implementation * + ********************************************** + */ + typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo; + // Buffer was acquired by the HAL + virtual void onBufferAcquired(const BufferInfo& bufferInfo); + // Buffer was released by the HAL + virtual void onBufferReleased(const BufferInfo& bufferInfo); + + private: + static const nsecs_t kWaitDuration = 10000000; // 10 ms + + enum { + RUNNING, + LOCKED + } mState; + + wp<Camera2Client> mClient; + wp<CaptureSequencer> mSequencer; + + const int mId; + + mutable Mutex mInputMutex; + + enum { + NO_STREAM = -1 + }; + + int mZslStreamId; + sp<camera3::Camera3ZslStream> mZslStream; + + struct ZslPair { + BufferItemConsumer::BufferItem buffer; + CameraMetadata frame; + }; + + static const size_t kZslBufferDepth = 4; + static const size_t kFrameListDepth = kZslBufferDepth * 2; + Vector<CameraMetadata> mFrameList; + size_t mFrameListHead; + + ZslPair mNextPair; + + Vector<ZslPair> mZslQueue; + size_t mZslQueueHead; + size_t mZslQueueTail; + + CameraMetadata mLatestCapturedRequest; + + virtual bool threadLoop(); + + status_t clearZslQueueLocked(); + + void dumpZslQueue(int id) const; + + nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const; +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h new file mode 100644 index 0000000..183c0c2 --- /dev/null +++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H + +#include <utils/Errors.h> +#include <utils/RefBase.h> + +namespace android { +namespace camera2 { + +class Parameters; + +class ZslProcessorInterface : virtual public RefBase { +public: + + // Get ID for use with android.request.outputStreams / inputStreams + virtual int getStreamId() const = 0; + + // Update the streams by recreating them if the size/format has changed + virtual status_t updateStream(const Parameters& params) = 0; + + // Delete the underlying CameraDevice streams + virtual status_t deleteStream() = 0; + + /** + * Submits a ZSL capture request (id = requestId) + * + * An appropriate ZSL buffer is selected by the closest timestamp, + * then we push that buffer to be reprocessed by the HAL. + * A capture request is created and submitted on behalf of the client. + */ + virtual status_t pushToReprocess(int32_t requestId) = 0; + + // Flush the ZSL buffer queue, freeing up all the buffers + virtual status_t clearZslQueue() = 0; + + // (Debugging only) Dump the current state to the specified file descriptor + virtual void dump(int fd, const Vector<String16>& args) const = 0; +}; + +}; //namespace camera2 +}; //namespace android + +#endif |
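For context on how ZslProcessorInterface is meant to be driven, the sketch below shows one plausible caller-side flow. It is illustrative only and not part of the patch above; the helper startRegularCapture() and the surrounding function are hypothetical. The behavior it leans on is documented in the files added here: updateStream() recreates the ZSL stream when the configuration changes, and pushToReprocess() returns NOT_ENOUGH_DATA when no converged- or locked-AE candidate buffer is available yet, in which case a caller would fall back to a normal capture.

    // Illustrative sketch only -- not part of the patch above.
    // "startRegularCapture" is a hypothetical fallback helper.
    status_t takeZslPicture(const sp<ZslProcessorInterface>& zslProcessor,
                            const Parameters& params, int32_t requestId) {
        // (Re)create the ZSL stream if the current parameters no longer match it.
        status_t res = zslProcessor->updateStream(params);
        if (res != OK) return res;

        // Try to reuse an already-captured buffer via reprocessing.
        res = zslProcessor->pushToReprocess(requestId);
        if (res == NOT_ENOUGH_DATA) {
            // No suitable candidate in the ZSL queue (e.g. AE not yet converged);
            // fall back to issuing a regular capture request.
            return startRegularCapture(requestId);
        }
        return res;
    }

In the patch itself, CaptureSequencer plays roughly this role: the processors hand themselves to it via setZslProcessor() in their constructors, and it decides between the reprocess path and a regular capture.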