Diffstat (limited to 'services/camera/libcameraservice/api1')
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.cpp                  207
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.h                      10
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.cpp                     41
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.h                        7
-rw-r--r--  services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp        10
-rw-r--r--  services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp         53
-rw-r--r--  services/camera/libcameraservice/api1/client2/CaptureSequencer.h            5
-rw-r--r--  services/camera/libcameraservice/api1/client2/FrameProcessor.cpp           57
-rw-r--r--  services/camera/libcameraservice/api1/client2/FrameProcessor.h              8
-rw-r--r--  services/camera/libcameraservice/api1/client2/JpegProcessor.cpp            36
-rw-r--r--  services/camera/libcameraservice/api1/client2/Parameters.cpp              440
-rw-r--r--  services/camera/libcameraservice/api1/client2/Parameters.h                 53
-rw-r--r--  services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp       84
-rw-r--r--  services/camera/libcameraservice/api1/client2/StreamingProcessor.h          3
-rw-r--r--  services/camera/libcameraservice/api1/client2/ZslProcessor.cpp             36
-rw-r--r--  services/camera/libcameraservice/api1/client2/ZslProcessor.h                6
-rw-r--r--  services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp            233
-rw-r--r--  services/camera/libcameraservice/api1/client2/ZslProcessor3.h               19
-rw-r--r--  services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp     28
-rw-r--r--  services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h        5
20 files changed, 1086 insertions(+), 255 deletions(-)
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index af23557..2a6aa7b 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -54,16 +54,17 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
int clientPid,
uid_t clientUid,
int servicePid,
- int deviceVersion):
+ bool legacyMode):
Camera2ClientBase(cameraService, cameraClient, clientPackageName,
cameraId, cameraFacing, clientPid, clientUid, servicePid),
- mParameters(cameraId, cameraFacing),
- mDeviceVersion(deviceVersion)
+ mParameters(cameraId, cameraFacing)
{
ATRACE_CALL();
SharedParameters::Lock l(mParameters);
l.mParameters.state = Parameters::DISCONNECTED;
+
+ mLegacyMode = legacyMode;
}
status_t Camera2Client::initialize(camera_module_t *module)
@@ -80,7 +81,7 @@ status_t Camera2Client::initialize(camera_module_t *module)
{
SharedParameters::Lock l(mParameters);
- res = l.mParameters.initialize(&(mDevice->info()));
+ res = l.mParameters.initialize(&(mDevice->info()), mDeviceVersion);
if (res != OK) {
ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -118,7 +119,9 @@ status_t Camera2Client::initialize(camera_module_t *module)
mZslProcessorThread = zslProc;
break;
}
- case CAMERA_DEVICE_API_VERSION_3_0:{
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ case CAMERA_DEVICE_API_VERSION_3_2: {
sp<ZslProcessor3> zslProc =
new ZslProcessor3(this, mCaptureSequencer);
mZslProcessor = zslProc;
@@ -238,7 +241,7 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
result.append(" Scene mode: ");
switch (p.sceneMode) {
- case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED:
+ case ANDROID_CONTROL_SCENE_MODE_DISABLED:
result.append("AUTO\n"); break;
CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION)
CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT)
@@ -431,6 +434,9 @@ void Camera2Client::disconnect() {
mCallbackProcessor->deleteStream();
mZslProcessor->deleteStream();
+ // Remove all ZSL stream state before disconnect; needed to work around b/15408128.
+ mZslProcessor->disconnect();
+
ALOGV("Camera %d: Disconnecting device", mCameraId);
mDevice->disconnect();
@@ -753,6 +759,7 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
// ever take a picture.
// TODO: Find a better compromise, though this likely would involve HAL
// changes.
+ int lastJpegStreamId = mJpegProcessor->getStreamId();
res = updateProcessorStream(mJpegProcessor, params);
if (res != OK) {
ALOGE("%s: Camera %d: Can't pre-configure still image "
@@ -760,6 +767,7 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+ bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
Vector<int32_t> outputStreams;
bool callbacksEnabled = (params.previewCallbackFlags &
@@ -808,14 +816,24 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
return res;
}
}
- if (params.zslMode && !params.recordingHint) {
+
+ if (params.zslMode && !params.recordingHint &&
+ getRecordingStreamId() == NO_STREAM) {
res = updateProcessorStream(mZslProcessor, params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+
+ if (jpegStreamChanged) {
+ ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed",
+ __FUNCTION__, mCameraId);
+ mZslProcessor->clearZslQueue();
+ }
outputStreams.push(getZslStreamId());
+ } else {
+ mZslProcessor->deleteStream();
}
outputStreams.push(getPreviewStreamId());
@@ -896,6 +914,20 @@ void Camera2Client::stopPreviewL() {
ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
}
+ // Clean up recording stream
+ res = mStreamingProcessor->deleteRecordingStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete recording stream before "
+ "stop preview: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ {
+ // Ideally we would recover the override after recording stops, but
+ // right now the recording stream lives until this point, so we are
+ // forced to recover here. TODO: find a better way to handle this (b/17495165)
+ SharedParameters::Lock l(mParameters);
+ l.mParameters.recoverOverriddenJpegSize();
+ }
// no break
case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
SharedParameters::Lock l(mParameters);
@@ -963,6 +995,10 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
case Parameters::STOPPED:
res = startPreviewL(params, false);
if (res != OK) return res;
+ // Make sure the first preview request is submitted to the HAL device to
+ // avoid two consecutive configure_streams calls into the HAL.
+ // TODO: Refactor this to avoid the initial preview configuration.
+ syncWithDevice();
break;
case Parameters::PREVIEW:
// Ready to go
@@ -1016,18 +1052,95 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
return res;
}
}
+
+ // On current HALs, clean up ZSL before transitioning into recording
+ if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
+ if (mZslProcessor->getStreamId() != NO_STREAM) {
+ ALOGV("%s: Camera %d: Clearing out zsl stream before "
+ "creating recording stream", __FUNCTION__, mCameraId);
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mZslProcessor->clearZslQueue();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't clear zsl queue",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mZslProcessor->deleteStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete zsl stream before "
+ "record: %s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
+ }
+ }
+
// Disable callbacks if they're enabled; can't record and use callbacks,
// and we can't fail record start without stagefright asserting.
params.previewCallbackFlags = 0;
- res = updateProcessorStream<
- StreamingProcessor,
- &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
- params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
+ if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
+ // For newer devices, we may need to reconfigure video snapshot JPEG sizes
+ // during recording startup, so we need a more complex sequence here to
+ // ensure an early stream reconfiguration doesn't happen
+ bool recordingStreamNeedsUpdate;
+ res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't query recording stream",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+
+ if (recordingStreamNeedsUpdate) {
+ // Need to stop the stream here so updateProcessorStream won't trigger configureStream.
+ // Right now the camera device cannot handle a configureStream failure gracefully
+ // while it is streaming
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to update record "
+ "stream", __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ }
+
+ res = updateProcessorStream<
+ StreamingProcessor,
+ &StreamingProcessor::updateRecordingStream>(
+ mStreamingProcessor,
+ params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update recording stream: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
+ }
+ } else {
+ // Maintain call sequencing for HALv2 devices.
+ res = updateProcessorStream<
+ StreamingProcessor,
+ &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
+ params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
}
Vector<int32_t> outputStreams;
@@ -1036,6 +1149,16 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
outputStreams);
+
+ // startStream might trigger a configureStream call, and the device might fail
+ // configureStream when the jpeg size is larger than the video size. Try again
+ // with the jpeg size overridden to the video size.
+ if (res == BAD_VALUE) {
+ overrideVideoSnapshotSize(params);
+ res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+ outputStreams);
+ }
+
if (res != OK) {
ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1120,6 +1243,8 @@ status_t Camera2Client::autoFocus() {
{
SharedParameters::Lock l(mParameters);
if (l.mParameters.state < Parameters::PREVIEW) {
+ ALOGE("%s: Camera %d: Call autoFocus when preview is inactive (state = %d).",
+ __FUNCTION__, mCameraId, l.mParameters.state);
return INVALID_OPERATION;
}
@@ -1162,7 +1287,7 @@ status_t Camera2Client::autoFocus() {
* Handle quirk mode for AF in scene modes
*/
if (l.mParameters.quirks.triggerAfWithAuto &&
- l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED &&
+ l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED &&
l.mParameters.focusMode != Parameters::FOCUS_MODE_AUTO &&
!l.mParameters.focusingAreas[0].isEmpty()) {
ALOGV("%s: Quirk: Switching from focusMode %d to AUTO",
@@ -1219,6 +1344,9 @@ status_t Camera2Client::cancelAutoFocus() {
return OK;
}
+ if (l.mParameters.zslMode) {
+ mZslProcessor->clearZslQueue();
+ }
}
syncWithDevice();
@@ -1266,13 +1394,28 @@ status_t Camera2Client::takePicture(int msgType) {
ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
+ int lastJpegStreamId = mJpegProcessor->getStreamId();
res = updateProcessorStream(mJpegProcessor, l.mParameters);
+ // If the video snapshot fails to configureStream, try overriding the video
+ // snapshot size to the video size
+ if (res == BAD_VALUE && l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
+ overrideVideoSnapshotSize(l.mParameters);
+ res = updateProcessorStream(mJpegProcessor, l.mParameters);
+ }
if (res != OK) {
ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
takePictureCounter = ++l.mParameters.takePictureCounter;
+
+ // Clear ZSL buffer queue when Jpeg size is changed.
+ bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
+ if (l.mParameters.zslMode && jpegStreamChanged) {
+ ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed",
+ __FUNCTION__, mCameraId);
+ mZslProcessor->clearZslQueue();
+ }
}
ATRACE_ASYNC_BEGIN(kTakepictureLabel, takePictureCounter);
@@ -1298,8 +1441,14 @@ status_t Camera2Client::setParameters(const String8& params) {
SharedParameters::Lock l(mParameters);
+ Parameters::focusMode_t focusModeBefore = l.mParameters.focusMode;
res = l.mParameters.set(params);
if (res != OK) return res;
+ Parameters::focusMode_t focusModeAfter = l.mParameters.focusMode;
+
+ if (l.mParameters.zslMode && focusModeAfter != focusModeBefore) {
+ mZslProcessor->clearZslQueue();
+ }
res = updateRequests(l.mParameters);
@@ -1310,7 +1459,8 @@ String8 Camera2Client::getParameters() const {
ATRACE_CALL();
ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
Mutex::Autolock icl(mBinderSerializationLock);
- if ( checkPid(__FUNCTION__) != OK) return String8();
+ // The camera service itself can always get the parameters
+ if (getCallingPid() != mServicePid && checkPid(__FUNCTION__) != OK) return String8();
SharedParameters::ReadLock l(mParameters);
@@ -1390,6 +1540,13 @@ status_t Camera2Client::commandEnableShutterSoundL(bool enable) {
return OK;
}
+ // The camera2 API legacy mode can unconditionally disable the shutter sound
+ if (mLegacyMode) {
+ ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
+ l.mParameters.playShutterSound = false;
+ return OK;
+ }
+
// Disabling shutter sound may not be allowed. In that case only
// allow the mediaserver process to disable the sound.
char value[PROPERTY_VALUE_MAX];
@@ -1655,8 +1812,8 @@ int Camera2Client::getZslStreamId() const {
}
status_t Camera2Client::registerFrameListener(int32_t minId, int32_t maxId,
- wp<camera2::FrameProcessor::FilteredListener> listener) {
- return mFrameProcessor->registerListener(minId, maxId, listener);
+ wp<camera2::FrameProcessor::FilteredListener> listener, bool sendPartials) {
+ return mFrameProcessor->registerListener(minId, maxId, listener, sendPartials);
}
status_t Camera2Client::removeFrameListener(int32_t minId, int32_t maxId,
@@ -1825,6 +1982,18 @@ status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor,
return res;
}
+status_t Camera2Client::overrideVideoSnapshotSize(Parameters &params) {
+ ALOGV("%s: Camera %d: configure still size to video size before recording"
+ , __FUNCTION__, mCameraId);
+ params.overrideJpegSizeByVideoSize();
+ status_t res = updateProcessorStream(mJpegProcessor, params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't override video snapshot size to video size: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ return res;
+}
+
const char* Camera2Client::kAutofocusLabel = "autofocus";
const char* Camera2Client::kTakepictureLabel = "take_picture";
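
To make the BAD_VALUE fallback in startRecordingL() concrete: if configuring streams rejects the current JPEG size, the client clamps it to the video size and reconfigures once. Below is a minimal standalone sketch of that retry shape; Device, Params, and the status values are stand-ins, not the real AOSP types.

    // Hedged sketch: on BAD_VALUE, shrink the JPEG size to the video size
    // and retry stream configuration once.
    #include <cstdio>

    enum Status { OK = 0, BAD_VALUE = -22 };

    struct Params { int jpegW, jpegH, videoW, videoH; };

    struct Device {
        // Assumption for the sketch: some HALs reject a JPEG stream larger
        // than the video stream while a recording stream is configured.
        Status configureStreams(const Params& p) const {
            return (p.jpegW > p.videoW || p.jpegH > p.videoH) ? BAD_VALUE : OK;
        }
    };

    Status startRecordingStreams(const Device& dev, Params& p) {
        Status res = dev.configureStreams(p);
        if (res == BAD_VALUE) {
            // Mirrors overrideVideoSnapshotSize(): clamp JPEG to video size.
            p.jpegW = p.videoW;
            p.jpegH = p.videoH;
            res = dev.configureStreams(p);
        }
        return res;
    }

    int main() {
        Device dev;
        Params p{4000, 3000, 1920, 1080};
        Status res = startRecordingStreams(dev, p);
        std::printf("result=%d, jpeg now %dx%d\n", res, p.jpegW, p.jpegH);
        return 0;
    }
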
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index fe0bf74..d68bb29 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -90,7 +90,7 @@ public:
int clientPid,
uid_t clientUid,
int servicePid,
- int deviceVersion);
+ bool legacyMode);
virtual ~Camera2Client();
@@ -118,7 +118,8 @@ public:
int getZslStreamId() const;
status_t registerFrameListener(int32_t minId, int32_t maxId,
- wp<camera2::FrameProcessor::FilteredListener> listener);
+ wp<camera2::FrameProcessor::FilteredListener> listener,
+ bool sendPartials = true);
status_t removeFrameListener(int32_t minId, int32_t maxId,
wp<camera2::FrameProcessor::FilteredListener> listener);
@@ -170,7 +171,6 @@ private:
void setPreviewCallbackFlagL(Parameters &params, int flag);
status_t updateRequests(Parameters &params);
- int mDeviceVersion;
// Used with stream IDs
static const int NO_STREAM = -1;
@@ -204,9 +204,13 @@ private:
bool mAfInMotion;
/** Utility members */
+ bool mLegacyMode;
// Wait until the camera device has received the latest control settings
status_t syncWithDevice();
+
+ // Video snapshot jpeg size overriding helper function
+ status_t overrideVideoSnapshotSize(Parameters &params);
};
}; // namespace android
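
The `sendPartials = true` default on registerFrameListener above keeps every existing caller source-compatible while letting the capture paths opt out. A small illustration of that pattern, with an illustrative free function rather than the real signature:

    // Adding a trailing defaulted parameter: old call sites compile
    // unchanged, new ones can opt out of partial results.
    #include <cstdio>

    void registerFrameListener(int minId, int maxId, bool sendPartials = true) {
        std::printf("listening [%d, %d), sendPartials=%d\n",
                    minId, maxId, sendPartials);
    }

    int main() {
        registerFrameListener(1, 2);                          // pre-existing caller
        registerFrameListener(3, 4, /*sendPartials*/ false);  // capture path
        return 0;
    }
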
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 30b7bb8..1a4d9a6 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -38,7 +38,7 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService,
const String16& clientPackageName,
int cameraId, int cameraFacing,
int clientPid, int clientUid,
- int servicePid):
+ int servicePid, bool legacyMode):
Client(cameraService, cameraClient, clientPackageName,
cameraId, cameraFacing, clientPid, clientUid, servicePid)
{
@@ -54,6 +54,7 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService,
// Callback is disabled by default
mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
+ mLegacyMode = legacyMode;
mPlayShutterSound = true;
LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
@@ -79,7 +80,7 @@ status_t CameraClient::initialize(camera_module_t *module) {
ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
mHardware.clear();
- return NO_INIT;
+ return res;
}
mHardware->setCallbacks(notifyCallback,
@@ -121,6 +122,16 @@ status_t CameraClient::dump(int fd, const Vector<String16>& args) {
mClientPid);
len = (len > SIZE - 1) ? SIZE - 1 : len;
write(fd, buffer, len);
+
+ len = snprintf(buffer, SIZE, "Latest set parameters:\n");
+ len = (len > SIZE - 1) ? SIZE - 1 : len;
+ write(fd, buffer, len);
+
+ mLatestSetParameters.dump(fd, args);
+
+ const char *enddump = "\n\n";
+ write(fd, enddump, strlen(enddump));
+
return mHardware->dump(fd, args);
}
@@ -549,6 +560,7 @@ status_t CameraClient::setParameters(const String8& params) {
status_t result = checkPidAndHardware();
if (result != NO_ERROR) return result;
+ mLatestSetParameters = CameraParameters(params);
CameraParameters p(params);
return mHardware->setParameters(p);
}
@@ -556,7 +568,8 @@ status_t CameraClient::setParameters(const String8& params) {
// get preview/capture parameters - key/value pairs
String8 CameraClient::getParameters() const {
Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return String8();
+ // The camera service itself can always get the parameters
+ if (getCallingPid() != mServicePid && checkPidAndHardware() != NO_ERROR) return String8();
String8 params(mHardware->getParameters().flatten());
LOG1("getParameters (pid %d) (%s)", getCallingPid(), params.string());
@@ -575,6 +588,13 @@ status_t CameraClient::enableShutterSound(bool enable) {
return OK;
}
+ // The camera2 API legacy mode can unconditionally disable the shutter sound
+ if (mLegacyMode) {
+ ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
+ mPlayShutterSound = false;
+ return OK;
+ }
+
// Disabling shutter sound may not be allowed. In that case only
// allow the mediaserver process to disable the sound.
char value[PROPERTY_VALUE_MAX];
@@ -929,7 +949,20 @@ void CameraClient::copyFrameAndPostCopiedFrame(
}
previewBuffer = mPreviewBuffer;
- memcpy(previewBuffer->base(), (uint8_t *)heap->base() + offset, size);
+ void* previewBufferBase = previewBuffer->base();
+ void* heapBase = heap->base();
+
+ if (heapBase == MAP_FAILED) {
+ ALOGE("%s: Failed to mmap heap for preview frame.", __FUNCTION__);
+ mLock.unlock();
+ return;
+ } else if (previewBufferBase == MAP_FAILED) {
+ ALOGE("%s: Failed to mmap preview buffer for preview frame.", __FUNCTION__);
+ mLock.unlock();
+ return;
+ }
+
+ memcpy(previewBufferBase, (uint8_t *) heapBase + offset, size);
sp<MemoryBase> frame = new MemoryBase(previewBuffer, 0, size);
if (frame == 0) {
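
The MAP_FAILED checks added above guard the memcpy against heaps whose backing mmap failed, which the old code copied from unconditionally. A standalone sketch of the same guard, with the MemoryHeapBase objects reduced to raw pointers:

    // Hedged sketch of the defensive copy: validate both mapped bases
    // before touching the bytes.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <sys/mman.h>   // MAP_FAILED

    bool copyPreviewFrame(void* dstBase, const void* srcBase,
                          size_t offset, size_t size) {
        if (srcBase == MAP_FAILED) {
            std::fprintf(stderr, "source heap failed to mmap\n");
            return false;
        }
        if (dstBase == MAP_FAILED) {
            std::fprintf(stderr, "preview buffer failed to mmap\n");
            return false;
        }
        std::memcpy(dstBase,
                    static_cast<const uint8_t*>(srcBase) + offset, size);
        return true;
    }

    int main() {
        uint8_t src[16] = {42};
        uint8_t dst[8];
        bool ok = copyPreviewFrame(dst, src, /*offset*/ 0, /*size*/ sizeof(dst));
        std::printf("copied=%d first=%d\n", ok, dst[0]);
        return 0;
    }
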
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 4b89564..63a9d0f 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -64,7 +64,8 @@ public:
int cameraFacing,
int clientPid,
int clientUid,
- int servicePid);
+ int servicePid,
+ bool legacyMode = false);
~CameraClient();
status_t initialize(camera_module_t *module);
@@ -129,6 +130,7 @@ private:
int mPreviewCallbackFlag;
int mOrientation; // Current display orientation
bool mPlayShutterSound;
+ bool mLegacyMode; // camera2 api legacy mode?
// Ensures atomicity among the public methods
mutable Mutex mLock;
@@ -140,6 +142,9 @@ private:
// of the original one), we allocate mPreviewBuffer and reuse it if possible.
sp<MemoryHeapBase> mPreviewBuffer;
+ // Debugging information
+ CameraParameters mLatestSetParameters;
+
// We need to avoid the deadlock when the incoming command thread and
// the CameraHardwareInterface callback thread both want to grab mLock.
// An extra flag is used to tell the callback thread that it should stop
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index d2ac79c..bf3318e 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -110,11 +110,13 @@ status_t CallbackProcessor::updateStream(const Parameters &params) {
if (!mCallbackToApp && mCallbackConsumer == 0) {
// Create CPU buffer queue endpoint, since app hasn't given us one
// Make it async to avoid disconnect deadlocks
- sp<BufferQueue> bq = new BufferQueue();
- mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mCallbackConsumer = new CpuConsumer(consumer, kCallbackHeapCount);
mCallbackConsumer->setFrameAvailableListener(this);
mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
- mCallbackWindow = new Surface(bq);
+ mCallbackWindow = new Surface(producer);
}
if (mCallbackStreamId != NO_STREAM) {
@@ -153,7 +155,7 @@ status_t CallbackProcessor::updateStream(const Parameters &params) {
callbackFormat, params.previewFormat);
res = device->createStream(mCallbackWindow,
params.previewWidth, params.previewHeight,
- callbackFormat, 0, &mCallbackStreamId);
+ callbackFormat, &mCallbackStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index f5c28ed..9849f4d 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -106,13 +106,12 @@ void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
}
}
-void CaptureSequencer::onFrameAvailable(int32_t requestId,
- const CameraMetadata &frame) {
- ALOGV("%s: Listener found new frame", __FUNCTION__);
+void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
ATRACE_CALL();
+ ALOGV("%s: New result available.", __FUNCTION__);
Mutex::Autolock l(mInputMutex);
- mNewFrameId = requestId;
- mNewFrame = frame;
+ mNewFrameId = result.mResultExtras.requestId;
+ mNewFrame = result.mMetadata;
if (!mNewFrameReceived) {
mNewFrameReceived = true;
mNewFrameSignal.signal();
@@ -351,8 +350,10 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
return DONE;
}
+ // We don't want to get partial results for ZSL capture.
client->registerFrameListener(mCaptureId, mCaptureId + 1,
- this);
+ this,
+ /*sendPartials*/false);
// TODO: Actually select the right thing here.
res = processor->pushToReprocess(mCaptureId);
@@ -394,8 +395,14 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
bool isAeConverged = false;
// Get the onFrameAvailable callback when the requestID == mCaptureId
+ // We don't want partial results for normal capture either, as we need
+ // to get ANDROID_SENSOR_TIMESTAMP from the capture result, and a partial
+ // result is not guaranteed to include that metadata.
+ // TODO: Update to use the HALv3 shutter notification to remove the
+ // need for this listener and make it faster. See bug 12530628.
client->registerFrameListener(mCaptureId, mCaptureId + 1,
- this);
+ this,
+ /*sendPartials*/false);
{
Mutex::Autolock l(mInputMutex);
@@ -438,11 +445,18 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
if (mNewAEState) {
if (!mAeInPrecapture) {
// Waiting to see PRECAPTURE state
- if (mAETriggerId == mTriggerId &&
- mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
- ALOGV("%s: Got precapture start", __FUNCTION__);
- mAeInPrecapture = true;
- mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ if (mAETriggerId == mTriggerId) {
+ if (mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ ALOGV("%s: Got precapture start", __FUNCTION__);
+ mAeInPrecapture = true;
+ mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ } else if (mAEState == ANDROID_CONTROL_AE_STATE_CONVERGED ||
+ mAEState == ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED) {
+ // It is legal to transition to CONVERGED or FLASH_REQUIRED
+ // directly after a trigger.
+ ALOGV("%s: AE is already in good state, start capture", __FUNCTION__);
+ return STANDARD_CAPTURE;
+ }
}
} else {
// Waiting to see PRECAPTURE state end
@@ -585,12 +599,15 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP);
if (entry.count == 0) {
ALOGE("No timestamp field in capture frame!");
- }
- if (entry.data.i64[0] != mCaptureTimestamp) {
- ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 ","
- " captured buffer %" PRId64,
- entry.data.i64[0],
- mCaptureTimestamp);
+ } else if (entry.count == 1) {
+ if (entry.data.i64[0] != mCaptureTimestamp) {
+ ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 ","
+ " captured buffer %" PRId64,
+ entry.data.i64[0],
+ mCaptureTimestamp);
+ }
+ } else {
+ ALOGE("Timestamp metadata is malformed!");
}
client->removeFrameListener(mCaptureId, mCaptureId + 1, this);
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index 9fb4ee7..d42ab13 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -24,6 +24,7 @@
#include <utils/Mutex.h>
#include <utils/Condition.h>
#include "camera/CameraMetadata.h"
+#include "camera/CaptureResult.h"
#include "Parameters.h"
#include "FrameProcessor.h"
@@ -61,8 +62,8 @@ class CaptureSequencer:
// Notifications about AE state changes
void notifyAutoExposure(uint8_t newState, int triggerId);
- // Notifications from the frame processor
- virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+ // Notification from the frame processor
+ virtual void onResultAvailable(const CaptureResult &result);
// Notifications from the JPEG processor
void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index dd5b27c..312a78c 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -40,7 +40,12 @@ FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
{
SharedParameters::Lock l(client->getParameters());
- mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+ if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ mUsePartialResult = (mNumPartialResults > 1);
+ } else {
+ mUsePartialResult = l.mParameters.quirks.partialResults;
+ }
// Initialize starting 3A state
m3aState.afTriggerId = l.mParameters.afTriggerCounter;
@@ -55,7 +60,7 @@ FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
FrameProcessor::~FrameProcessor() {
}
-bool FrameProcessor::processSingleFrame(CameraMetadata &frame,
+bool FrameProcessor::processSingleFrame(CaptureResult &frame,
const sp<CameraDeviceBase> &device) {
sp<Camera2Client> client = mClient.promote();
@@ -63,17 +68,21 @@ bool FrameProcessor::processSingleFrame(CameraMetadata &frame,
return false;
}
- bool partialResult = false;
- if (mUsePartialQuirk) {
- camera_metadata_entry_t entry;
- entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
- if (entry.count > 0 &&
- entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- partialResult = true;
+ bool isPartialResult = false;
+ if (mUsePartialResult) {
+ if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ isPartialResult = frame.mResultExtras.partialResultCount < mNumPartialResults;
+ } else {
+ camera_metadata_entry_t entry;
+ entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+ if (entry.count > 0 &&
+ entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ isPartialResult = true;
+ }
}
}
- if (!partialResult && processFaceDetect(frame, client) != OK) {
+ if (!isPartialResult && processFaceDetect(frame.mMetadata, client) != OK) {
return false;
}
@@ -212,14 +221,15 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
return OK;
}
-status_t FrameProcessor::process3aState(const CameraMetadata &frame,
+status_t FrameProcessor::process3aState(const CaptureResult &frame,
const sp<Camera2Client> &client) {
ATRACE_CALL();
+ const CameraMetadata &metadata = frame.mMetadata;
camera_metadata_ro_entry_t entry;
int cameraId = client->getCameraId();
- entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+ entry = metadata.find(ANDROID_REQUEST_FRAME_COUNT);
int32_t frameNumber = entry.data.i32[0];
// Don't send 3A notifications for the same frame number twice
@@ -238,26 +248,31 @@ status_t FrameProcessor::process3aState(const CameraMetadata &frame,
// TODO: Also use AE mode, AE trigger ID
- gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_MODE,
+ gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
&new3aState.afMode, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_MODE,
+ gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
&new3aState.awbMode, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE,
+ gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
&new3aState.aeState, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_STATE,
+ gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
&new3aState.afState, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_STATE,
+ gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
&new3aState.awbState, frameNumber, cameraId);
- gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID,
- &new3aState.afTriggerId, frameNumber, cameraId);
+ if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ new3aState.afTriggerId = frame.mResultExtras.afTriggerId;
+ new3aState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+ } else {
+ gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AF_TRIGGER_ID,
+ &new3aState.afTriggerId, frameNumber, cameraId);
- gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AE_PRECAPTURE_ID,
- &new3aState.aeTriggerId, frameNumber, cameraId);
+ gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+ &new3aState.aeTriggerId, frameNumber, cameraId);
+ }
if (!gotAllStates) return BAD_VALUE;
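
The key branch in the hunk above: on HAL >= 3.2 a result is partial while its partialResultCount is still below the advertised number of partial results; older HALs flag partials via the ANDROID_QUIRKS_PARTIAL_RESULT tag. A compact sketch of that decision, with the version constant and quirk value as stand-ins for the real HAL definitions:

    // Hedged sketch of the partial-result test; constants are stand-ins.
    #include <cstdint>
    #include <cstdio>

    constexpr int kDeviceVersion3_2 = 0x302;  // stand-in for CAMERA_DEVICE_API_VERSION_3_2
    constexpr uint8_t kQuirkPartial = 1;      // stand-in for ..._PARTIAL_RESULT_PARTIAL

    bool isPartialResult(int deviceVersion, uint32_t partialResultCount,
                         uint32_t numPartialResults, uint8_t quirkValue) {
        if (deviceVersion >= kDeviceVersion3_2) {
            // The final result carries partialResultCount == numPartialResults.
            return partialResultCount < numPartialResults;
        }
        // Legacy path: the quirk tag marks partial results explicitly.
        return quirkValue == kQuirkPartial;
    }

    int main() {
        std::printf("%d\n", isPartialResult(0x302, 1, 3, 0));             // 1: partial
        std::printf("%d\n", isPartialResult(0x302, 3, 3, 0));             // 0: final
        std::printf("%d\n", isPartialResult(0x200, 0, 1, kQuirkPartial)); // 1: quirk
        return 0;
    }
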
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 856ad32..68cf55b 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -51,14 +51,14 @@ class FrameProcessor : public FrameProcessorBase {
void processNewFrames(const sp<Camera2Client> &client);
- virtual bool processSingleFrame(CameraMetadata &frame,
+ virtual bool processSingleFrame(CaptureResult &frame,
const sp<CameraDeviceBase> &device);
status_t processFaceDetect(const CameraMetadata &frame,
const sp<Camera2Client> &client);
// Send 3A state change notifications to client based on frame metadata
- status_t process3aState(const CameraMetadata &frame,
+ status_t process3aState(const CaptureResult &frame,
const sp<Camera2Client> &client);
// Helper for process3aState
@@ -91,8 +91,8 @@ class FrameProcessor : public FrameProcessorBase {
}
} m3aState;
- // Whether the partial result quirk is enabled for this device
- bool mUsePartialQuirk;
+ // Whether partial results are enabled for this device
+ bool mUsePartialResult;
// Track most recent frame number for which 3A notifications were sent for.
// Used to filter against sending 3A notifications for the same frame
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index 2de7a2b..b433781 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -73,30 +73,43 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
}
// Find out buffer size for JPEG
- camera_metadata_ro_entry_t maxJpegSize =
- params.staticInfo(ANDROID_JPEG_MAX_SIZE);
- if (maxJpegSize.count == 0) {
- ALOGE("%s: Camera %d: Can't find ANDROID_JPEG_MAX_SIZE!",
- __FUNCTION__, mId);
+ ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight);
+ if (maxJpegSize <= 0) {
+ ALOGE("%s: Camera %d: Jpeg buffer size (%zu) is invalid ",
+ __FUNCTION__, mId, maxJpegSize);
return INVALID_OPERATION;
}
if (mCaptureConsumer == 0) {
// Create CPU buffer queue endpoint
- sp<BufferQueue> bq = new BufferQueue();
- mCaptureConsumer = new CpuConsumer(bq, 1);
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mCaptureConsumer = new CpuConsumer(consumer, 1);
mCaptureConsumer->setFrameAvailableListener(this);
mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
- mCaptureWindow = new Surface(bq);
+ mCaptureWindow = new Surface(producer);
+ }
+
+ // Since ashmem heaps are rounded up to page size, don't reallocate just
+ // because the capture heap isn't exactly the required JPEG buffer size
+ const size_t HEAP_SLACK_FACTOR = 2;
+ if (mCaptureHeap == 0 ||
+ (mCaptureHeap->getSize() < static_cast<size_t>(maxJpegSize)) ||
+ (mCaptureHeap->getSize() >
+ static_cast<size_t>(maxJpegSize) * HEAP_SLACK_FACTOR) ) {
// Create memory for API consumption
- mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
- "Camera2Client::CaptureHeap");
+ mCaptureHeap.clear();
+ mCaptureHeap =
+ new MemoryHeapBase(maxJpegSize, 0, "Camera2Client::CaptureHeap");
if (mCaptureHeap->getSize() == 0) {
ALOGE("%s: Camera %d: Unable to allocate memory for capture",
__FUNCTION__, mId);
return NO_MEMORY;
}
}
+ ALOGV("%s: Camera %d: JPEG capture heap now %d bytes; requested %d bytes",
+ __FUNCTION__, mId, mCaptureHeap->getSize(), maxJpegSize);
if (mCaptureStreamId != NO_STREAM) {
// Check if stream parameters have to change
@@ -132,8 +145,7 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
// Create stream for HAL production
res = device->createStream(mCaptureWindow,
params.pictureWidth, params.pictureHeight,
- HAL_PIXEL_FORMAT_BLOB, maxJpegSize.data.i32[0],
- &mCaptureStreamId);
+ HAL_PIXEL_FORMAT_BLOB, &mCaptureStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for capture: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 081a6e6..7b90d28 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -29,6 +29,9 @@
#include "Parameters.h"
#include "system/camera.h"
+#include "hardware/camera_common.h"
+#include <media/MediaProfiles.h>
+#include <media/mediarecorder.h>
namespace android {
namespace camera2 {
@@ -43,7 +46,7 @@ Parameters::Parameters(int cameraId,
Parameters::~Parameters() {
}
-status_t Parameters::initialize(const CameraMetadata *info) {
+status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) {
status_t res;
if (info->entryCount() == 0) {
@@ -51,6 +54,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
return BAD_VALUE;
}
Parameters::info = info;
+ mDeviceVersion = deviceVersion;
res = buildFastInfo();
if (res != OK) return res;
@@ -59,12 +63,42 @@ status_t Parameters::initialize(const CameraMetadata *info) {
if (res != OK) return res;
const Size MAX_PREVIEW_SIZE = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
- res = getFilteredPreviewSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes);
+ // Treat the H.264 max size as the max supported video size.
+ MediaProfiles *videoEncoderProfiles = MediaProfiles::getInstance();
+ int32_t maxVideoWidth = videoEncoderProfiles->getVideoEncoderParamByName(
+ "enc.vid.width.max", VIDEO_ENCODER_H264);
+ int32_t maxVideoHeight = videoEncoderProfiles->getVideoEncoderParamByName(
+ "enc.vid.height.max", VIDEO_ENCODER_H264);
+ const Size MAX_VIDEO_SIZE = {maxVideoWidth, maxVideoHeight};
+
+ res = getFilteredSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes);
+ if (res != OK) return res;
+ res = getFilteredSizes(MAX_VIDEO_SIZE, &availableVideoSizes);
if (res != OK) return res;
- // TODO: Pick more intelligently
- previewWidth = availablePreviewSizes[0].width;
- previewHeight = availablePreviewSizes[0].height;
+ // Select the largest initial preview and video size that is under the
+ // initial bound and appears on both the preview and recording size lists
+ previewWidth = 0;
+ previewHeight = 0;
+ for (size_t i = 0 ; i < availablePreviewSizes.size(); i++) {
+ int newWidth = availablePreviewSizes[i].width;
+ int newHeight = availablePreviewSizes[i].height;
+ if (newWidth >= previewWidth && newHeight >= previewHeight &&
+ newWidth <= MAX_INITIAL_PREVIEW_WIDTH &&
+ newHeight <= MAX_INITIAL_PREVIEW_HEIGHT) {
+ for (size_t j = 0; j < availableVideoSizes.size(); j++) {
+ if (availableVideoSizes[j].width == newWidth &&
+ availableVideoSizes[j].height == newHeight) {
+ previewWidth = newWidth;
+ previewHeight = newHeight;
+ }
+ }
+ }
+ }
+ if (previewWidth == 0) {
+ ALOGE("%s: No initial preview size can be found!", __FUNCTION__);
+ return BAD_VALUE;
+ }
videoWidth = previewWidth;
videoHeight = previewHeight;
@@ -84,8 +118,17 @@ status_t Parameters::initialize(const CameraMetadata *info) {
ALOGV("Supported preview sizes are: %s", supportedPreviewSizes.string());
params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
supportedPreviewSizes);
+
+ String8 supportedVideoSizes;
+ for (size_t i = 0; i < availableVideoSizes.size(); i++) {
+ if (i != 0) supportedVideoSizes += ",";
+ supportedVideoSizes += String8::format("%dx%d",
+ availableVideoSizes[i].width,
+ availableVideoSizes[i].height);
+ }
+ ALOGV("Supported video sizes are: %s", supportedVideoSizes.string());
params.set(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES,
- supportedPreviewSizes);
+ supportedVideoSizes);
}
camera_metadata_ro_entry_t availableFpsRanges =
@@ -99,16 +142,14 @@ status_t Parameters::initialize(const CameraMetadata *info) {
previewTransform = degToTransform(0,
cameraFacing == CAMERA_FACING_FRONT);
- camera_metadata_ro_entry_t availableFormats =
- staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
-
{
String8 supportedPreviewFormats;
+ SortedVector<int32_t> outputFormats = getAvailableOutputFormats();
bool addComma = false;
- for (size_t i=0; i < availableFormats.count; i++) {
+ for (size_t i=0; i < outputFormats.size(); i++) {
if (addComma) supportedPreviewFormats += ",";
addComma = true;
- switch (availableFormats.data.i32[i]) {
+ switch (outputFormats[i]) {
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
supportedPreviewFormats +=
CameraParameters::PIXEL_FORMAT_YUV422SP;
@@ -150,7 +191,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
default:
ALOGW("%s: Camera %d: Unknown preview format: %x",
- __FUNCTION__, cameraId, availableFormats.data.i32[i]);
+ __FUNCTION__, cameraId, outputFormats[i]);
addComma = false;
break;
}
@@ -222,24 +263,26 @@ status_t Parameters::initialize(const CameraMetadata *info) {
supportedPreviewFrameRates);
}
- camera_metadata_ro_entry_t availableJpegSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, 2);
- if (!availableJpegSizes.count) return NO_INIT;
+ Vector<Size> availableJpegSizes = getAvailableJpegSizes();
+ if (!availableJpegSizes.size()) return NO_INIT;
// TODO: Pick maximum
- pictureWidth = availableJpegSizes.data.i32[0];
- pictureHeight = availableJpegSizes.data.i32[1];
+ pictureWidth = availableJpegSizes[0].width;
+ pictureHeight = availableJpegSizes[0].height;
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+ pictureSizeOverriden = false;
params.setPictureSize(pictureWidth,
pictureHeight);
{
String8 supportedPictureSizes;
- for (size_t i=0; i < availableJpegSizes.count; i += 2) {
+ for (size_t i=0; i < availableJpegSizes.size(); i++) {
if (i != 0) supportedPictureSizes += ",";
supportedPictureSizes += String8::format("%dx%d",
- availableJpegSizes.data.i32[i],
- availableJpegSizes.data.i32[i+1]);
+ availableJpegSizes[i].width,
+ availableJpegSizes[i].height);
}
params.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
supportedPictureSizes);
@@ -470,7 +513,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
supportedAntibanding);
}
- sceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+ sceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED;
params.set(CameraParameters::KEY_SCENE_MODE,
CameraParameters::SCENE_MODE_AUTO);
@@ -486,7 +529,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
if (addComma) supportedSceneModes += ",";
addComma = true;
switch (availableSceneModes.data.u8[i]) {
- case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED:
+ case ANDROID_CONTROL_SCENE_MODE_DISABLED:
noSceneModes = true;
break;
case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
@@ -579,8 +622,8 @@ status_t Parameters::initialize(const CameraMetadata *info) {
camera_metadata_ro_entry_t availableAeModes =
staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
+ flashMode = Parameters::FLASH_MODE_OFF;
if (isFlashAvailable) {
- flashMode = Parameters::FLASH_MODE_OFF;
params.set(CameraParameters::KEY_FLASH_MODE,
CameraParameters::FLASH_MODE_OFF);
@@ -600,11 +643,10 @@ status_t Parameters::initialize(const CameraMetadata *info) {
params.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES,
supportedFlashModes);
} else {
- flashMode = Parameters::FLASH_MODE_OFF;
- params.set(CameraParameters::KEY_FLASH_MODE,
- CameraParameters::FLASH_MODE_OFF);
- params.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES,
- CameraParameters::FLASH_MODE_OFF);
+ // No flash means the flash mode and supported flash modes keys should be
+ // absent, so remove them just to be safe
+ params.remove(CameraParameters::KEY_FLASH_MODE);
+ params.remove(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
}
camera_metadata_ro_entry_t minFocusDistance =
@@ -624,8 +666,17 @@ status_t Parameters::initialize(const CameraMetadata *info) {
focusMode = Parameters::FOCUS_MODE_AUTO;
params.set(CameraParameters::KEY_FOCUS_MODE,
CameraParameters::FOCUS_MODE_AUTO);
- String8 supportedFocusModes(CameraParameters::FOCUS_MODE_INFINITY);
- bool addComma = true;
+ String8 supportedFocusModes;
+ bool addComma = false;
+ camera_metadata_ro_entry_t focusDistanceCalibration =
+ staticInfo(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, 0, 0, false);
+
+ if (focusDistanceCalibration.count &&
+ focusDistanceCalibration.data.u8[0] !=
+ ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED) {
+ supportedFocusModes += CameraParameters::FOCUS_MODE_INFINITY;
+ addComma = true;
+ }
for (size_t i=0; i < availableAfModes.count; i++) {
if (addComma) supportedFocusModes += ",";
@@ -668,13 +719,13 @@ status_t Parameters::initialize(const CameraMetadata *info) {
focusState = ANDROID_CONTROL_AF_STATE_INACTIVE;
shadowFocusMode = FOCUS_MODE_INVALID;
- camera_metadata_ro_entry_t max3aRegions =
- staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1);
- if (!max3aRegions.count) return NO_INIT;
+ camera_metadata_ro_entry_t max3aRegions = staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+ Parameters::NUM_REGION, Parameters::NUM_REGION);
+ if (max3aRegions.count != Parameters::NUM_REGION) return NO_INIT;
int32_t maxNumFocusAreas = 0;
if (focusMode != Parameters::FOCUS_MODE_FIXED) {
- maxNumFocusAreas = max3aRegions.data.i32[0];
+ maxNumFocusAreas = max3aRegions.data.i32[Parameters::REGION_AF];
}
params.set(CameraParameters::KEY_MAX_NUM_FOCUS_AREAS, maxNumFocusAreas);
params.set(CameraParameters::KEY_FOCUS_AREAS,
@@ -734,7 +785,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0));
params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS,
- max3aRegions.data.i32[0]);
+ max3aRegions.data.i32[Parameters::REGION_AE]);
params.set(CameraParameters::KEY_METERING_AREAS,
"(0,0,0,0,0)");
@@ -931,13 +982,19 @@ status_t Parameters::buildFastInfo() {
bool fixedLens = minFocusDistance.count == 0 ||
minFocusDistance.data.f[0] == 0;
+ camera_metadata_ro_entry_t focusDistanceCalibration =
+ staticInfo(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, 0, 0,
+ false);
+ bool canFocusInfinity = (focusDistanceCalibration.count &&
+ focusDistanceCalibration.data.u8[0] !=
+ ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED);
+
camera_metadata_ro_entry_t availableFocalLengths =
staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
if (!availableFocalLengths.count) return NO_INIT;
- camera_metadata_ro_entry_t availableFormats =
- staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
- if (!availableFormats.count) return NO_INIT;
+ SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
+ if (!availableFormats.size()) return NO_INIT;
if (sceneModeOverrides.count > 0) {
@@ -982,6 +1039,13 @@ status_t Parameters::buildFastInfo() {
sceneModeOverrides.data.u8[i * kModesPerSceneMode + 2];
switch(afMode) {
case ANDROID_CONTROL_AF_MODE_OFF:
+ if (!fixedLens && !canFocusInfinity) {
+ ALOGE("%s: Camera %d: Scene mode override lists asks for"
+ " fixed focus on a device with focuser but not"
+ " calibrated for infinity focus", __FUNCTION__,
+ cameraId);
+ return NO_INIT;
+ }
modes.focusMode = fixedLens ?
FOCUS_MODE_FIXED : FOCUS_MODE_INFINITY;
break;
@@ -1021,8 +1085,8 @@ status_t Parameters::buildFastInfo() {
// Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888
fastInfo.useFlexibleYuv = false;
- for (size_t i = 0; i < availableFormats.count; i++) {
- if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ for (size_t i = 0; i < availableFormats.size(); i++) {
+ if (availableFormats[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
fastInfo.useFlexibleYuv = true;
break;
}
@@ -1225,8 +1289,7 @@ status_t Parameters::set(const String8& paramString) {
"is active!", __FUNCTION__);
return BAD_VALUE;
}
- camera_metadata_ro_entry_t availableFormats =
- staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+ SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
// If using flexible YUV, always support NV21/YV12. Otherwise, check
// HAL's list.
if (! (fastInfo.useFlexibleYuv &&
@@ -1235,11 +1298,10 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.previewFormat ==
HAL_PIXEL_FORMAT_YV12) ) ) {
// Not using flexible YUV format, so check explicitly
- for (i = 0; i < availableFormats.count; i++) {
- if (availableFormats.data.i32[i] ==
- validatedParams.previewFormat) break;
+ for (i = 0; i < availableFormats.size(); i++) {
+ if (availableFormats[i] == validatedParams.previewFormat) break;
}
- if (i == availableFormats.count) {
+ if (i == availableFormats.size()) {
ALOGE("%s: Requested preview format %s (0x%x) is not supported",
__FUNCTION__, newParams.getPreviewFormat(),
validatedParams.previewFormat);
@@ -1355,17 +1417,16 @@ status_t Parameters::set(const String8& paramString) {
// PICTURE_SIZE
newParams.getPictureSize(&validatedParams.pictureWidth,
&validatedParams.pictureHeight);
- if (validatedParams.pictureWidth == pictureWidth ||
- validatedParams.pictureHeight == pictureHeight) {
- camera_metadata_ro_entry_t availablePictureSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
- for (i = 0; i < availablePictureSizes.count; i+=2) {
- if ((availablePictureSizes.data.i32[i] ==
+ if (validatedParams.pictureWidth != pictureWidth ||
+ validatedParams.pictureHeight != pictureHeight) {
+ Vector<Size> availablePictureSizes = getAvailableJpegSizes();
+ for (i = 0; i < availablePictureSizes.size(); i++) {
+ if ((availablePictureSizes[i].width ==
validatedParams.pictureWidth) &&
- (availablePictureSizes.data.i32[i+1] ==
+ (availablePictureSizes[i].height ==
validatedParams.pictureHeight)) break;
}
- if (i == availablePictureSizes.count) {
+ if (i == availablePictureSizes.size()) {
ALOGE("%s: Requested picture size %d x %d is not supported",
__FUNCTION__, validatedParams.pictureWidth,
validatedParams.pictureHeight);
@@ -1522,7 +1583,7 @@ status_t Parameters::set(const String8& paramString) {
newParams.get(CameraParameters::KEY_SCENE_MODE) );
if (validatedParams.sceneMode != sceneMode &&
validatedParams.sceneMode !=
- ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) {
+ ANDROID_CONTROL_SCENE_MODE_DISABLED) {
camera_metadata_ro_entry_t availableSceneModes =
staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
for (i = 0; i < availableSceneModes.count; i++) {
@@ -1537,7 +1598,7 @@ status_t Parameters::set(const String8& paramString) {
}
}
bool sceneModeSet =
- validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+ validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED;
// FLASH_MODE
if (sceneModeSet) {
@@ -1555,7 +1616,9 @@ status_t Parameters::set(const String8& paramString) {
if (validatedParams.flashMode != flashMode) {
camera_metadata_ro_entry_t flashAvailable =
staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1);
- if (!flashAvailable.data.u8[0] &&
+ bool isFlashAvailable =
+ flashAvailable.data.u8[0] == ANDROID_FLASH_INFO_AVAILABLE_TRUE;
+ if (!isFlashAvailable &&
validatedParams.flashMode != Parameters::FLASH_MODE_OFF) {
ALOGE("%s: Requested flash mode \"%s\" is not supported: "
"No flash on device", __FUNCTION__,
@@ -1580,9 +1643,11 @@ status_t Parameters::set(const String8& paramString) {
newParams.get(CameraParameters::KEY_FLASH_MODE));
return BAD_VALUE;
}
- // Update in case of override
- newParams.set(CameraParameters::KEY_FLASH_MODE,
- flashModeEnumToString(validatedParams.flashMode));
+ // Update in case of override, but only if flash is supported
+ if (isFlashAvailable) {
+ newParams.set(CameraParameters::KEY_FLASH_MODE,
+ flashModeEnumToString(validatedParams.flashMode));
+ }
}
// WHITE_BALANCE
@@ -1667,10 +1732,11 @@ status_t Parameters::set(const String8& paramString) {
// FOCUS_AREAS
res = parseAreas(newParams.get(CameraParameters::KEY_FOCUS_AREAS),
&validatedParams.focusingAreas);
- size_t max3aRegions =
- (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1).data.i32[0];
+ size_t maxAfRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+ Parameters::NUM_REGION, Parameters::NUM_REGION).
+ data.i32[Parameters::REGION_AF];
if (res == OK) res = validateAreas(validatedParams.focusingAreas,
- max3aRegions, AREA_KIND_FOCUS);
+ maxAfRegions, AREA_KIND_FOCUS);
if (res != OK) {
ALOGE("%s: Requested focus areas are malformed: %s",
__FUNCTION__, newParams.get(CameraParameters::KEY_FOCUS_AREAS));
@@ -1700,10 +1766,13 @@ status_t Parameters::set(const String8& paramString) {
newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
// METERING_AREAS
+ size_t maxAeRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+ Parameters::NUM_REGION, Parameters::NUM_REGION).
+ data.i32[Parameters::REGION_AE];
res = parseAreas(newParams.get(CameraParameters::KEY_METERING_AREAS),
&validatedParams.meteringAreas);
if (res == OK) {
- res = validateAreas(validatedParams.meteringAreas, max3aRegions,
+ res = validateAreas(validatedParams.meteringAreas, maxAeRegions,
AREA_KIND_METERING);
}
if (res != OK) {
@@ -1728,21 +1797,26 @@ status_t Parameters::set(const String8& paramString) {
if (validatedParams.videoWidth != videoWidth ||
validatedParams.videoHeight != videoHeight) {
if (state == RECORD) {
- ALOGE("%s: Video size cannot be updated when recording is active!",
- __FUNCTION__);
- return BAD_VALUE;
- }
- for (i = 0; i < availablePreviewSizes.size(); i++) {
- if ((availablePreviewSizes[i].width ==
- validatedParams.videoWidth) &&
- (availablePreviewSizes[i].height ==
- validatedParams.videoHeight)) break;
- }
- if (i == availablePreviewSizes.size()) {
- ALOGE("%s: Requested video size %d x %d is not supported",
- __FUNCTION__, validatedParams.videoWidth,
+ ALOGW("%s: Video size cannot be updated (from %d x %d to %d x %d)"
+ " when recording is active! Ignore the size update!",
+ __FUNCTION__, videoWidth, videoHeight, validatedParams.videoWidth,
validatedParams.videoHeight);
- return BAD_VALUE;
+ validatedParams.videoWidth = videoWidth;
+ validatedParams.videoHeight = videoHeight;
+ newParams.setVideoSize(videoWidth, videoHeight);
+ } else {
+ for (i = 0; i < availableVideoSizes.size(); i++) {
+ if ((availableVideoSizes[i].width ==
+ validatedParams.videoWidth) &&
+ (availableVideoSizes[i].height ==
+ validatedParams.videoHeight)) break;
+ }
+ if (i == availableVideoSizes.size()) {
+ ALOGE("%s: Requested video size %d x %d is not supported",
+ __FUNCTION__, validatedParams.videoWidth,
+ validatedParams.videoHeight);
+ return BAD_VALUE;
+ }
}
}
@@ -1764,6 +1838,7 @@ status_t Parameters::set(const String8& paramString) {
/** Update internal parameters */
*this = validatedParams;
+ updateOverriddenJpegSize();
/** Update external parameters calculated from the internal ones */
@@ -1855,7 +1930,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
// (face detection statistics and face priority scene mode). Map from other
// to the other.
bool sceneModeActive =
- sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+ sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED;
uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO;
if (enableFaceDetect || sceneModeActive) {
reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE;
@@ -1867,7 +1942,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
uint8_t reqSceneMode =
sceneModeActive ? sceneMode :
enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
- (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+ (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED;
res = request->update(ANDROID_CONTROL_SCENE_MODE,
&reqSceneMode, 1);
if (res != OK) return res;
@@ -1988,6 +2063,23 @@ status_t Parameters::updateRequest(CameraMetadata *request) const {
reqMeteringAreas, reqMeteringAreasSize);
if (res != OK) return res;
+ // Set awb regions to be the same as the metering regions if allowed
+ size_t maxAwbRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
+ Parameters::NUM_REGION, Parameters::NUM_REGION).
+ data.i32[Parameters::REGION_AWB];
+ if (maxAwbRegions > 0) {
+ if (maxAwbRegions >= meteringAreas.size()) {
+ res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+ reqMeteringAreas, reqMeteringAreasSize);
+ } else {
+ // Ensure the awb regions are zeroed if the region count is too high.
+ int32_t zeroedAwbAreas[5] = {0, 0, 0, 0, 0};
+ res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+ zeroedAwbAreas, sizeof(zeroedAwbAreas)/sizeof(int32_t));
+ }
+ if (res != OK) return res;
+ }
+
delete[] reqMeteringAreas;
/* don't include jpeg thumbnail size - it's valid for
@@ -2064,6 +2156,52 @@ status_t Parameters::updateRequestJpeg(CameraMetadata *request) const {
return OK;
}
+status_t Parameters::overrideJpegSizeByVideoSize() {
+ if (pictureSizeOverriden) {
+ ALOGV("Picture size has been overridden. Skip overriding");
+ return OK;
+ }
+
+ pictureSizeOverriden = true;
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+ pictureWidth = videoWidth;
+ pictureHeight = videoHeight;
+    // This change of picture size is invisible to the app layer.
+    // Do not update the app-visible parameters.
+ return OK;
+}
+
+status_t Parameters::updateOverriddenJpegSize() {
+ if (!pictureSizeOverriden) {
+ ALOGV("Picture size has not been overridden. Skip checking");
+ return OK;
+ }
+
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+
+ if (pictureWidth <= videoWidth && pictureHeight <= videoHeight) {
+ // Picture size is now smaller than video size. No need to override anymore
+ return recoverOverriddenJpegSize();
+ }
+
+ pictureWidth = videoWidth;
+ pictureHeight = videoHeight;
+
+ return OK;
+}
+
+status_t Parameters::recoverOverriddenJpegSize() {
+ if (!pictureSizeOverriden) {
+ ALOGV("Picture size has not been overridden. Skip recovering");
+ return OK;
+ }
+ pictureSizeOverriden = false;
+ pictureWidth = pictureWidthLastSet;
+ pictureHeight = pictureHeightLastSet;
+ return OK;
+}
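
Taken together, these three helpers implement the video-snapshot JPEG size override. A sketch of the intended call sequence (the client-side call sites are assumptions based on the comments above, not shown in this diff):

    // startRecording(): clamp the JPEG size to the video size so a video
    // snapshot never forces a capture-stream reconfiguration mid-recording.
    params.overrideJpegSizeByVideoSize();

    // Parameters::set(): updateOverriddenJpegSize() re-evaluates the override
    // after every parameter change (wired up in the hunk above).

    // stopRecording(): restore whatever picture size the app last requested.
    params.recoverOverriddenJpegSize();
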
const char* Parameters::getStateName(State state) {
#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
@@ -2083,24 +2221,7 @@ const char* Parameters::getStateName(State state) {
}
int Parameters::formatStringToEnum(const char *format) {
- return
- !format ?
- HAL_PIXEL_FORMAT_YCrCb_420_SP :
- !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV422SP) ?
- HAL_PIXEL_FORMAT_YCbCr_422_SP : // NV16
- !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV420SP) ?
- HAL_PIXEL_FORMAT_YCrCb_420_SP : // NV21
- !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV422I) ?
- HAL_PIXEL_FORMAT_YCbCr_422_I : // YUY2
- !strcmp(format, CameraParameters::PIXEL_FORMAT_YUV420P) ?
- HAL_PIXEL_FORMAT_YV12 : // YV12
- !strcmp(format, CameraParameters::PIXEL_FORMAT_RGB565) ?
- HAL_PIXEL_FORMAT_RGB_565 : // RGB565
- !strcmp(format, CameraParameters::PIXEL_FORMAT_RGBA8888) ?
- HAL_PIXEL_FORMAT_RGBA_8888 : // RGB8888
- !strcmp(format, CameraParameters::PIXEL_FORMAT_BAYER_RGGB) ?
- HAL_PIXEL_FORMAT_RAW_SENSOR : // Raw sensor data
- -1;
+ return CameraParameters::previewFormatToEnum(format);
}
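
The deleted ternary chain now lives in libcamera_client, so there is a single string-to-format mapping. A quick illustrative check, with the expected value taken from the deleted table above:

    // NV21 remains the default preview format when no string is given.
    int fmt = CameraParameters::previewFormatToEnum(
            CameraParameters::PIXEL_FORMAT_YUV420SP);
    // fmt == HAL_PIXEL_FORMAT_YCrCb_420_SP
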
const char* Parameters::formatEnumToString(int format) {
@@ -2228,9 +2349,9 @@ int Parameters::abModeStringToEnum(const char *abMode) {
int Parameters::sceneModeStringToEnum(const char *sceneMode) {
return
!sceneMode ?
- ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED :
+ ANDROID_CONTROL_SCENE_MODE_DISABLED :
!strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ?
- ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED :
+ ANDROID_CONTROL_SCENE_MODE_DISABLED :
!strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ?
ANDROID_CONTROL_SCENE_MODE_ACTION :
!strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ?
@@ -2268,7 +2389,7 @@ Parameters::Parameters::flashMode_t Parameters::flashModeStringToEnum(
const char *flashMode) {
return
!flashMode ?
- Parameters::FLASH_MODE_INVALID :
+ Parameters::FLASH_MODE_OFF :
!strcmp(flashMode, CameraParameters::FLASH_MODE_OFF) ?
Parameters::FLASH_MODE_OFF :
!strcmp(flashMode, CameraParameters::FLASH_MODE_AUTO) ?
@@ -2569,7 +2690,7 @@ int Parameters::normalizedYToArray(int y) const {
return cropYToArray(normalizedYToCrop(y));
}
-status_t Parameters::getFilteredPreviewSizes(Size limit, Vector<Size> *sizes) {
+status_t Parameters::getFilteredSizes(Size limit, Vector<Size> *sizes) {
if (info == NULL) {
ALOGE("%s: Static metadata is not initialized", __FUNCTION__);
return NO_INIT;
@@ -2578,22 +2699,37 @@ status_t Parameters::getFilteredPreviewSizes(Size limit, Vector<Size> *sizes) {
ALOGE("%s: Input size is null", __FUNCTION__);
return BAD_VALUE;
}
-
- const size_t SIZE_COUNT = sizeof(Size) / sizeof(int);
- camera_metadata_ro_entry_t availableProcessedSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT);
- if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE;
-
- Size previewSize;
- for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) {
- previewSize.width = availableProcessedSizes.data.i32[i];
- previewSize.height = availableProcessedSizes.data.i32[i+1];
- // Need skip the preview sizes that are too large.
- if (previewSize.width <= limit.width &&
- previewSize.height <= limit.height) {
- sizes->push(previewSize);
+ sizes->clear();
+
+ if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ for (size_t i=0; i < scs.size(); i++) {
+ const StreamConfiguration &sc = scs[i];
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+ sc.width <= limit.width && sc.height <= limit.height) {
+ Size sz = {sc.width, sc.height};
+ sizes->push(sz);
}
+ }
+ } else {
+ const size_t SIZE_COUNT = sizeof(Size) / sizeof(int);
+ camera_metadata_ro_entry_t availableProcessedSizes =
+ staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT);
+ if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE;
+
+ Size filteredSize;
+ for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) {
+ filteredSize.width = availableProcessedSizes.data.i32[i];
+ filteredSize.height = availableProcessedSizes.data.i32[i+1];
+            // Need to skip sizes that are too large.
+ if (filteredSize.width <= limit.width &&
+ filteredSize.height <= limit.height) {
+ sizes->push(filteredSize);
+ }
+ }
}
+
if (sizes->isEmpty()) {
ALOGE("generated preview size list is empty!!");
return BAD_VALUE;
@@ -2627,6 +2763,78 @@ Parameters::Size Parameters::getMaxSizeForRatio(
return maxSize;
}
+Vector<Parameters::StreamConfiguration> Parameters::getStreamConfigurations() {
+ const int STREAM_CONFIGURATION_SIZE = 4;
+ const int STREAM_FORMAT_OFFSET = 0;
+ const int STREAM_WIDTH_OFFSET = 1;
+ const int STREAM_HEIGHT_OFFSET = 2;
+ const int STREAM_IS_INPUT_OFFSET = 3;
+ Vector<StreamConfiguration> scs;
+ if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+ ALOGE("StreamConfiguration is only valid after device HAL 3.2!");
+ return scs;
+ }
+
+ camera_metadata_ro_entry_t availableStreamConfigs =
+ staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+ for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+ int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ StreamConfiguration sc = {format, width, height, isInput};
+ scs.add(sc);
+ }
+ return scs;
+}
+
+SortedVector<int32_t> Parameters::getAvailableOutputFormats() {
+ SortedVector<int32_t> outputFormats; // Non-duplicated output formats
+ if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ for (size_t i=0; i < scs.size(); i++) {
+ const StreamConfiguration &sc = scs[i];
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) {
+ outputFormats.add(sc.format);
+ }
+ }
+ } else {
+ camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+ for (size_t i=0; i < availableFormats.count; i++) {
+ outputFormats.add(availableFormats.data.i32[i]);
+ }
+ }
+ return outputFormats;
+}
+
+Vector<Parameters::Size> Parameters::getAvailableJpegSizes() {
+ Vector<Parameters::Size> jpegSizes;
+ if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ for (size_t i=0; i < scs.size(); i++) {
+ const StreamConfiguration &sc = scs[i];
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ sc.format == HAL_PIXEL_FORMAT_BLOB) {
+ Size sz = {sc.width, sc.height};
+ jpegSizes.add(sz);
+ }
+ }
+ } else {
+ const int JPEG_SIZE_ENTRY_COUNT = 2;
+ const int WIDTH_OFFSET = 0;
+ const int HEIGHT_OFFSET = 1;
+ camera_metadata_ro_entry_t availableJpegSizes =
+ staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
+ for (size_t i=0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
+ int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET];
+ int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET];
+ Size sz = {width, height};
+ jpegSizes.add(sz);
+ }
+ }
+ return jpegSizes;
+}
+
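
A hedged usage sketch for the helper above, e.g. when choosing a default picture size (the selection policy is illustrative, not part of the patch):

    Vector<Parameters::Size> jpegSizes = getAvailableJpegSizes();
    Parameters::Size best = {0, 0};
    for (size_t i = 0; i < jpegSizes.size(); i++) {
        // Pick the largest BLOB (JPEG) output by pixel count.
        if (jpegSizes[i].width * jpegSizes[i].height >
                best.width * best.height) {
            best = jpegSizes[i];
        }
    }
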
Parameters::CropRegion Parameters::calculateCropRegion(
Parameters::CropRegion::Outputs outputs) const {
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index da07ccf..815cc55 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -52,6 +52,9 @@ struct Parameters {
int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
int pictureWidth, pictureHeight;
+    // Store the picture size before it is overridden by video snapshot
+ int pictureWidthLastSet, pictureHeightLastSet;
+ bool pictureSizeOverriden;
int32_t jpegThumbSize[2];
uint8_t jpegQuality, jpegThumbQuality;
@@ -114,6 +117,14 @@ struct Parameters {
bool autoExposureLock;
bool autoWhiteBalanceLock;
+ // 3A region types, for use with ANDROID_CONTROL_MAX_REGIONS
+ enum region_t {
+ REGION_AE = 0,
+ REGION_AWB,
+ REGION_AF,
+ NUM_REGION // Number of region types
+ } region;
+
Vector<Area> meteringAreas;
int zoom;
@@ -168,8 +179,13 @@ struct Parameters {
// Number of zoom steps to simulate
static const unsigned int NUM_ZOOM_STEPS = 100;
// Max preview size allowed
+    // The height bound matches the width bound so that any aspect ratio
+    // with a long side of at most 1920 pixels is allowed
static const unsigned int MAX_PREVIEW_WIDTH = 1920;
- static const unsigned int MAX_PREVIEW_HEIGHT = 1080;
+ static const unsigned int MAX_PREVIEW_HEIGHT = 1920;
+ // Initial max preview/recording size bound
+ static const int MAX_INITIAL_PREVIEW_WIDTH = 1920;
+ static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
// Aspect ratio tolerance
static const float ASPECT_RATIO_TOLERANCE = 0.001;
@@ -219,7 +235,7 @@ struct Parameters {
~Parameters();
// Sets up default parameters
- status_t initialize(const CameraMetadata *info);
+ status_t initialize(const CameraMetadata *info, int deviceVersion);
// Build fast-access device static info from static info
status_t buildFastInfo();
@@ -245,6 +261,12 @@ struct Parameters {
// Add/update JPEG entries in metadata
status_t updateRequestJpeg(CameraMetadata *request) const;
+ /* Helper functions to override jpeg size for video snapshot */
+ // Override jpeg size by video size. Called during startRecording.
+ status_t overrideJpegSizeByVideoSize();
+ // Recover overridden jpeg size. Called during stopRecording.
+ status_t recoverOverriddenJpegSize();
+
// Calculate the crop region rectangle based on current stream sizes
struct CropRegion {
float left;
@@ -334,10 +356,35 @@ private:
int normalizedYToCrop(int y) const;
Vector<Size> availablePreviewSizes;
+ Vector<Size> availableVideoSizes;
// Get size list (that are no larger than limit) from static metadata.
- status_t getFilteredPreviewSizes(Size limit, Vector<Size> *sizes);
+ status_t getFilteredSizes(Size limit, Vector<Size> *sizes);
// Get max size (from the size array) that matches the given aspect ratio.
Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
+
+ // Helper function for overriding jpeg size for video snapshot
+ // Check if overridden jpeg size needs to be updated after Parameters::set.
+ // The behavior of this function is tailored to the implementation of Parameters::set.
+    // Do not use this function for any other purpose.
+ status_t updateOverriddenJpegSize();
+
+ struct StreamConfiguration {
+ int32_t format;
+ int32_t width;
+ int32_t height;
+ int32_t isInput;
+ };
+    // Helper function to extract the available stream configurations
+    // Only valid since device HAL version 3.2
+    // Returns an empty Vector if the device HAL version does not support it
+ Vector<StreamConfiguration> getStreamConfigurations();
+
+ // Helper function to get non-duplicated available output formats
+ SortedVector<int32_t> getAvailableOutputFormats();
+ // Helper function to get available output jpeg sizes
+ Vector<Size> getAvailableJpegSizes();
+
+ int mDeviceVersion;
};
// This class encapsulates the Parameters class so that it can only be accessed
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 77ae7ec..9e7fff8 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -89,8 +89,26 @@ status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
Mutex::Autolock m(mMutex);
if (mPreviewRequest.entryCount() == 0) {
- res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
- &mPreviewRequest);
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ // Use CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG for ZSL streaming case.
+ if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_0) {
+ if (params.zslMode && !params.recordingHint) {
+ res = device->createDefaultRequest(CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG,
+ &mPreviewRequest);
+ } else {
+ res = device->createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW,
+ &mPreviewRequest);
+ }
+ } else {
+ res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
+ &mPreviewRequest);
+ }
+
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create default preview request: "
"%s (%d)", __FUNCTION__, mId, strerror(-res), res);
@@ -163,8 +181,7 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
if (mPreviewStreamId == NO_STREAM) {
res = device->createStream(mPreviewWindow,
params.previewWidth, params.previewHeight,
- CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
- &mPreviewStreamId);
+ CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, &mPreviewStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
@@ -301,6 +318,44 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
return OK;
}
+status_t StreamingProcessor::recordingStreamNeedsUpdate(
+ const Parameters &params, bool *needsUpdate) {
+ status_t res;
+
+ if (needsUpdate == 0) {
+ ALOGE("%s: Camera %d: invalid argument", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ if (mRecordingStreamId == NO_STREAM) {
+ *needsUpdate = true;
+ return OK;
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ uint32_t currentWidth, currentHeight;
+ res = device->getStreamInfo(mRecordingStreamId,
+ &currentWidth, &currentHeight, 0);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error querying recording output stream info: "
+ "%s (%d)", __FUNCTION__, mId,
+ strerror(-res), res);
+ return res;
+ }
+
+    if (mRecordingConsumer == 0 || currentWidth != (uint32_t)params.videoWidth ||
+            currentHeight != (uint32_t)params.videoHeight) {
+        *needsUpdate = true;
+    } else {
+        *needsUpdate = false;
+    }
+    return res;
+}
+
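
A sketch of the call pattern this predicate enables (the Camera2Client call site is an assumption; only the two StreamingProcessor methods are from this patch):

    bool needsUpdate = false;
    res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &needsUpdate);
    if (res != OK) return res;
    if (needsUpdate) {
        // Recreate the recording stream only when the consumer or size actually
        // changed, instead of tearing it down on every startRecording() call.
        res = mStreamingProcessor->updateRecordingStream(params);
    }
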
status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
ATRACE_CALL();
status_t res;
@@ -319,13 +374,15 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
// Create CPU buffer queue endpoint. We need one more buffer here so that we can
// always acquire and free a buffer when the heap is full; otherwise the consumer
// will have buffers in flight we'll never clear out.
- sp<BufferQueue> bq = new BufferQueue();
- mRecordingConsumer = new BufferItemConsumer(bq,
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mRecordingConsumer = new BufferItemConsumer(consumer,
GRALLOC_USAGE_HW_VIDEO_ENCODER,
mRecordingHeapCount + 1);
mRecordingConsumer->setFrameAvailableListener(this);
mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
- mRecordingWindow = new Surface(bq);
+ mRecordingWindow = new Surface(producer);
newConsumer = true;
// Allocate memory later, since we don't know buffer size until receipt
}
@@ -365,7 +422,7 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
mRecordingFrameCount = 0;
res = device->createStream(mRecordingWindow,
params.videoWidth, params.videoHeight,
- CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
+ CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, &mRecordingStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for recording: "
"%s (%d)", __FUNCTION__, mId,
@@ -428,10 +485,13 @@ status_t StreamingProcessor::startStream(StreamType type,
Mutex::Autolock m(mMutex);
- // If a recording stream is being started up, free up any
- // outstanding buffers left from the previous recording session.
- // There should never be any, so if there are, warn about it.
- if (isStreamActive(outputStreams, mRecordingStreamId)) {
+ // If a recording stream is being started up and no recording
+ // stream is active yet, free up any outstanding buffers left
+ // from the previous recording session. There should never be
+ // any, so if there are, warn about it.
+ bool isRecordingStreamIdle = !isStreamActive(mActiveStreamIds, mRecordingStreamId);
+ bool startRecordingStream = isStreamActive(outputStreams, mRecordingStreamId);
+ if (startRecordingStream && isRecordingStreamIdle) {
releaseAllRecordingFramesLocked();
}
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index 833bb8f..8466af4 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -54,6 +54,9 @@ class StreamingProcessor:
status_t setRecordingBufferCount(size_t count);
status_t updateRecordingRequest(const Parameters &params);
+    // If needsUpdate is set to true, an updateRecordingStream call with params will recreate
+    // the recording stream
+ status_t recordingStreamNeedsUpdate(const Parameters &params, bool *needsUpdate);
status_t updateRecordingStream(const Parameters &params);
status_t deleteRecordingStream();
int getRecordingStreamId() const;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 130f81a..8f78103 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -48,6 +48,7 @@ ZslProcessor::ZslProcessor(
mDevice(client->getCameraDevice()),
mSequencer(sequencer),
mId(client->getCameraId()),
+ mDeleted(false),
mZslBufferAvailable(false),
mZslStreamId(NO_STREAM),
mZslReprocessStreamId(NO_STREAM),
@@ -62,7 +63,7 @@ ZslProcessor::ZslProcessor(
ZslProcessor::~ZslProcessor() {
ALOGV("%s: Exit", __FUNCTION__);
- deleteStream();
+ disconnect();
}
void ZslProcessor::onFrameAvailable() {
@@ -73,18 +74,19 @@ void ZslProcessor::onFrameAvailable() {
}
}
-void ZslProcessor::onFrameAvailable(int32_t /*requestId*/,
- const CameraMetadata &frame) {
+void ZslProcessor::onResultAvailable(const CaptureResult &result) {
+ ATRACE_CALL();
+ ALOGV("%s:", __FUNCTION__);
Mutex::Autolock l(mInputMutex);
camera_metadata_ro_entry_t entry;
- entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
nsecs_t timestamp = entry.data.i64[0];
(void)timestamp;
ALOGVV("Got preview frame for timestamp %" PRId64, timestamp);
if (mState != RUNNING) return;
- mFrameList.editItemAt(mFrameListHead) = frame;
+ mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
findMatchesLocked();
@@ -130,13 +132,15 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
if (mZslConsumer == 0) {
// Create CPU buffer queue endpoint
- sp<BufferQueue> bq = new BufferQueue();
- mZslConsumer = new BufferItemConsumer(bq,
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mZslConsumer = new BufferItemConsumer(consumer,
GRALLOC_USAGE_HW_CAMERA_ZSL,
kZslBufferDepth);
mZslConsumer->setFrameAvailableListener(this);
mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
- mZslWindow = new Surface(bq);
+ mZslWindow = new Surface(producer);
}
if (mZslStreamId != NO_STREAM) {
@@ -172,6 +176,8 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
}
}
+ mDeleted = false;
+
if (mZslStreamId == NO_STREAM) {
// Create stream for HAL production
// TODO: Sort out better way to select resolution for ZSL
@@ -180,8 +186,7 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
(int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
res = device->createStream(mZslWindow,
params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
- streamType, 0,
- &mZslStreamId);
+ streamType, &mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
"%s (%d)", __FUNCTION__, mId,
@@ -199,13 +204,22 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
}
client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
Camera2Client::kPreviewRequestIdEnd,
- this);
+ this,
+ /*sendPartials*/false);
return OK;
}
status_t ZslProcessor::deleteStream() {
ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ // WAR(b/15408128): do not delete stream unless client is being disconnected.
+ mDeleted = true;
+ return OK;
+}
+
+status_t ZslProcessor::disconnect() {
+ ATRACE_CALL();
status_t res;
Mutex::Autolock l(mInputMutex);
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 6d3cb85..b6533cf 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -24,6 +24,7 @@
#include <utils/Condition.h>
#include <gui/BufferItemConsumer.h>
#include <camera/CameraMetadata.h>
+#include <camera/CaptureResult.h>
#include "common/CameraDeviceBase.h"
#include "api1/client2/ZslProcessorInterface.h"
@@ -54,7 +55,7 @@ class ZslProcessor:
// From mZslConsumer
virtual void onFrameAvailable();
// From FrameProcessor
- virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+ virtual void onResultAvailable(const CaptureResult &result);
virtual void onBufferReleased(buffer_handle_t *handle);
@@ -66,6 +67,7 @@ class ZslProcessor:
status_t updateStream(const Parameters &params);
status_t deleteStream();
+ status_t disconnect();
int getStreamId() const;
status_t pushToReprocess(int32_t requestId);
@@ -85,6 +87,8 @@ class ZslProcessor:
wp<CaptureSequencer> mSequencer;
int mId;
+ bool mDeleted;
+
mutable Mutex mInputMutex;
bool mZslBufferAvailable;
Condition mZslBufferAvailableSignal;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 2fce2b6..f110b66 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -44,6 +44,7 @@ ZslProcessor3::ZslProcessor3(
sp<Camera2Client> client,
wp<CaptureSequencer> sequencer):
Thread(false),
+ mLatestClearedBufferTimestamp(0),
mState(RUNNING),
mClient(client),
mSequencer(sequencer),
@@ -51,9 +52,42 @@ ZslProcessor3::ZslProcessor3(
mZslStreamId(NO_STREAM),
mFrameListHead(0),
mZslQueueHead(0),
- mZslQueueTail(0) {
- mZslQueue.insertAt(0, kZslBufferDepth);
- mFrameList.insertAt(0, kFrameListDepth);
+ mZslQueueTail(0),
+ mHasFocuser(false) {
+ // Initialize buffer queue and frame list based on pipeline max depth.
+ size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
+ if (client != 0) {
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device != 0) {
+ camera_metadata_ro_entry_t entry =
+ device->info().find(ANDROID_REQUEST_PIPELINE_MAX_DEPTH);
+ if (entry.count == 1) {
+ pipelineMaxDepth = entry.data.u8[0];
+ } else {
+ ALOGW("%s: Unable to find the android.request.pipelineMaxDepth,"
+ " use default pipeline max depth %zu", __FUNCTION__,
+ kDefaultMaxPipelineDepth);
+ }
+
+ entry = device->info().find(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
+ if (entry.count > 0 && entry.data.f[0] != 0.) {
+ mHasFocuser = true;
+ }
+ }
+ }
+
+ ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%d)",
+ __FUNCTION__, pipelineMaxDepth);
+    // Need to keep the buffer queue longer than the metadata queue, because a buffer can
+    // arrive earlier than its metadata; otherwise the buffer corresponding to the oldest
+    // metadata entry could be dropped.
+ mFrameListDepth = pipelineMaxDepth;
+ mBufferQueueDepth = mFrameListDepth + 1;
+
+ mZslQueue.insertAt(0, mBufferQueueDepth);
+ mFrameList.insertAt(0, mFrameListDepth);
sp<CaptureSequencer> captureSequencer = mSequencer.promote();
if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
}
@@ -63,19 +97,34 @@ ZslProcessor3::~ZslProcessor3() {
deleteStream();
}
-void ZslProcessor3::onFrameAvailable(int32_t /*requestId*/,
- const CameraMetadata &frame) {
+void ZslProcessor3::onResultAvailable(const CaptureResult &result) {
+ ATRACE_CALL();
+ ALOGV("%s:", __FUNCTION__);
Mutex::Autolock l(mInputMutex);
camera_metadata_ro_entry_t entry;
- entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
-    nsecs_t timestamp = entry.data.i64[0];
-    (void)timestamp;
-    ALOGVV("Got preview metadata for timestamp %" PRId64, timestamp);
+    if (entry.count == 0) {
+        ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
+        return;
+    }
+    nsecs_t timestamp = entry.data.i64[0];
+
+ entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
+ if (entry.count == 0) {
+ ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
+ return;
+ }
+ int32_t frameNumber = entry.data.i32[0];
+
+ ALOGVV("Got preview metadata for frame %d with timestamp %" PRId64, frameNumber, timestamp);
if (mState != RUNNING) return;
- mFrameList.editItemAt(mFrameListHead) = frame;
- mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+ // Corresponding buffer has been cleared. No need to push into mFrameList
+ if (timestamp <= mLatestClearedBufferTimestamp) return;
+
+ mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
+ mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
}
status_t ZslProcessor3::updateStream(const Parameters &params) {
@@ -135,7 +184,7 @@ status_t ZslProcessor3::updateStream(const Parameters &params) {
// Note that format specified internally in Camera3ZslStream
res = device->createZslStream(
params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
- kZslBufferDepth,
+ mBufferQueueDepth,
&mZslStreamId,
&mZslStream);
if (res != OK) {
@@ -144,10 +193,15 @@ status_t ZslProcessor3::updateStream(const Parameters &params) {
strerror(-res), res);
return res;
}
+
+ // Only add the camera3 buffer listener when the stream is created.
+ mZslStream->addBufferListener(this);
}
+
client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
Camera2Client::kPreviewRequestIdEnd,
- this);
+ this,
+ /*sendPartials*/false);
return OK;
}
@@ -190,6 +244,46 @@ int ZslProcessor3::getStreamId() const {
return mZslStreamId;
}
+status_t ZslProcessor3::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ CameraMetadata stillTemplate;
+ device->createDefaultRequest(CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);
+
+ // Find some of the post-processing tags, and assign the value from template to the request.
+ // Only check the aberration mode and noise reduction mode for now, as they are very important
+ // for image quality.
+ uint32_t postProcessingTags[] = {
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_COLOR_CORRECTION_MODE,
+ ANDROID_TONEMAP_MODE,
+ ANDROID_SHADING_MODE,
+ ANDROID_HOT_PIXEL_MODE,
+ ANDROID_EDGE_MODE
+ };
+
+ camera_metadata_entry_t entry;
+ for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
+ entry = stillTemplate.find(postProcessingTags[i]);
+ if (entry.count > 0) {
+ request.update(postProcessingTags[i], entry.data.u8, 1);
+ }
+ }
+
+ return OK;
+}
+
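
The reprocess request is built from the metadata of the selected preview frame, so its post-processing modes default to preview-grade (typically *_FAST). Copying them from the still-capture template is what restores still image quality. A minimal sketch of the observable effect for one tag (the concrete values are typical, not mandated by the HAL):

    camera_metadata_entry_t e = request.find(ANDROID_NOISE_REDUCTION_MODE);
    // After updateRequestWithDefaultStillRequest(request), e.data.u8[0] is
    // typically ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY rather than the
    // preview default ANDROID_NOISE_REDUCTION_MODE_FAST.
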
status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
ALOGV("%s: Send in reprocess request with id %d",
__FUNCTION__, requestId);
@@ -249,18 +343,45 @@ status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
res = request.update(ANDROID_REQUEST_TYPE,
&requestType, 1);
+ if (res != OK) {
+ ALOGE("%s: Unable to update request type",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
int32_t inputStreams[1] =
{ mZslStreamId };
- if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
+ res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
inputStreams, 1);
+ if (res != OK) {
+ ALOGE("%s: Unable to update request input streams",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ uint8_t captureIntent =
+ static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
+ res = request.update(ANDROID_CONTROL_CAPTURE_INTENT,
+ &captureIntent, 1);
+    if (res != OK) {
+ ALOGE("%s: Unable to update request capture intent",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
// TODO: Shouldn't we also update the latest preview frame?
int32_t outputStreams[1] =
{ client->getCaptureStreamId() };
- if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+ res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreams, 1);
+ if (res != OK) {
+ ALOGE("%s: Unable to update request output streams",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
res = request.update(ANDROID_REQUEST_ID,
&requestId, 1);
-
if (res != OK ) {
ALOGE("%s: Unable to update frame to a reprocess request",
__FUNCTION__);
@@ -288,6 +409,13 @@ status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
}
}
+ // Update post-processing settings
+ res = updateRequestWithDefaultStillRequest(request);
+ if (res != OK) {
+ ALOGW("%s: Unable to update post-processing tags, the reprocessed image quality "
+ "may be compromised", __FUNCTION__);
+ }
+
mLatestCapturedRequest = request;
res = client->getCameraDevice()->capture(request);
if (res != OK ) {
@@ -312,11 +440,19 @@ status_t ZslProcessor3::clearZslQueue() {
status_t ZslProcessor3::clearZslQueueLocked() {
if (mZslStream != 0) {
- return mZslStream->clearInputRingBuffer();
+ // clear result metadata list first.
+ clearZslResultQueueLocked();
+ return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
}
return OK;
}
+void ZslProcessor3::clearZslResultQueueLocked() {
+ mFrameList.clear();
+ mFrameListHead = 0;
+ mFrameList.insertAt(0, mFrameListDepth);
+}
+
void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const {
Mutex::Autolock l(mInputMutex);
if (!mLatestCapturedRequest.isEmpty()) {
@@ -368,6 +504,23 @@ void ZslProcessor3::dumpZslQueue(int fd) const {
}
}
+bool ZslProcessor3::isFixedFocusMode(uint8_t afMode) const {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+            return false;
+ case ANDROID_CONTROL_AF_MODE_OFF:
+ case ANDROID_CONTROL_AF_MODE_EDOF:
+ return true;
+ default:
+ ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
+ return false;
+ }
+}
+
nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
/**
* Find the smallest timestamp we know about so far
@@ -413,6 +566,38 @@ nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
continue;
}
+ entry = frame.find(ANDROID_CONTROL_AF_MODE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF mode field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afMode = entry.data.u8[0];
+ if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
+ // Skip all the ZSL buffer for manual AF mode, as we don't really
+ // know the af state.
+ continue;
+ }
+
+ // Check AF state if device has focuser and focus mode isn't fixed
+ if (mHasFocuser && !isFixedFocusMode(afMode)) {
+ // Make sure the candidate frame has good focus.
+ entry = frame.find(ANDROID_CONTROL_AF_STATE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF state field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afState = entry.data.u8[0];
+ if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+ afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+ afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+ ALOGW("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
+ __FUNCTION__, afState);
+ continue;
+ }
+ }
+
minTimestamp = frameTimestamp;
idx = j;
}
@@ -453,13 +638,15 @@ void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
}
void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
- Mutex::Autolock l(mInputMutex);
// ignore output buffers
if (bufferInfo.mOutput) {
return;
}
+    // Lock the mutex only after we know a returned input buffer is involved,
+    // to avoid a potential deadlock
+ Mutex::Autolock l(mInputMutex);
// TODO: Verify that the buffer is in our queue by looking at timestamp
// theoretically unnecessary unless we change the following assumptions:
// -- only 1 buffer reprocessed at a time (which is the case now)
@@ -470,11 +657,17 @@ void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
// We need to guarantee that if we do two back-to-back captures,
// the second won't use a buffer that's older/the same as the first, which
// is theoretically possible if we don't clear out the queue and the
- // selection criteria is something like 'newest'. Clearing out the queue
- // on a completed capture ensures we'll only use new data.
+ // selection criteria is something like 'newest'. Clearing out the result
+    // metadata queue on a completed capture ensures we'll only use new timestamps.
+    // Calling clearZslQueueLocked here would be a guaranteed deadlock: this
+    // callback holds the Camera3Stream internal lock (mLock), and clearZslQueueLocked
+    // needs to acquire the same lock.
+    // TODO: need to figure out a way to clear the ZSL buffer queue properly. Right now
+    // it is safe not to do so, as back-to-back ZSL captures require stopping and
+    // restarting the preview, which flushes the ZSL queue automatically.
ALOGV("%s: Memory optimization, clearing ZSL queue",
__FUNCTION__);
- clearZslQueueLocked();
+ clearZslResultQueueLocked();
// Required so we accept more ZSL requests
mState = RUNNING;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
index d2f8322..fc9f70c 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -50,8 +50,8 @@ class ZslProcessor3 :
ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~ZslProcessor3();
- // From FrameProcessor
- virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame);
+ // From FrameProcessor::FilteredListener
+ virtual void onResultAvailable(const CaptureResult &result);
/**
****************************************
@@ -82,6 +82,7 @@ class ZslProcessor3 :
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ nsecs_t mLatestClearedBufferTimestamp;
enum {
RUNNING,
@@ -107,8 +108,9 @@ class ZslProcessor3 :
CameraMetadata frame;
};
- static const size_t kZslBufferDepth = 4;
- static const size_t kFrameListDepth = kZslBufferDepth * 2;
+ static const int32_t kDefaultMaxPipelineDepth = 4;
+ size_t mBufferQueueDepth;
+ size_t mFrameListDepth;
Vector<CameraMetadata> mFrameList;
size_t mFrameListHead;
@@ -120,13 +122,22 @@ class ZslProcessor3 :
CameraMetadata mLatestCapturedRequest;
+ bool mHasFocuser;
+
virtual bool threadLoop();
status_t clearZslQueueLocked();
+ void clearZslResultQueueLocked();
+
void dumpZslQueue(int id) const;
nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+
+ bool isFixedFocusMode(uint8_t afMode) const;
+
+ // Update the post-processing metadata with the default still capture request template
+ status_t updateRequestWithDefaultStillRequest(CameraMetadata &request) const;
};
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
new file mode 100644
index 0000000..9efeaba
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ZslProcessorInterface.h"
+
+namespace android {
+namespace camera2 {
+
+status_t ZslProcessorInterface::disconnect() {
+ return OK;
+}
+
+}; //namespace camera2
+}; //namespace android
+
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
index 183c0c2..9e266e7 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
@@ -19,6 +19,8 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
namespace android {
namespace camera2 {
@@ -37,6 +39,9 @@ public:
// Delete the underlying CameraDevice streams
virtual status_t deleteStream() = 0;
+ // Clear any additional state necessary before the CameraDevice is disconnected
+ virtual status_t disconnect();
+
/**
* Submits a ZSL capture request (id = requestId)
*