-rw-r--r--  camera/ProCamera.cpp | 2
-rw-r--r--  media/libstagefright/SurfaceMediaSource.cpp | 7
-rw-r--r--  media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp | 33
-rw-r--r--  media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h | 5
-rw-r--r--  media/libstagefright/omx/GraphicBufferSource.cpp | 9
-rw-r--r--  media/libstagefright/wifi-display/sink/DirectRenderer.cpp | 50
-rw-r--r--  media/libstagefright/wifi-display/sink/DirectRenderer.h | 7
-rw-r--r--  media/libstagefright/wifi-display/source/Converter.cpp | 139
-rw-r--r--  media/libstagefright/wifi-display/source/Converter.h | 63
-rw-r--r--  media/libstagefright/wifi-display/source/PlaybackSession.cpp | 10
-rw-r--r--  services/audioflinger/Android.mk | 19
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 1
-rw-r--r--  services/audioflinger/AudioMixer.cpp | 1
-rw-r--r--  services/audioflinger/AudioPolicyService.cpp | 1
-rw-r--r--  services/audioflinger/AudioWatchdog.cpp | 5
-rw-r--r--  services/audioflinger/Configuration.h | 47
-rw-r--r--  services/audioflinger/Effects.cpp | 1
-rw-r--r--  services/audioflinger/FastMixer.cpp | 7
-rw-r--r--  services/audioflinger/FastMixerState.cpp | 1
-rw-r--r--  services/audioflinger/StateQueue.cpp | 1
-rw-r--r--  services/audioflinger/StateQueueInstantiations.cpp | 1
-rw-r--r--  services/audioflinger/Threads.cpp | 4
-rw-r--r--  services/audioflinger/Tracks.cpp | 1
-rw-r--r--  services/camera/libcameraservice/Camera2Device.cpp | 4
-rw-r--r--  services/camera/libcameraservice/Camera2Device.h | 1
-rw-r--r--  services/camera/libcameraservice/Camera3Device.cpp | 84
-rw-r--r--  services/camera/libcameraservice/Camera3Device.h | 13
-rw-r--r--  services/camera/libcameraservice/CameraDeviceBase.h | 7
-rw-r--r--  services/camera/libcameraservice/camera2/CallbackProcessor.cpp | 3
-rw-r--r--  services/camera/libcameraservice/camera2/FrameProcessor.cpp | 101
-rw-r--r--  services/camera/libcameraservice/camera2/FrameProcessor.h | 19
-rw-r--r--  services/camera/libcameraservice/camera2/StreamingProcessor.cpp | 3
-rw-r--r--  services/camera/libcameraservice/camera2/ZslProcessor.cpp | 3
-rw-r--r--  services/camera/libcameraservice/camera3/Camera3InputStream.cpp | 3
-rw-r--r--  services/camera/libcameraservice/gui/RingBufferConsumer.cpp | 3
35 files changed, 467 insertions, 192 deletions
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
index 190402e..1040415 100644
--- a/camera/ProCamera.cpp
+++ b/camera/ProCamera.cpp
@@ -248,7 +248,7 @@ status_t ProCamera::createStreamCpu(int width, int height, int format,
if (c == 0) return NO_INIT;
sp<BufferQueue> bq = new BufferQueue();
- sp<CpuConsumer> cc = new CpuConsumer(bq, heapCount, synchronousMode);
+ sp<CpuConsumer> cc = new CpuConsumer(bq, heapCount/*, synchronousMode*/);
cc->setName(String8("ProCamera::mCpuConsumer"));
sp<Surface> stc = new Surface(
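The ProCamera change above, like the camera-service changes further down, drops the synchronousMode argument from the consumer constructors as part of the BufferQueue API cleanup. A minimal sketch of the updated construction pattern, assuming the AOSP gui headers of this era; makeCpuStream and its callers are hypothetical:

    // Sketch only: CpuConsumer no longer takes a synchronousMode flag.
    #include <gui/BufferQueue.h>
    #include <gui/CpuConsumer.h>
    #include <gui/Surface.h>
    #include <utils/String8.h>

    using namespace android;

    static void makeCpuStream(size_t heapCount,
                              sp<CpuConsumer> *consumer,
                              sp<Surface> *surface) {
        sp<BufferQueue> bq = new BufferQueue();

        // Only the locked-buffer count is passed now; synchronous vs.
        // asynchronous queueing is no longer chosen at construction time.
        *consumer = new CpuConsumer(bq, heapCount);
        (*consumer)->setName(String8("Example::CpuConsumer"));

        // The producer side still reaches the same BufferQueue through a
        // Surface handed to the camera HAL or app.
        *surface = new Surface(bq);
    }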
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 305e7e0..befd4cc 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -21,7 +21,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <OMX_IVCommon.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
#include <gui/ISurfaceComposer.h>
@@ -54,9 +54,8 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeig
ALOGE("Invalid dimensions %dx%d", bufferWidth, bufferHeight);
}
- mBufferQueue = new BufferQueue(true);
+ mBufferQueue = new BufferQueue();
mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
- mBufferQueue->setSynchronousMode(true);
mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
GRALLOC_USAGE_HW_TEXTURE);
@@ -71,7 +70,7 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeig
listener = static_cast<BufferQueue::ConsumerListener*>(this);
proxy = new BufferQueue::ProxyConsumerListener(listener);
- status_t err = mBufferQueue->consumerConnect(proxy);
+ status_t err = mBufferQueue->consumerConnect(proxy, false);
if (err != NO_ERROR) {
ALOGE("SurfaceMediaSource: error connecting to BufferQueue: %s (%d)",
strerror(-err), err);
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index d8456fe..5f2b5c8 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -149,7 +149,8 @@ SoftVPXEncoder::SoftVPXEncoder(const char *name,
mLevel(OMX_VIDEO_VP8Level_Version0),
mConversionBuffer(NULL),
mInputDataIsMeta(false),
- mGrallocModule(NULL) {
+ mGrallocModule(NULL),
+ mKeyFrameRequested(false) {
initPorts();
}
@@ -519,6 +520,27 @@ OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
}
}
+OMX_ERRORTYPE SoftVPXEncoder::setConfig(
+ OMX_INDEXTYPE index, const OMX_PTR _params) {
+ switch (index) {
+ case OMX_IndexConfigVideoIntraVOPRefresh:
+ {
+ OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
+ (OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
+
+ if (params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorBadPortIndex;
+ }
+
+ mKeyFrameRequested = params->IntraRefreshVOP;
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::setConfig(index, _params);
+ }
+}
+
OMX_ERRORTYPE SoftVPXEncoder::internalSetProfileLevel(
const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel) {
if (profileAndLevel->nPortIndex != kOutputPortIndex) {
@@ -750,12 +772,19 @@ void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) {
vpx_image_t raw_frame;
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
kInputBufferAlignment, source);
+
+ vpx_enc_frame_flags_t flags = 0;
+ if (mKeyFrameRequested) {
+ flags |= VPX_EFLAG_FORCE_KF;
+ mKeyFrameRequested = false;
+ }
+
codec_return = vpx_codec_encode(
mCodecContext,
&raw_frame,
inputBufferHeader->nTimeStamp, // in timebase units
mFrameDurationUs, // frame duration in timebase units
- 0, // frame flags
+ flags, // frame flags
VPX_DL_REALTIME); // encoding deadline
if (codec_return != VPX_CODEC_OK) {
ALOGE("vpx encoder failed to encode frame");
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index d570154..4ee5e51 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -78,6 +78,9 @@ protected:
virtual OMX_ERRORTYPE internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR param);
+ virtual OMX_ERRORTYPE setConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
// OMX callback when buffers available
// Note that both an input and output buffer
// is expected to be available to carry out
@@ -163,6 +166,8 @@ private:
bool mInputDataIsMeta;
const hw_module_t *mGrallocModule;
+ bool mKeyFrameRequested;
+
// Initializes input and output OMX ports with sensible
// default values.
void initPorts();
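The new setConfig() handling lets an OMX client force a keyframe from SoftVPXEncoder through the standard OMX_IndexConfigVideoIntraVOPRefresh config. A rough sketch of the client side, assuming an already-initialized OMX handle (hEncoder is hypothetical) and the component's output port index of 1:

    // Sketch: ask the encoder for a keyframe via OMX_SetConfig().
    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_Index.h>
    #include <OMX_Video.h>

    static OMX_ERRORTYPE requestKeyFrame(OMX_HANDLETYPE hEncoder) {
        OMX_CONFIG_INTRAREFRESHVOPTYPE vop;
        memset(&vop, 0, sizeof(vop));
        vop.nSize = sizeof(vop);
        vop.nVersion.s.nVersionMajor = 1;
        vop.nPortIndex = 1;              // kOutputPortIndex in the component
        vop.IntraRefreshVOP = OMX_TRUE;  // next encoded frame becomes a keyframe

        // SoftVPXEncoder::setConfig() latches mKeyFrameRequested, and
        // onQueueFilled() applies VPX_EFLAG_FORCE_KF on the next
        // vpx_codec_encode() call.
        return OMX_SetConfig(hEncoder,
                OMX_IndexConfigVideoIntraVOPRefresh, &vop);
    }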
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 6f3ed0d..bbd71be 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -18,12 +18,12 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
-#include <GraphicBufferSource.h>
+#include "GraphicBufferSource.h"
#include <OMX_Core.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
namespace android {
@@ -52,10 +52,9 @@ GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
String8 name("GraphicBufferSource");
- mBufferQueue = new BufferQueue(true);
+ mBufferQueue = new BufferQueue();
mBufferQueue->setConsumerName(name);
mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
- mBufferQueue->setSynchronousMode(true);
mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
GRALLOC_USAGE_HW_TEXTURE);
@@ -76,7 +75,7 @@ GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
sp<BufferQueue::ConsumerListener> proxy;
proxy = new BufferQueue::ProxyConsumerListener(listener);
- mInitCheck = mBufferQueue->consumerConnect(proxy);
+ mInitCheck = mBufferQueue->consumerConnect(proxy, false);
if (mInitCheck != NO_ERROR) {
ALOGE("Error connecting to BufferQueue: %s (%d)",
strerror(-mInitCheck), mInitCheck);
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
index 15f9c88..cdb2267 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
@@ -29,9 +29,8 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
namespace android {
@@ -488,12 +487,38 @@ void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatQueueAccessUnit:
+ onQueueAccessUnit(msg);
+ break;
+
+ case kWhatSetFormat:
+ onSetFormat(msg);
+ break;
+
default:
TRESPASS();
}
}
void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+ sp<AMessage> msg = new AMessage(kWhatSetFormat, id());
+ msg->setSize("trackIndex", trackIndex);
+ msg->setMessage("format", format);
+ msg->post();
+}
+
+void DirectRenderer::onSetFormat(const sp<AMessage> &msg) {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+
+ internalSetFormat(trackIndex, format);
+}
+
+void DirectRenderer::internalSetFormat(
+ size_t trackIndex, const sp<AMessage> &format) {
CHECK_LT(trackIndex, 2u);
CHECK(mDecoderContext[trackIndex] == NULL);
@@ -517,18 +542,21 @@ void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
void DirectRenderer::queueAccessUnit(
size_t trackIndex, const sp<ABuffer> &accessUnit) {
- CHECK_LT(trackIndex, 2u);
+ sp<AMessage> msg = new AMessage(kWhatQueueAccessUnit, id());
+ msg->setSize("trackIndex", trackIndex);
+ msg->setBuffer("accessUnit", accessUnit);
+ msg->post();
+}
- if (mDecoderContext[trackIndex] == NULL) {
- CHECK_EQ(trackIndex, 0u);
+void DirectRenderer::onQueueAccessUnit(const sp<AMessage> &msg) {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
- sp<AMessage> format = new AMessage;
- format->setString("mime", "video/avc");
- format->setInt32("width", 640);
- format->setInt32("height", 360);
+ sp<ABuffer> accessUnit;
+ CHECK(msg->findBuffer("accessUnit", &accessUnit));
- setFormat(trackIndex, format);
- }
+ CHECK_LT(trackIndex, 2u);
+ CHECK(mDecoderContext[trackIndex] != NULL);
mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
}
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.h b/media/libstagefright/wifi-display/sink/DirectRenderer.h
index 1e7dc34..07c2170 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.h
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.h
@@ -43,6 +43,8 @@ private:
enum {
kWhatDecoderNotify,
kWhatRenderVideo,
+ kWhatQueueAccessUnit,
+ kWhatSetFormat,
};
struct OutputInfo {
@@ -72,6 +74,11 @@ private:
void scheduleVideoRenderIfNecessary();
void onRenderVideo();
+ void onSetFormat(const sp<AMessage> &msg);
+ void onQueueAccessUnit(const sp<AMessage> &msg);
+
+ void internalSetFormat(size_t trackIndex, const sp<AMessage> &format);
+
DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
};
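DirectRenderer's setFormat() and queueAccessUnit() now post kWhatSetFormat / kWhatQueueAccessUnit to the renderer's own looper instead of touching state on the caller's thread. A generic sketch of that post-to-own-looper pattern with the stagefright foundation classes; the Worker class and its fields are hypothetical:

    // Sketch of the post-to-own-looper pattern used by DirectRenderer.
    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AHandler.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/foundation/AString.h>

    namespace android {

    struct Worker : public AHandler {
        enum { kWhatSetName = 'setN' };

        // Public entry point: may be called from any thread, provided the
        // handler is already registered on a started ALooper.
        void setName(const AString &name) {
            sp<AMessage> msg = new AMessage(kWhatSetName, id());
            msg->setString("name", name.c_str());
            msg->post();
        }

    protected:
        // All state is touched here, on the looper thread only.
        virtual void onMessageReceived(const sp<AMessage> &msg) {
            switch (msg->what()) {
                case kWhatSetName:
                {
                    AString name;
                    CHECK(msg->findString("name", &name));
                    mName = name;
                    break;
                }

                default:
                    TRESPASS();
            }
        }

    private:
        AString mName;
    };

    }  // namespace android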
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 0214520..6f23854 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -21,6 +21,7 @@
#include "Converter.h"
#include "MediaPuller.h"
+#include "include/avc_utils.h"
#include <cutils/properties.h>
#include <gui/Surface.h>
@@ -33,6 +34,8 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
+#include <arpa/inet.h>
+
#include <OMX_Video.h>
namespace android {
@@ -40,12 +43,14 @@ namespace android {
Converter::Converter(
const sp<AMessage> &notify,
const sp<ALooper> &codecLooper,
- const sp<AMessage> &outputFormat)
- : mInitCheck(NO_INIT),
- mNotify(notify),
+ const sp<AMessage> &outputFormat,
+ uint32_t flags)
+ : mNotify(notify),
mCodecLooper(codecLooper),
mOutputFormat(outputFormat),
+ mFlags(flags),
mIsVideo(false),
+ mIsH264(false),
mIsPCMAudio(false),
mNeedToManuallyPrependSPSPPS(false),
mDoMoreWorkPending(false)
@@ -55,21 +60,18 @@ Converter::Converter(
#endif
,mPrevVideoBitrate(-1)
,mNumFramesToDrop(0)
+ ,mEncodingSuspended(false)
{
AString mime;
CHECK(mOutputFormat->findString("mime", &mime));
if (!strncasecmp("video/", mime.c_str(), 6)) {
mIsVideo = true;
+
+ mIsH264 = !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_AVC);
} else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime.c_str())) {
mIsPCMAudio = true;
}
-
- mInitCheck = initEncoder();
-
- if (mInitCheck != OK) {
- releaseEncoder();
- }
}
static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) {
@@ -118,8 +120,19 @@ void Converter::shutdownAsync() {
(new AMessage(kWhatShutdown, id()))->post();
}
-status_t Converter::initCheck() const {
- return mInitCheck;
+status_t Converter::init() {
+ status_t err = initEncoder();
+
+ if (err != OK) {
+ releaseEncoder();
+ }
+
+ return err;
+}
+
+sp<IGraphicBufferProducer> Converter::getGraphicBufferProducer() {
+ CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+ return mGraphicBufferProducer;
}
size_t Converter::getInputBufferCount() const {
@@ -244,6 +257,16 @@ status_t Converter::initEncoder() {
return err;
}
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ CHECK(mIsVideo);
+
+ err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+
+ if (err != OK) {
+ return err;
+ }
+ }
+
err = mEncoder->start();
if (err != OK) {
@@ -256,7 +279,17 @@ status_t Converter::initEncoder() {
return err;
}
- return mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+ err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ scheduleDoMoreWork();
+ }
+
+ return OK;
}
void Converter::notifyError(status_t err) {
@@ -312,9 +345,12 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
- if (mIsVideo && mNumFramesToDrop) {
- --mNumFramesToDrop;
- ALOGI("dropping frame.");
+ if (mNumFramesToDrop > 0 || mEncodingSuspended) {
+ if (mNumFramesToDrop > 0) {
+ --mNumFramesToDrop;
+ ALOGI("dropping frame.");
+ }
+
ReleaseMediaBufferReference(accessUnit);
break;
}
@@ -396,7 +432,7 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
}
if (mIsVideo) {
- ALOGI("requesting IDR frame");
+ ALOGV("requesting IDR frame");
mEncoder->requestIDRFrame();
}
break;
@@ -411,6 +447,10 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
AString mime;
CHECK(mOutputFormat->findString("mime", &mime));
ALOGI("encoder (%s) shut down.", mime.c_str());
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatShutdownCompleted);
+ notify->post();
break;
}
@@ -431,6 +471,21 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatSuspendEncoding:
+ {
+ int32_t suspend;
+ CHECK(msg->findInt32("suspend", &suspend));
+
+ mEncodingSuspended = suspend;
+
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("drop-input-frames",suspend);
+ mEncoder->setParameters(params);
+ }
+ break;
+ }
+
default:
TRESPASS();
}
@@ -616,22 +671,39 @@ status_t Converter::feedEncoderInputBuffers() {
return OK;
}
+sp<ABuffer> Converter::prependCSD(const sp<ABuffer> &accessUnit) const {
+ CHECK(mCSD0 != NULL);
+
+ sp<ABuffer> dup = new ABuffer(accessUnit->size() + mCSD0->size());
+ memcpy(dup->data(), mCSD0->data(), mCSD0->size());
+ memcpy(dup->data() + mCSD0->size(), accessUnit->data(), accessUnit->size());
+
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+ dup->meta()->setInt64("timeUs", timeUs);
+
+ return dup;
+}
+
status_t Converter::doMoreWork() {
status_t err;
- for (;;) {
- size_t bufferIndex;
- err = mEncoder->dequeueInputBuffer(&bufferIndex);
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ for (;;) {
+ size_t bufferIndex;
+ err = mEncoder->dequeueInputBuffer(&bufferIndex);
- if (err != OK) {
- break;
+ if (err != OK) {
+ break;
+ }
+
+ mAvailEncoderInputIndices.push_back(bufferIndex);
}
- mAvailEncoderInputIndices.push_back(bufferIndex);
+ feedEncoderInputBuffers();
}
- feedEncoderInputBuffers();
-
for (;;) {
size_t bufferIndex;
size_t offset;
@@ -705,9 +777,19 @@ status_t Converter::doMoreWork() {
if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
if (!handle) {
+ if (mIsH264) {
+ mCSD0 = buffer;
+ }
mOutputFormat->setBuffer("csd-0", buffer);
}
} else {
+ if (mNeedToManuallyPrependSPSPPS
+ && mIsH264
+ && (mFlags & FLAG_PREPEND_CSD_IF_NECESSARY)
+ && IsIDR(buffer)) {
+ buffer = prependCSD(buffer);
+ }
+
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatAccessUnit);
notify->setBuffer("accessUnit", buffer);
@@ -732,9 +814,18 @@ void Converter::requestIDRFrame() {
}
void Converter::dropAFrame() {
+ // Unsupported in surface input mode.
+ CHECK(!(mFlags & FLAG_USE_SURFACE_INPUT));
+
(new AMessage(kWhatDropAFrame, id()))->post();
}
+void Converter::suspendEncoding(bool suspend) {
+ sp<AMessage> msg = new AMessage(kWhatSuspendEncoding, id());
+ msg->setInt32("suspend", suspend);
+ msg->post();
+}
+
int32_t Converter::getVideoBitrate() const {
return mPrevVideoBitrate;
}
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index 76c8b19..5876e07 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -18,13 +18,12 @@
#define CONVERTER_H_
-#include "WifiDisplaySource.h"
-
#include <media/stagefright/foundation/AHandler.h>
namespace android {
struct ABuffer;
+struct IGraphicBufferProducer;
struct MediaCodec;
#define ENABLE_SILENCE_DETECTION 0
@@ -33,11 +32,25 @@ struct MediaCodec;
// media access unit of a different format.
// Right now this'll convert raw video into H.264 and raw audio into AAC.
struct Converter : public AHandler {
+ enum {
+ kWhatAccessUnit,
+ kWhatEOS,
+ kWhatError,
+ kWhatShutdownCompleted,
+ };
+
+ enum FlagBits {
+ FLAG_USE_SURFACE_INPUT = 1,
+ FLAG_PREPEND_CSD_IF_NECESSARY = 2,
+ };
Converter(const sp<AMessage> &notify,
const sp<ALooper> &codecLooper,
- const sp<AMessage> &outputFormat);
+ const sp<AMessage> &outputFormat,
+ uint32_t flags = 0);
- status_t initCheck() const;
+ status_t init();
+
+ sp<IGraphicBufferProducer> getGraphicBufferProducer();
size_t getInputBufferCount() const;
@@ -50,22 +63,7 @@ struct Converter : public AHandler {
void requestIDRFrame();
void dropAFrame();
-
- enum {
- kWhatAccessUnit,
- kWhatEOS,
- kWhatError,
- };
-
- enum {
- kWhatDoMoreWork,
- kWhatRequestIDRFrame,
- kWhatShutdown,
- kWhatMediaPullerNotify,
- kWhatEncoderActivity,
- kWhatDropAFrame,
- kWhatReleaseOutputBuffer,
- };
+ void suspendEncoding(bool suspend);
void shutdownAsync();
@@ -74,22 +72,40 @@ struct Converter : public AHandler {
static int32_t GetInt32Property(const char *propName, int32_t defaultValue);
+ enum {
+ // MUST not conflict with private enums below.
+ kWhatMediaPullerNotify = 'pulN',
+ };
+
protected:
virtual ~Converter();
virtual void onMessageReceived(const sp<AMessage> &msg);
private:
- status_t mInitCheck;
+ enum {
+ kWhatDoMoreWork,
+ kWhatRequestIDRFrame,
+ kWhatSuspendEncoding,
+ kWhatShutdown,
+ kWhatEncoderActivity,
+ kWhatDropAFrame,
+ kWhatReleaseOutputBuffer,
+ };
+
sp<AMessage> mNotify;
sp<ALooper> mCodecLooper;
sp<AMessage> mOutputFormat;
+ uint32_t mFlags;
bool mIsVideo;
+ bool mIsH264;
bool mIsPCMAudio;
bool mNeedToManuallyPrependSPSPPS;
sp<MediaCodec> mEncoder;
sp<AMessage> mEncoderActivityNotify;
+ sp<IGraphicBufferProducer> mGraphicBufferProducer;
+
Vector<sp<ABuffer> > mEncoderInputBuffers;
Vector<sp<ABuffer> > mEncoderOutputBuffers;
@@ -97,6 +113,8 @@ private:
List<sp<ABuffer> > mInputBufferQueue;
+ sp<ABuffer> mCSD0;
+
bool mDoMoreWorkPending;
#if ENABLE_SILENCE_DETECTION
@@ -109,6 +127,7 @@ private:
int32_t mPrevVideoBitrate;
int32_t mNumFramesToDrop;
+ bool mEncodingSuspended;
status_t initEncoder();
void releaseEncoder();
@@ -127,6 +146,8 @@ private:
static bool IsSilence(const sp<ABuffer> &accessUnit);
+ sp<ABuffer> prependCSD(const sp<ABuffer> &accessUnit) const;
+
DISALLOW_EVIL_CONSTRUCTORS(Converter);
};
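With the Converter rework, callers construct with optional flags, register the handler, then call init() (the old initCheck() is gone); in surface-input mode the encoder is fed through the returned IGraphicBufferProducer. A sketch of the call sequence, assuming the caller's looper and the usual notify/codecLooper/format objects; startConverter itself is hypothetical:

    // Sketch: bringing up the reworked Converter with surface input.
    #include <gui/IGraphicBufferProducer.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <utils/Errors.h>
    #include "Converter.h"

    using namespace android;

    static status_t startConverter(
            const sp<ALooper> &sessionLooper,
            const sp<AMessage> &notify,
            const sp<ALooper> &codecLooper,
            const sp<AMessage> &outputFormat,
            sp<IGraphicBufferProducer> *producer) {
        sp<Converter> converter = new Converter(
                notify, codecLooper, outputFormat,
                Converter::FLAG_USE_SURFACE_INPUT
                    | Converter::FLAG_PREPEND_CSD_IF_NECESSARY);

        // Register before init(), as PlaybackSession now does, so any
        // message posted during encoder setup has a live handler.
        sessionLooper->registerHandler(converter);

        status_t err = converter->init();   // replaces the old initCheck()
        if (err != OK) {
            sessionLooper->unregisterHandler(converter->id());
            return err;
        }

        // In surface-input mode the encoder is fed through this producer.
        // dropAFrame() is disallowed here; use suspendEncoding() instead.
        *producer = converter->getGraphicBufferProducer();
        return OK;
    }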
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index a15fbac..0aa4ee5 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -521,7 +521,7 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived(
if (mTracks.isEmpty()) {
ALOGI("Reached EOS");
}
- } else {
+ } else if (what != Converter::kWhatShutdownCompleted) {
CHECK_EQ(what, Converter::kWhatError);
status_t err;
@@ -957,14 +957,16 @@ status_t WifiDisplaySource::PlaybackSession::addSource(
sp<Converter> converter = new Converter(notify, codecLooper, format);
- err = converter->initCheck();
+ looper()->registerHandler(converter);
+
+ err = converter->init();
if (err != OK) {
ALOGE("%s converter returned err %d", isVideo ? "video" : "audio", err);
+
+ looper()->unregisterHandler(converter->id());
return err;
}
- looper()->registerHandler(converter);
-
notify = new AMessage(Converter::kWhatMediaPullerNotify, converter->id());
notify->setSize("trackIndex", trackIndex);
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 714854e..54377f1 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -27,9 +27,6 @@ LOCAL_SRC_FILES:= \
LOCAL_SRC_FILES += StateQueue.cpp
-# uncomment for debugging timing problems related to StateQueue::push()
-LOCAL_CFLAGS += -DSTATE_QUEUE_DUMP
-
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
@@ -56,24 +53,10 @@ LOCAL_STATIC_LIBRARIES := \
LOCAL_MODULE:= libaudioflinger
-LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp
-
-LOCAL_CFLAGS += -DFAST_MIXER_STATISTICS
-
-# uncomment to display CPU load adjusted for CPU frequency
-# LOCAL_CFLAGS += -DCPU_FREQUENCY_STATISTICS
+LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp
LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
-LOCAL_CFLAGS += -UFAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
-
-# uncomment to allow tee sink debugging to be enabled by property
-# LOCAL_CFLAGS += -DTEE_SINK
-
-# uncomment to enable the audio watchdog
-# LOCAL_SRC_FILES += AudioWatchdog.cpp
-# LOCAL_CFLAGS += -DAUDIO_WATCHDOG
-
# Define ANDROID_SMP appropriately. Used to get inline tracing fast-path.
ifeq ($(TARGET_CPU_SMP),true)
LOCAL_CFLAGS += -DANDROID_SMP=1
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 17a69fa..6a3007b 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#include <dirent.h>
#include <math.h>
#include <signal.h>
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 7d38f80..df4e029 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "AudioMixer"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index eacecf0..d192787 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "AudioPolicyService"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#undef __STRICT_ANSI__
#define __STDINT_LIMITS
#define __STDC_LIMIT_MACROS
diff --git a/services/audioflinger/AudioWatchdog.cpp b/services/audioflinger/AudioWatchdog.cpp
index 8f328ee..93d185e 100644
--- a/services/audioflinger/AudioWatchdog.cpp
+++ b/services/audioflinger/AudioWatchdog.cpp
@@ -17,9 +17,12 @@
#define LOG_TAG "AudioWatchdog"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#include <utils/Log.h>
#include "AudioWatchdog.h"
+#ifdef AUDIO_WATCHDOG
+
namespace android {
void AudioWatchdogDump::dump(int fd)
@@ -132,3 +135,5 @@ void AudioWatchdog::setDump(AudioWatchdogDump *dump)
}
} // namespace android
+
+#endif // AUDIO_WATCHDOG
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
new file mode 100644
index 0000000..bc2038a
--- /dev/null
+++ b/services/audioflinger/Configuration.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Put build-time configuration options here rather than Android.mk,
+// so that the instantiate for AudioFlinger service will pick up the same options.
+
+#ifndef ANDROID_AUDIOFLINGER_CONFIGURATION_H
+#define ANDROID_AUDIOFLINGER_CONFIGURATION_H
+
+// uncomment to enable detailed battery usage reporting (not debugged)
+//#define ADD_BATTERY_DATA
+
+// uncomment to enable the audio watchdog
+//#define AUDIO_WATCHDOG
+
+// uncomment to display CPU load adjusted for CPU frequency
+//#define CPU_FREQUENCY_STATISTICS
+
+// uncomment to enable fast mixer to take performance samples for later statistical analysis
+#define FAST_MIXER_STATISTICS
+
+// uncomment to allow fast tracks at non-native sample rate
+//#define FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
+
+// uncomment for debugging timing problems related to StateQueue::push()
+//#define STATE_QUEUE_DUMP
+
+// uncomment to allow tee sink debugging to be enabled by property
+//#define TEE_SINK
+
+// uncomment to log CPU statistics every n wall clock seconds
+//#define DEBUG_CPU_USAGE 10
+
+#endif // ANDROID_AUDIOFLINGER_CONFIGURATION_H
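Configuration.h centralizes the compile-time switches that used to live in Android.mk, so every audioflinger translation unit, including the StateQueue instantiations, is built with one consistent set of options; each source file now includes it before testing any macro. A small sketch of the intended consumption pattern (exampleDump is hypothetical):

    // Sketch of how the shared switches are consumed.
    #define LOG_TAG "ConfigExample"
    #include "Configuration.h"
    #include <utils/Log.h>

    void exampleDump() {
    #ifdef FAST_MIXER_STATISTICS
        // compiled in by default: FastMixer keeps performance samples
        ALOGV("fast mixer statistics enabled");
    #endif
    #ifdef AUDIO_WATCHDOG
        // only when uncommented; otherwise AudioWatchdog.cpp builds to an
        // empty object thanks to its new #ifdef guard above
        ALOGV("audio watchdog enabled");
    #endif
    }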
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 942ea35..1c7a64b 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#include <utils/Log.h>
#include <audio_effects/effect_visualizer.h>
#include <audio_utils/primitives.h>
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 12e4683..819e8ec 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -25,6 +25,7 @@
#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include "Configuration.h"
#include <sys/atomics.h>
#include <time.h>
#include <utils/Log.h>
@@ -142,7 +143,9 @@ bool FastMixer::threadLoop()
preIdle = *current;
current = &preIdle;
oldTsValid = false;
+#ifdef FAST_MIXER_STATISTICS
oldLoadValid = false;
+#endif
ignoreNextOverrun = true;
}
previous = current;
@@ -182,8 +185,10 @@ bool FastMixer::threadLoop()
warmupCycles = 0;
sleepNs = -1;
coldGen = current->mColdGen;
+#ifdef FAST_MIXER_STATISTICS
bounds = 0;
full = false;
+#endif
oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
} else {
sleepNs = FAST_HOT_IDLE_NS;
@@ -614,6 +619,7 @@ FastMixerDumpState::FastMixerDumpState() :
{
mMeasuredWarmupTs.tv_sec = 0;
mMeasuredWarmupTs.tv_nsec = 0;
+#ifdef FAST_MIXER_STATISTICS
// sample arrays aren't accessed atomically with respect to the bounds,
// so clearing reduces chance for dumpsys to read random uninitialized samples
memset(&mMonotonicNs, 0, sizeof(mMonotonicNs));
@@ -621,6 +627,7 @@ FastMixerDumpState::FastMixerDumpState() :
#ifdef CPU_FREQUENCY_STATISTICS
memset(&mCpukHz, 0, sizeof(mCpukHz));
#endif
+#endif
}
FastMixerDumpState::~FastMixerDumpState()
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index c45c81b..737de97 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "Configuration.h"
#include "FastMixerState.h"
namespace android {
diff --git a/services/audioflinger/StateQueue.cpp b/services/audioflinger/StateQueue.cpp
index 3e891a5..c2d3bbd 100644
--- a/services/audioflinger/StateQueue.cpp
+++ b/services/audioflinger/StateQueue.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "StateQueue"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#include <time.h>
#include <cutils/atomic.h>
#include <utils/Log.h>
diff --git a/services/audioflinger/StateQueueInstantiations.cpp b/services/audioflinger/StateQueueInstantiations.cpp
index 077582f..0d5cd0c 100644
--- a/services/audioflinger/StateQueueInstantiations.cpp
+++ b/services/audioflinger/StateQueueInstantiations.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "Configuration.h"
#include "FastMixerState.h"
#include "StateQueue.h"
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d4cd0ea..97a1e43 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -20,6 +20,7 @@
//#define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include "Configuration.h"
#include <math.h>
#include <fcntl.h>
#include <sys/stat.h>
@@ -54,14 +55,11 @@
#include "ServiceUtilities.h"
#include "SchedulingPolicyService.h"
-#undef ADD_BATTERY_DATA
-
#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
#include <media/IMediaDeathNotifier.h>
#endif
-// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds
#ifdef DEBUG_CPU_USAGE
#include <cpustats/CentralTendencyStatistics.h>
#include <cpustats/ThreadCpuUsage.h>
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f0dbee3..c45daae 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+#include "Configuration.h"
#include <math.h>
#include <cutils/compiler.h>
#include <utils/Log.h>
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index 77df152..710d0e9 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -445,6 +445,10 @@ status_t Camera2Device::setNotifyCallback(NotificationListener *listener) {
return res;
}
+bool Camera2Device::willNotify3A() {
+ return true;
+}
+
void Camera2Device::notificationCallback(int32_t msg_type,
int32_t ext1,
int32_t ext2,
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/Camera2Device.h
index 3034a1d..372ce9f 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/Camera2Device.h
@@ -59,6 +59,7 @@ class Camera2Device: public CameraDeviceBase {
virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
virtual status_t waitUntilDrained();
virtual status_t setNotifyCallback(NotificationListener *listener);
+ virtual bool willNotify3A();
virtual status_t waitForNextFrame(nsecs_t timeout);
virtual status_t getNextFrame(CameraMetadata *frame);
virtual status_t triggerAutofocus(uint32_t id);
diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/Camera3Device.cpp
index 73bf30c..9d0f392 100644
--- a/services/camera/libcameraservice/Camera3Device.cpp
+++ b/services/camera/libcameraservice/Camera3Device.cpp
@@ -844,6 +844,10 @@ status_t Camera3Device::setNotifyCallback(NotificationListener *listener) {
return OK;
}
+bool Camera3Device::willNotify3A() {
+ return false;
+}
+
status_t Camera3Device::waitForNextFrame(nsecs_t timeout) {
ATRACE_CALL();
status_t res;
@@ -1244,13 +1248,6 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
}
- AlgState cur3aState;
- AlgState new3aState;
- int32_t aeTriggerId = 0;
- int32_t afTriggerId = 0;
-
- NotificationListener *listener = NULL;
-
// Process the result metadata, if provided
if (result->result != NULL) {
Mutex::Autolock l(mOutputLock);
@@ -1288,59 +1285,6 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
" metadata for frame %d (%lld vs %lld respectively)",
frameNumber, timestamp, entry.data.i64[0]);
}
-
- // Get 3A states from result metadata
-
- entry = captureResult.find(ANDROID_CONTROL_AE_STATE);
- if (entry.count == 0) {
- CLOGE("No AE state provided by HAL for frame %d!",
- frameNumber);
- } else {
- new3aState.aeState =
- static_cast<camera_metadata_enum_android_control_ae_state>(
- entry.data.u8[0]);
- }
-
- entry = captureResult.find(ANDROID_CONTROL_AF_STATE);
- if (entry.count == 0) {
- CLOGE("No AF state provided by HAL for frame %d!",
- frameNumber);
- } else {
- new3aState.afState =
- static_cast<camera_metadata_enum_android_control_af_state>(
- entry.data.u8[0]);
- }
-
- entry = captureResult.find(ANDROID_CONTROL_AWB_STATE);
- if (entry.count == 0) {
- CLOGE("No AWB state provided by HAL for frame %d!",
- frameNumber);
- } else {
- new3aState.awbState =
- static_cast<camera_metadata_enum_android_control_awb_state>(
- entry.data.u8[0]);
- }
-
- entry = captureResult.find(ANDROID_CONTROL_AF_TRIGGER_ID);
- if (entry.count == 0) {
- CLOGE("No AF trigger ID provided by HAL for frame %d!",
- frameNumber);
- } else {
- afTriggerId = entry.data.i32[0];
- }
-
- entry = captureResult.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
- if (entry.count == 0) {
- CLOGE("No AE precapture trigger ID provided by HAL"
- " for frame %d!", frameNumber);
- } else {
- aeTriggerId = entry.data.i32[0];
- }
-
- listener = mListener;
- cur3aState = m3AState;
-
- m3AState = new3aState;
} // scope for mOutputLock
// Return completed buffers to their streams with the timestamp
@@ -1357,30 +1301,16 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
}
}
- // Finally, dispatch any 3A change events to listeners if we got metadata
+ // Finally, signal any waiters for new frames
if (result->result != NULL) {
mResultSignal.signal();
}
- if (result->result != NULL && listener != NULL) {
- if (new3aState.aeState != cur3aState.aeState) {
- ALOGVV("%s: AE state changed from 0x%x to 0x%x",
- __FUNCTION__, cur3aState.aeState, new3aState.aeState);
- listener->notifyAutoExposure(new3aState.aeState, aeTriggerId);
- }
- if (new3aState.afState != cur3aState.afState) {
- ALOGVV("%s: AF state changed from 0x%x to 0x%x",
- __FUNCTION__, cur3aState.afState, new3aState.afState);
- listener->notifyAutoFocus(new3aState.afState, afTriggerId);
- }
- if (new3aState.awbState != cur3aState.awbState) {
- listener->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
- }
- }
-
}
+
+
void Camera3Device::notify(const camera3_notify_msg *msg) {
ATRACE_CALL();
NotificationListener *listener;
diff --git a/services/camera/libcameraservice/Camera3Device.h b/services/camera/libcameraservice/Camera3Device.h
index faa42b9..2328f89 100644
--- a/services/camera/libcameraservice/Camera3Device.h
+++ b/services/camera/libcameraservice/Camera3Device.h
@@ -107,6 +107,7 @@ class Camera3Device :
virtual status_t waitUntilDrained();
virtual status_t setNotifyCallback(NotificationListener *listener);
+ virtual bool willNotify3A();
virtual status_t waitForNextFrame(nsecs_t timeout);
virtual status_t getNextFrame(CameraMetadata *frame);
@@ -389,18 +390,6 @@ class Camera3Device :
Condition mResultSignal;
NotificationListener *mListener;
- struct AlgState {
- camera_metadata_enum_android_control_ae_state aeState;
- camera_metadata_enum_android_control_af_state afState;
- camera_metadata_enum_android_control_awb_state awbState;
-
- AlgState() :
- aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
- afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
- awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
- }
- } m3AState;
-
/**** End scope for mOutputLock ****/
/**
diff --git a/services/camera/libcameraservice/CameraDeviceBase.h b/services/camera/libcameraservice/CameraDeviceBase.h
index 8c457d9..aa92bec 100644
--- a/services/camera/libcameraservice/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/CameraDeviceBase.h
@@ -156,6 +156,13 @@ class CameraDeviceBase : public virtual RefBase {
virtual status_t setNotifyCallback(NotificationListener *listener) = 0;
/**
+ * Whether the device supports calling notifyAutofocus, notifyAutoExposure,
+ * and notifyAutoWhitebalance; if this returns false, the client must
+ * synthesize these notifications from received frame metadata.
+ */
+ virtual bool willNotify3A() = 0;
+
+ /**
* Wait for a new frame to be produced, with timeout in nanoseconds.
* Returns TIMED_OUT when no frame produced within the specified duration
*/
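willNotify3A() tells clients whether the device will emit the 3A notifications itself: Camera2Device returns true, Camera3Device now returns false, and clients of the latter synthesize the callbacks from frame metadata. A short sketch of that decision, mirroring the FrameProcessor constructor change below (device is a wp<CameraDeviceBase> as in that constructor):

    // Sketch only; mirrors FrameProcessor::FrameProcessor() and
    // processSingleFrame() below.
    sp<CameraDeviceBase> d = device.promote();
    bool synthesize3ANotify = (d != 0) && !d->willNotify3A();

    // later, per frame:
    if (synthesize3ANotify) {
        // Derive AE/AF/AWB transitions from the frame metadata and call
        // notifyAutoExposure()/notifyAutoFocus()/notifyAutoWhitebalance()
        // on the client, as process3aState() does.
    }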
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index efbbe57..d7bafda 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -111,8 +111,7 @@ status_t CallbackProcessor::updateStream(const Parameters &params) {
// Create CPU buffer queue endpoint, since app hasn't given us one
// Make it async to avoid disconnect deadlocks
sp<BufferQueue> bq = new BufferQueue();
- mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount,
- /*synchronized*/ false);
+ mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
mCallbackConsumer->setFrameAvailableListener(this);
mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
mCallbackWindow = new Surface(
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
index d13d398..114a7a8 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
@@ -33,6 +33,9 @@ FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
ProFrameProcessor(device),
mClient(client),
mLastFrameNumberOfFaces(0) {
+
+ sp<CameraDeviceBase> d = device.promote();
+ mSynthesize3ANotify = !(d->willNotify3A());
}
FrameProcessor::~FrameProcessor() {
@@ -50,6 +53,11 @@ bool FrameProcessor::processSingleFrame(CameraMetadata &frame,
return false;
}
+ if (mSynthesize3ANotify) {
+ // Ignoring missing fields for now
+ process3aState(frame, client);
+ }
+
if (!ProFrameProcessor::processSingleFrame(frame, device)) {
return false;
}
@@ -185,6 +193,99 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
return OK;
}
+status_t FrameProcessor::process3aState(const CameraMetadata &frame,
+ const sp<Camera2Client> &client) {
+
+ ATRACE_CALL();
+ camera_metadata_ro_entry_t entry;
+ int mId = client->getCameraId();
+
+ entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+ int32_t frameNumber = entry.data.i32[0];
+
+ // Get 3A states from result metadata
+ bool gotAllStates = true;
+
+ AlgState new3aState;
+
+ entry = frame.find(ANDROID_CONTROL_AE_STATE);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: No AE state provided by HAL for frame %d!",
+ __FUNCTION__, mId, frameNumber);
+ gotAllStates = false;
+ } else {
+ new3aState.aeState =
+ static_cast<camera_metadata_enum_android_control_ae_state>(
+ entry.data.u8[0]);
+ }
+
+ entry = frame.find(ANDROID_CONTROL_AF_STATE);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: No AF state provided by HAL for frame %d!",
+ __FUNCTION__, mId, frameNumber);
+ gotAllStates = false;
+ } else {
+ new3aState.afState =
+ static_cast<camera_metadata_enum_android_control_af_state>(
+ entry.data.u8[0]);
+ }
+
+ entry = frame.find(ANDROID_CONTROL_AWB_STATE);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: No AWB state provided by HAL for frame %d!",
+ __FUNCTION__, mId, frameNumber);
+ gotAllStates = false;
+ } else {
+ new3aState.awbState =
+ static_cast<camera_metadata_enum_android_control_awb_state>(
+ entry.data.u8[0]);
+ }
+
+ int32_t afTriggerId = 0;
+ entry = frame.find(ANDROID_CONTROL_AF_TRIGGER_ID);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: No AF trigger ID provided by HAL for frame %d!",
+ __FUNCTION__, mId, frameNumber);
+ gotAllStates = false;
+ } else {
+ afTriggerId = entry.data.i32[0];
+ }
+
+ int32_t aeTriggerId = 0;
+ entry = frame.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: No AE precapture trigger ID provided by HAL"
+ " for frame %d!",
+ __FUNCTION__, mId, frameNumber);
+ gotAllStates = false;
+ } else {
+ aeTriggerId = entry.data.i32[0];
+ }
+
+ if (!gotAllStates) return BAD_VALUE;
+
+ if (new3aState.aeState != m3aState.aeState) {
+ ALOGV("%s: AE state changed from 0x%x to 0x%x",
+ __FUNCTION__, m3aState.aeState, new3aState.aeState);
+ client->notifyAutoExposure(new3aState.aeState, aeTriggerId);
+ }
+ if (new3aState.afState != m3aState.afState) {
+ ALOGV("%s: AF state changed from 0x%x to 0x%x",
+ __FUNCTION__, m3aState.afState, new3aState.afState);
+ client->notifyAutoFocus(new3aState.afState, afTriggerId);
+ }
+ if (new3aState.awbState != m3aState.awbState) {
+ ALOGV("%s: AWB state changed from 0x%x to 0x%x",
+ __FUNCTION__, m3aState.awbState, new3aState.awbState);
+ client->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
+ }
+
+ m3aState = new3aState;
+
+ return OK;
+}
+
+
void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
const camera_frame_metadata &metadata) {
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/camera2/FrameProcessor.h
index 27ed8f6..f480c55 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.h
@@ -44,6 +44,9 @@ class FrameProcessor : public ProFrameProcessor {
private:
wp<Camera2Client> mClient;
+
+ bool mSynthesize3ANotify;
+
int mLastFrameNumberOfFaces;
void processNewFrames(const sp<Camera2Client> &client);
@@ -54,6 +57,22 @@ class FrameProcessor : public ProFrameProcessor {
status_t processFaceDetect(const CameraMetadata &frame,
const sp<Camera2Client> &client);
+ // Send 3A state change notifications to client based on frame metadata
+ status_t process3aState(const CameraMetadata &frame,
+ const sp<Camera2Client> &client);
+
+ struct AlgState {
+ camera_metadata_enum_android_control_ae_state aeState;
+ camera_metadata_enum_android_control_af_state afState;
+ camera_metadata_enum_android_control_awb_state awbState;
+
+ AlgState() :
+ aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
+ afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
+ awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
+ }
+ } m3aState;
+
// Emit FaceDetection event to java if faces changed
void callbackFaceDetection(sp<Camera2Client> client,
const camera_frame_metadata &metadata);
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
index 76fa46c..5981be7 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -322,8 +322,7 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
sp<BufferQueue> bq = new BufferQueue();
mRecordingConsumer = new BufferItemConsumer(bq,
GRALLOC_USAGE_HW_VIDEO_ENCODER,
- mRecordingHeapCount + 1,
- true);
+ mRecordingHeapCount + 1);
mRecordingConsumer->setFrameAvailableListener(this);
mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
mRecordingWindow = new Surface(
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
index 3c575f6..0094992 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -131,8 +131,7 @@ status_t ZslProcessor::updateStream(const Parameters &params) {
sp<BufferQueue> bq = new BufferQueue();
mZslConsumer = new BufferItemConsumer(bq,
GRALLOC_USAGE_HW_CAMERA_ZSL,
- kZslBufferDepth,
- true);
+ kZslBufferDepth);
mZslConsumer->setFrameAvailableListener(this);
mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
mZslWindow = new Surface(
diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
index 6d9acc3..e9a9c2b 100644
--- a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
@@ -213,8 +213,7 @@ status_t Camera3InputStream::configureQueueLocked() {
if (mConsumer.get() == 0) {
sp<BufferQueue> bq = new BufferQueue();
mConsumer = new BufferItemConsumer(bq, camera3_stream::usage,
- mTotalBufferCount,
- /*synchronousMode*/true);
+ mTotalBufferCount);
mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
}
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index 7625735..8141f4e 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -36,11 +36,10 @@ namespace android {
RingBufferConsumer::RingBufferConsumer(uint32_t consumerUsage,
int bufferCount) :
- ConsumerBase(new BufferQueue(true)),
+ ConsumerBase(new BufferQueue()),
mBufferCount(bufferCount)
{
mBufferQueue->setConsumerUsageBits(consumerUsage);
- mBufferQueue->setSynchronousMode(true);
mBufferQueue->setMaxAcquiredBufferCount(bufferCount);
assert(bufferCount > 0);