-rw-r--r--  camera/CameraParameters.cpp                                      |   5
-rw-r--r--  include/camera/CameraParameters.h                                |  10
-rw-r--r--  include/media/stagefright/SurfaceMediaSource.h                   |   6
-rw-r--r--  media/libstagefright/ACodec.cpp                                  |  32
-rw-r--r--  media/libstagefright/SurfaceMediaSource.cpp                      |  32
-rw-r--r--  media/libstagefright/wifi-display/ANetworkSession.cpp            |   6
-rw-r--r--  media/libstagefright/wifi-display/source/Converter.cpp           | 113
-rw-r--r--  media/libstagefright/wifi-display/source/Converter.h             |  16
-rw-r--r--  media/libstagefright/wifi-display/source/MediaPuller.cpp         |  49
-rw-r--r--  media/libstagefright/wifi-display/source/MediaPuller.h           |   2
-rw-r--r--  media/libstagefright/wifi-display/source/PlaybackSession.cpp     | 219
-rw-r--r--  media/libstagefright/wifi-display/source/PlaybackSession.h       |   4
-rw-r--r--  media/libstagefright/wifi-display/source/RepeaterSource.cpp      |  16
-rw-r--r--  media/libstagefright/wifi-display/source/WifiDisplaySource.cpp   |  46
-rw-r--r--  media/libstagefright/wifi-display/source/WifiDisplaySource.h     |   7
-rw-r--r--  services/camera/libcameraservice/Android.mk                      |   1
-rw-r--r--  services/camera/libcameraservice/Camera2Client.cpp               | 574
-rw-r--r--  services/camera/libcameraservice/Camera2Client.h                 |  44
-rw-r--r--  services/camera/libcameraservice/camera2/JpegProcessor.cpp       |  51
-rw-r--r--  services/camera/libcameraservice/camera2/JpegProcessor.h         |   6
-rw-r--r--  services/camera/libcameraservice/camera2/Parameters.cpp          |  16
-rw-r--r--  services/camera/libcameraservice/camera2/Parameters.h            |   1
-rw-r--r--  services/camera/libcameraservice/camera2/StreamingProcessor.cpp  | 604
-rw-r--r--  services/camera/libcameraservice/camera2/StreamingProcessor.h    | 108
24 files changed, 1188 insertions(+), 780 deletions(-)
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index d10f2e5..fd91bf2 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -90,7 +90,6 @@ const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint";
const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
-const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
const char CameraParameters::TRUE[] = "true";
const char CameraParameters::FALSE[] = "false";
@@ -168,10 +167,6 @@ const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
-// Values for light fx settings
-const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
-const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
-
CameraParameters::CameraParameters()
: mMap()
{
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index d521543..5540d32 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -525,10 +525,6 @@ public:
// stream and record stabilized videos.
static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
- // Supported modes for special effects with light.
- // Example values: "lowlight,hdr".
- static const char KEY_LIGHTFX[];
-
// Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
static const char TRUE[];
static const char FALSE[];
@@ -668,12 +664,6 @@ public:
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
- // Values for light special effects
- // Low-light enhancement mode
- static const char LIGHTFX_LOWLIGHT[];
- // High-dynamic range mode
- static const char LIGHTFX_HDR[];
-
private:
DefaultKeyedVector<String8,String8> mMap;
};
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 9e07ea4..f60a535 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -52,6 +52,8 @@ class GraphicBuffer;
// may be dropped. It is possible to wait for the buffers to be
// returned (but not implemented)
+#define DEBUG_PENDING_BUFFERS 0
+
class SurfaceMediaSource : public MediaSource,
public MediaBufferObserver,
protected BufferQueue::ConsumerListener {
@@ -169,6 +171,10 @@ private:
size_t mNumPendingBuffers;
+#if DEBUG_PENDING_BUFFERS
+ Vector<MediaBuffer *> mPendingBuffers;
+#endif
+
// mCurrentTimestamp is the timestamp for the current texture. It
// gets set to mLastQueuedTimestamp each time updateTexImage is called.
int64_t mCurrentTimestamp;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 8190498..2b4220f 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -32,6 +32,8 @@
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/OMXCodec.h>
+#include <media/hardware/HardwareAPI.h>
+
#include <OMX_Component.h>
#include "include/avc_utils.h"
@@ -881,6 +883,33 @@ status_t ACodec::configureCodec(
}
}
+ int32_t prependSPSPPS;
+ if (encoder
+ && msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
+ && prependSPSPPS != 0) {
+ OMX_INDEXTYPE index;
+ err = mOMX->getExtensionIndex(
+ mNode,
+ "OMX.google.android.index.prependSPSPPSToIDRFrames",
+ &index);
+
+ if (err == OK) {
+ PrependSPSPPSToIDRFramesParams params;
+ InitOMXParams(&params);
+ params.bEnable = OMX_TRUE;
+
+ err = mOMX->setParameter(
+ mNode, index, &params, sizeof(params));
+ }
+
+ if (err != OK) {
+ ALOGE("Encoder could not be configured to emit SPS/PPS before "
+ "IDR frames. (err %d)", err);
+
+ return err;
+ }
+ }
+
if (!strncasecmp(mime, "video/", 6)) {
if (encoder) {
err = setupVideoEncoder(mime, msg);
@@ -2471,6 +2500,9 @@ bool ACodec::BaseState::onOMXEmptyBufferDone(IOMX::buffer_id bufferID) {
// by this "mediaBuffer" object. Now that the OMX component has
// told us that it's done with the input buffer, we can decrement
// the mediaBuffer's reference count.
+
+ ALOGV("releasing mbuf %p", mediaBuffer);
+
((MediaBuffer *)mediaBuffer)->release();
mediaBuffer = NULL;
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 3d3f421..9d39d0e 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -191,6 +191,23 @@ status_t SurfaceMediaSource::stop()
ALOGV("stop");
Mutex::Autolock lock(mMutex);
+ if (mStopped) {
+ return OK;
+ }
+
+ while (mNumPendingBuffers > 0) {
+ ALOGI("Still waiting for %d buffers to be returned.",
+ mNumPendingBuffers);
+
+#if DEBUG_PENDING_BUFFERS
+ for (size_t i = 0; i < mPendingBuffers.size(); ++i) {
+ ALOGI("%d: %p", i, mPendingBuffers.itemAt(i));
+ }
+#endif
+
+ mMediaBuffersAvailableCondition.wait(mMutex);
+ }
+
mStopped = true;
mFrameAvailableCondition.signal();
mMediaBuffersAvailableCondition.signal();
@@ -335,6 +352,12 @@ status_t SurfaceMediaSource::read( MediaBuffer **buffer,
++mNumPendingBuffers;
+#if DEBUG_PENDING_BUFFERS
+ mPendingBuffers.push_back(*buffer);
+#endif
+
+ ALOGV("returning mbuf %p", *buffer);
+
return OK;
}
@@ -391,6 +414,15 @@ void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {
CHECK(!"signalBufferReturned: bogus buffer");
}
+#if DEBUG_PENDING_BUFFERS
+ for (size_t i = 0; i < mPendingBuffers.size(); ++i) {
+ if (mPendingBuffers.itemAt(i) == buffer) {
+ mPendingBuffers.removeAt(i);
+ break;
+ }
+ }
+#endif
+
--mNumPendingBuffers;
mMediaBuffersAvailableCondition.broadcast();
}
diff --git a/media/libstagefright/wifi-display/ANetworkSession.cpp b/media/libstagefright/wifi-display/ANetworkSession.cpp
index 90db758..0279c34 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.cpp
+++ b/media/libstagefright/wifi-display/ANetworkSession.cpp
@@ -176,7 +176,7 @@ ANetworkSession::Session::Session(
}
ANetworkSession::Session::~Session() {
- ALOGI("Session %d gone", mSessionID);
+ ALOGV("Session %d gone", mSessionID);
close(mSocket);
mSocket = -1;
@@ -1084,7 +1084,7 @@ void ANetworkSession::threadLoop() {
} else {
status_t err = session->readMore();
if (err != OK) {
- ALOGI("readMore on socket %d failed w/ error %d (%s)",
+ ALOGE("readMore on socket %d failed w/ error %d (%s)",
s, err, strerror(-err));
}
}
@@ -1093,7 +1093,7 @@ void ANetworkSession::threadLoop() {
if (FD_ISSET(s, &ws)) {
status_t err = session->writeMore();
if (err != OK) {
- ALOGI("writeMore on socket %d failed w/ error %d (%s)",
+ ALOGE("writeMore on socket %d failed w/ error %d (%s)",
s, err, strerror(-err));
}
}
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index c6118d4..968a805 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -20,12 +20,15 @@
#include "Converter.h"
+#include "MediaPuller.h"
+
#include <cutils/properties.h>
#include <gui/SurfaceTextureClient.h>
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
@@ -50,17 +53,22 @@ Converter::Converter(
}
mInitCheck = initEncoder();
+
+ if (mInitCheck != OK) {
+ if (mEncoder != NULL) {
+ mEncoder->release();
+ mEncoder.clear();
+ }
+ }
}
Converter::~Converter() {
- if (mEncoder != NULL) {
- mEncoder->release();
- mEncoder.clear();
- }
+ CHECK(mEncoder == NULL);
+}
- AString mime;
- CHECK(mInputFormat->findString("mime", &mime));
- ALOGI("encoder (%s) shut down.", mime.c_str());
+void Converter::shutdownAsync() {
+ ALOGV("shutdown");
+ (new AMessage(kWhatShutdown, id()))->post();
}
status_t Converter::initCheck() const {
@@ -126,6 +134,7 @@ status_t Converter::initEncoder() {
mOutputFormat->setInt32("bitrate", videoBitrate);
mOutputFormat->setInt32("frame-rate", 24);
mOutputFormat->setInt32("i-frame-interval", 1); // Iframes every 1 secs
+ // mOutputFormat->setInt32("prepend-sps-pps-to-idr-frames", 1);
}
ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
@@ -155,16 +164,6 @@ status_t Converter::initEncoder() {
return mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
}
-void Converter::feedAccessUnit(const sp<ABuffer> &accessUnit) {
- sp<AMessage> msg = new AMessage(kWhatFeedAccessUnit, id());
- msg->setBuffer("accessUnit", accessUnit);
- msg->post();
-}
-
-void Converter::signalEOS() {
- (new AMessage(kWhatInputEOS, id()))->post();
-}
-
void Converter::notifyError(status_t err) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatError);
@@ -174,32 +173,70 @@ void Converter::notifyError(status_t err) {
void Converter::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
- case kWhatFeedAccessUnit:
+ case kWhatMediaPullerNotify:
{
- sp<ABuffer> accessUnit;
- CHECK(msg->findBuffer("accessUnit", &accessUnit));
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
- mInputBufferQueue.push_back(accessUnit);
+ if (mEncoder == NULL) {
+ ALOGV("got msg '%s' after encoder shutdown.",
+ msg->debugString().c_str());
- feedEncoderInputBuffers();
+ if (what == MediaPuller::kWhatAccessUnit) {
+ sp<ABuffer> accessUnit;
+ CHECK(msg->findBuffer("accessUnit", &accessUnit));
- scheduleDoMoreWork();
- break;
- }
+ void *mbuf;
+ if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
+ && mbuf != NULL) {
+ ALOGV("releasing mbuf %p", mbuf);
- case kWhatInputEOS:
- {
- mInputBufferQueue.push_back(NULL);
+ accessUnit->meta()->setPointer("mediaBuffer", NULL);
- feedEncoderInputBuffers();
+ static_cast<MediaBuffer *>(mbuf)->release();
+ mbuf = NULL;
+ }
+ }
+ break;
+ }
+
+ if (what == MediaPuller::kWhatEOS) {
+ mInputBufferQueue.push_back(NULL);
- scheduleDoMoreWork();
+ feedEncoderInputBuffers();
+
+ scheduleDoMoreWork();
+ } else {
+ CHECK_EQ(what, MediaPuller::kWhatAccessUnit);
+
+ sp<ABuffer> accessUnit;
+ CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+#if 0
+ void *mbuf;
+ if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
+ && mbuf != NULL) {
+ ALOGI("queueing mbuf %p", mbuf);
+ }
+#endif
+
+ mInputBufferQueue.push_back(accessUnit);
+
+ feedEncoderInputBuffers();
+
+ scheduleDoMoreWork();
+ }
break;
}
case kWhatDoMoreWork:
{
mDoMoreWorkPending = false;
+
+ if (mEncoder == NULL) {
+ break;
+ }
+
status_t err = doMoreWork();
if (err != OK) {
@@ -212,6 +249,10 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
case kWhatRequestIDRFrame:
{
+ if (mEncoder == NULL) {
+ break;
+ }
+
if (mIsVideo) {
ALOGI("requesting IDR frame");
mEncoder->requestIDRFrame();
@@ -219,6 +260,18 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatShutdown:
+ {
+ ALOGI("shutting down encoder");
+ mEncoder->release();
+ mEncoder.clear();
+
+ AString mime;
+ CHECK(mInputFormat->findString("mime", &mime));
+ ALOGI("encoder (%s) shut down.", mime.c_str());
+ break;
+ }
+
default:
TRESPASS();
}
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index 901ae2e..9f54523 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -51,18 +51,20 @@ struct Converter : public AHandler {
kWhatError,
};
-protected:
- virtual ~Converter();
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
enum {
- kWhatFeedAccessUnit,
- kWhatInputEOS,
kWhatDoMoreWork,
kWhatRequestIDRFrame,
+ kWhatShutdown,
+ kWhatMediaPullerNotify,
};
+ void shutdownAsync();
+
+protected:
+ virtual ~Converter();
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
status_t mInitCheck;
sp<AMessage> mNotify;
sp<ALooper> mCodecLooper;
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
index 35ae539..82ae001 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp
@@ -65,33 +65,20 @@ status_t MediaPuller::start() {
return postSynchronouslyAndReturnError(new AMessage(kWhatStart, id()));
}
-status_t MediaPuller::stop() {
- return postSynchronouslyAndReturnError(new AMessage(kWhatStop, id()));
+void MediaPuller::stopAsync(const sp<AMessage> &notify) {
+ sp<AMessage> msg = new AMessage(kWhatStop, id());
+ msg->setMessage("notify", notify);
+ msg->post();
}
void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatStart:
- case kWhatStop:
{
- status_t err;
+ status_t err = mSource->start();
- if (msg->what() == kWhatStart) {
- err = mSource->start();
-
- if (err == OK) {
- schedulePull();
- }
- } else {
- sp<MetaData> meta = mSource->getFormat();
- const char *tmp;
- CHECK(meta->findCString(kKeyMIMEType, &tmp));
- AString mime = tmp;
-
- ALOGI("MediaPuller(%s) stopping.", mime.c_str());
- err = mSource->stop();
- ALOGI("MediaPuller(%s) stopped.", mime.c_str());
- ++mPullGeneration;
+ if (err == OK) {
+ schedulePull();
}
sp<AMessage> response = new AMessage;
@@ -104,6 +91,24 @@ void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatStop:
+ {
+ sp<MetaData> meta = mSource->getFormat();
+ const char *tmp;
+ CHECK(meta->findCString(kKeyMIMEType, &tmp));
+ AString mime = tmp;
+
+ ALOGI("MediaPuller(%s) stopping.", mime.c_str());
+ mSource->stop();
+ ALOGI("MediaPuller(%s) stopped.", mime.c_str());
+ ++mPullGeneration;
+
+ sp<AMessage> notify;
+ CHECK(msg->findMessage("notify", &notify));
+ notify->post();
+ break;
+ }
+
case kWhatPull:
{
int32_t generation;
@@ -153,6 +158,10 @@ void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
notify->setBuffer("accessUnit", accessUnit);
notify->post();
+ if (mbuf != NULL) {
+ ALOGV("posted mbuf %p", mbuf);
+ }
+
schedulePull();
}
break;
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.h b/media/libstagefright/wifi-display/source/MediaPuller.h
index 134e1c0..728da7b 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.h
+++ b/media/libstagefright/wifi-display/source/MediaPuller.h
@@ -33,7 +33,7 @@ struct MediaPuller : public AHandler {
MediaPuller(const sp<MediaSource> &source, const sp<AMessage> &notify);
status_t start();
- status_t stop();
+ void stopAsync(const sp<AMessage> &notify);
protected:
virtual void onMessageReceived(const sp<AMessage> &msg);
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 0facafe..775f23b 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -53,14 +53,17 @@ namespace android {
static size_t kMaxRTPPacketSize = 1500;
static size_t kMaxNumTSPacketsPerRTPPacket = (kMaxRTPPacketSize - 12) / 188;
-struct WifiDisplaySource::PlaybackSession::Track : public RefBase {
- Track(const sp<ALooper> &pullLooper,
+struct WifiDisplaySource::PlaybackSession::Track : public AHandler {
+ enum {
+ kWhatStopped,
+ };
+
+ Track(const sp<AMessage> &notify,
+ const sp<ALooper> &pullLooper,
const sp<ALooper> &codecLooper,
const sp<MediaPuller> &mediaPuller,
const sp<Converter> &converter);
- Track(const sp<AMessage> &format);
-
sp<AMessage> getFormat();
bool isAudio() const;
@@ -70,20 +73,27 @@ struct WifiDisplaySource::PlaybackSession::Track : public RefBase {
void setPacketizerTrackIndex(size_t index);
status_t start();
- status_t stop();
+ void stopAsync();
+
+ bool isStopped() const { return !mStarted; }
void queueAccessUnit(const sp<ABuffer> &accessUnit);
sp<ABuffer> dequeueAccessUnit();
protected:
+ virtual void onMessageReceived(const sp<AMessage> &msg);
virtual ~Track();
private:
+ enum {
+ kWhatMediaPullerStopped,
+ };
+
+ sp<AMessage> mNotify;
sp<ALooper> mPullLooper;
sp<ALooper> mCodecLooper;
sp<MediaPuller> mMediaPuller;
sp<Converter> mConverter;
- sp<AMessage> mFormat;
bool mStarted;
ssize_t mPacketizerTrackIndex;
bool mIsAudio;
@@ -95,11 +105,13 @@ private:
};
WifiDisplaySource::PlaybackSession::Track::Track(
+ const sp<AMessage> &notify,
const sp<ALooper> &pullLooper,
const sp<ALooper> &codecLooper,
const sp<MediaPuller> &mediaPuller,
const sp<Converter> &converter)
- : mPullLooper(pullLooper),
+ : mNotify(notify),
+ mPullLooper(pullLooper),
mCodecLooper(codecLooper),
mMediaPuller(mediaPuller),
mConverter(converter),
@@ -108,14 +120,8 @@ WifiDisplaySource::PlaybackSession::Track::Track(
mIsAudio(IsAudioFormat(mConverter->getOutputFormat())) {
}
-WifiDisplaySource::PlaybackSession::Track::Track(const sp<AMessage> &format)
- : mFormat(format),
- mPacketizerTrackIndex(-1),
- mIsAudio(IsAudioFormat(mFormat)) {
-}
-
WifiDisplaySource::PlaybackSession::Track::~Track() {
- stop();
+ CHECK(!mStarted);
}
// static
@@ -128,10 +134,6 @@ bool WifiDisplaySource::PlaybackSession::Track::IsAudioFormat(
}
sp<AMessage> WifiDisplaySource::PlaybackSession::Track::getFormat() {
- if (mFormat != NULL) {
- return mFormat;
- }
-
return mConverter->getOutputFormat();
}
@@ -155,9 +157,7 @@ void WifiDisplaySource::PlaybackSession::Track::setPacketizerTrackIndex(size_t i
status_t WifiDisplaySource::PlaybackSession::Track::start() {
ALOGV("Track::start isAudio=%d", mIsAudio);
- if (mStarted) {
- return INVALID_OPERATION;
- }
+ CHECK(!mStarted);
status_t err = OK;
@@ -172,24 +172,40 @@ status_t WifiDisplaySource::PlaybackSession::Track::start() {
return err;
}
-status_t WifiDisplaySource::PlaybackSession::Track::stop() {
- ALOGV("Track::stop isAudio=%d", mIsAudio);
+void WifiDisplaySource::PlaybackSession::Track::stopAsync() {
+ ALOGV("Track::stopAsync isAudio=%d", mIsAudio);
- if (!mStarted) {
- return INVALID_OPERATION;
- }
+ CHECK(mStarted);
- status_t err = OK;
+ mConverter->shutdownAsync();
+
+ sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, id());
if (mMediaPuller != NULL) {
- err = mMediaPuller->stop();
+ mMediaPuller->stopAsync(msg);
+ } else {
+ msg->post();
}
+}
- mConverter.clear();
+void WifiDisplaySource::PlaybackSession::Track::onMessageReceived(
+ const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatMediaPullerStopped:
+ {
+ mConverter.clear();
- mStarted = false;
+ mStarted = false;
- return err;
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatStopped);
+ notify->post();
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
}
void WifiDisplaySource::PlaybackSession::Track::queueAccessUnit(
@@ -482,15 +498,7 @@ status_t WifiDisplaySource::PlaybackSession::onFinishPlay2() {
}
for (size_t i = 0; i < mTracks.size(); ++i) {
- status_t err = mTracks.editValueAt(i)->start();
-
- if (err != OK) {
- for (size_t j = 0; j < i; ++j) {
- mTracks.editValueAt(j)->stop();
- }
-
- return err;
- }
+ CHECK_EQ((status_t)OK, mTracks.editValueAt(i)->start());
}
sp<AMessage> notify = mNotify->dup();
@@ -506,32 +514,12 @@ status_t WifiDisplaySource::PlaybackSession::pause() {
return OK;
}
-status_t WifiDisplaySource::PlaybackSession::destroy() {
- mTracks.clear();
-
- mPacketizer.clear();
-
- mTracks.clear();
-
-#if ENABLE_RETRANSMISSION
- if (mRTCPRetransmissionSessionID != 0) {
- mNetSession->destroySession(mRTCPRetransmissionSessionID);
- }
-
- if (mRTPRetransmissionSessionID != 0) {
- mNetSession->destroySession(mRTPRetransmissionSessionID);
- }
-#endif
-
- if (mRTCPSessionID != 0) {
- mNetSession->destroySession(mRTCPSessionID);
- }
+void WifiDisplaySource::PlaybackSession::destroyAsync() {
+ ALOGI("destroyAsync");
- if (mRTPSessionID != 0) {
- mNetSession->destroySession(mRTPSessionID);
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ mTracks.valueAt(i)->stopAsync();
}
-
- return OK;
}
void WifiDisplaySource::PlaybackSession::onMessageReceived(
@@ -669,32 +657,6 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived(
break;
}
- case kWhatMediaPullerNotify:
- {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (what == MediaPuller::kWhatEOS) {
- ALOGI("input eos");
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- mTracks.valueAt(i)->converter()->signalEOS();
- }
- } else {
- CHECK_EQ(what, MediaPuller::kWhatAccessUnit);
-
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
-
- sp<ABuffer> accessUnit;
- CHECK(msg->findBuffer("accessUnit", &accessUnit));
-
- mTracks.valueFor(trackIndex)->converter()
- ->feedAccessUnit(accessUnit);
- }
- break;
- }
-
case kWhatConverterNotify:
{
int32_t what;
@@ -776,6 +738,57 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived(
break;
}
+ case kWhatTrackNotify:
+ {
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ if (what == Track::kWhatStopped) {
+ bool allTracksAreStopped = true;
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ const sp<Track> &track = mTracks.valueAt(i);
+ if (!track->isStopped()) {
+ allTracksAreStopped = false;
+ break;
+ }
+ }
+
+ if (!allTracksAreStopped) {
+ break;
+ }
+
+ mTracks.clear();
+
+ mPacketizer.clear();
+
+#if ENABLE_RETRANSMISSION
+ if (mRTCPRetransmissionSessionID != 0) {
+ mNetSession->destroySession(mRTCPRetransmissionSessionID);
+ }
+
+ if (mRTPRetransmissionSessionID != 0) {
+ mNetSession->destroySession(mRTPRetransmissionSessionID);
+ }
+#endif
+
+ if (mRTCPSessionID != 0) {
+ mNetSession->destroySession(mRTCPSessionID);
+ }
+
+ if (mRTPSessionID != 0) {
+ mNetSession->destroySession(mRTPSessionID);
+ }
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatSessionDestroyed);
+ notify->post();
+ }
+ break;
+ }
+
default:
TRESPASS();
}
@@ -817,11 +830,6 @@ status_t WifiDisplaySource::PlaybackSession::addSource(
trackIndex = mTracks.size();
- notify = new AMessage(kWhatMediaPullerNotify, id());
- notify->setSize("trackIndex", trackIndex);
- sp<MediaPuller> puller = new MediaPuller(source, notify);
- pullLooper->registerHandler(puller);
-
sp<AMessage> format;
status_t err = convertMetaDataToMessage(source->getFormat(), &format);
CHECK_EQ(err, (status_t)OK);
@@ -838,15 +846,32 @@ status_t WifiDisplaySource::PlaybackSession::addSource(
sp<Converter> converter =
new Converter(notify, codecLooper, format);
- CHECK_EQ(converter->initCheck(), (status_t)OK);
+
+ if (converter->initCheck() != OK) {
+ return converter->initCheck();
+ }
looper()->registerHandler(converter);
+ notify = new AMessage(Converter::kWhatMediaPullerNotify, converter->id());
+ notify->setSize("trackIndex", trackIndex);
+
+ sp<MediaPuller> puller = new MediaPuller(source, notify);
+ pullLooper->registerHandler(puller);
+
if (numInputBuffers != NULL) {
*numInputBuffers = converter->getInputBufferCount();
}
- mTracks.add(trackIndex, new Track(pullLooper, codecLooper, puller, converter));
+ notify = new AMessage(kWhatTrackNotify, id());
+ notify->setSize("trackIndex", trackIndex);
+
+ sp<Track> track = new Track(
+ notify, pullLooper, codecLooper, puller, converter);
+
+ looper()->registerHandler(track);
+
+ mTracks.add(trackIndex, track);
if (isVideo) {
mVideoTrackIndex = trackIndex;
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 342fc85..9237a72 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -51,7 +51,7 @@ struct WifiDisplaySource::PlaybackSession : public AHandler {
const char *clientIP, int32_t clientRtp, int32_t clientRtcp,
TransportMode transportMode);
- status_t destroy();
+ void destroyAsync();
int32_t getRTPPort() const;
@@ -72,6 +72,7 @@ struct WifiDisplaySource::PlaybackSession : public AHandler {
kWhatSessionDead,
kWhatBinaryData,
kWhatSessionEstablished,
+ kWhatSessionDestroyed,
};
protected:
@@ -91,6 +92,7 @@ private:
#endif
kWhatMediaPullerNotify,
kWhatConverterNotify,
+ kWhatTrackNotify,
kWhatUpdateSurface,
kWhatFinishPlay,
};
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
index 56e8860..483d29c 100644
--- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp
+++ b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
@@ -50,6 +50,8 @@ status_t RepeaterSource::start(MetaData *params) {
}
status_t RepeaterSource::stop() {
+ ALOGV("stopping");
+
if (mLooper != NULL) {
mLooper->stop();
mLooper.clear();
@@ -57,7 +59,17 @@ status_t RepeaterSource::stop() {
mReflector.clear();
}
- return mSource->stop();
+ if (mBuffer != NULL) {
+ ALOGV("releasing mbuf %p", mBuffer);
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+
+ status_t err = mSource->stop();
+
+ ALOGV("stopped");
+
+ return err;
}
sp<MetaData> RepeaterSource::getFormat() {
@@ -117,6 +129,8 @@ void RepeaterSource::onMessageReceived(const sp<AMessage> &msg) {
MediaBuffer *buffer;
status_t err = mSource->read(&buffer);
+ ALOGV("read mbuf %p", buffer);
+
Mutex::Autolock autoLock(mLock);
if (mBuffer != NULL) {
mBuffer->release();
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 16c0f35..d5ffc65 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -293,6 +293,8 @@ void WifiDisplaySource::onMessageReceived(const sp<AMessage> &msg) {
mClientInfo.mPlaybackSession->height(),
0 /* flags */);
}
+ } else if (what == PlaybackSession::kWhatSessionDestroyed) {
+ disconnectClient2();
} else {
CHECK_EQ(what, PlaybackSession::kWhatBinaryData);
@@ -1117,6 +1119,12 @@ status_t WifiDisplaySource::onTeardownRequest(
void WifiDisplaySource::finishStop() {
ALOGV("finishStop");
+ disconnectClientAsync();
+}
+
+void WifiDisplaySource::finishStopAfterDisconnectingClient() {
+ ALOGV("finishStopAfterDisconnectingClient");
+
#if REQUIRE_HDCP
if (mHDCP != NULL) {
ALOGI("Initiating HDCP shutdown.");
@@ -1132,19 +1140,19 @@ void WifiDisplaySource::finishStop2() {
ALOGV("finishStop2");
#if REQUIRE_HDCP
- mHDCP->setObserver(NULL);
- mHDCPObserver.clear();
- mHDCP.clear();
+ if (mHDCP != NULL) {
+ mHDCP->setObserver(NULL);
+ mHDCPObserver.clear();
+ mHDCP.clear();
+ }
#endif
- disconnectClient();
-
if (mSessionID != 0) {
mNetSession->destroySession(mSessionID);
mSessionID = 0;
}
- ALOGV("finishStop2 completed.");
+ ALOGI("We're stopped.");
status_t err = OK;
@@ -1264,14 +1272,26 @@ sp<WifiDisplaySource::PlaybackSession> WifiDisplaySource::findPlaybackSession(
return mClientInfo.mPlaybackSession;
}
-void WifiDisplaySource::disconnectClient() {
+void WifiDisplaySource::disconnectClientAsync() {
+ ALOGV("disconnectClient");
+
+ if (mClientInfo.mPlaybackSession == NULL) {
+ disconnectClient2();
+ return;
+ }
+
+ if (mClientInfo.mPlaybackSession != NULL) {
+ ALOGV("Destroying PlaybackSession");
+ mClientInfo.mPlaybackSession->destroyAsync();
+ }
+}
+
+void WifiDisplaySource::disconnectClient2() {
+ ALOGV("disconnectClient2");
+
if (mClientInfo.mPlaybackSession != NULL) {
- sp<PlaybackSession> playbackSession = mClientInfo.mPlaybackSession;
+ looper()->unregisterHandler(mClientInfo.mPlaybackSession->id());
mClientInfo.mPlaybackSession.clear();
-
- ALOGI("Destroying PlaybackSession");
- playbackSession->destroy();
- looper()->unregisterHandler(playbackSession->id());
}
if (mClientSessionID != 0) {
@@ -1280,6 +1300,8 @@ void WifiDisplaySource::disconnectClient() {
}
mClient->onDisplayDisconnected();
+
+ finishStopAfterDisconnectingClient();
}
#if REQUIRE_HDCP
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index 77b15f8..ade623a 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -200,11 +200,10 @@ private:
sp<PlaybackSession> findPlaybackSession(
const sp<ParsedMessage> &data, int32_t *playbackSessionID) const;
- // Disconnects the current client and shuts down its playback session
- // (if any).
- void disconnectClient();
-
void finishStop();
+ void disconnectClientAsync();
+ void disconnectClient2();
+ void finishStopAfterDisconnectingClient();
void finishStop2();
DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySource);
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index c7927fe..eff47c8 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -14,6 +14,7 @@ LOCAL_SRC_FILES:= \
camera2/CameraMetadata.cpp \
camera2/Parameters.cpp \
camera2/FrameProcessor.cpp \
+ camera2/StreamingProcessor.cpp \
camera2/JpegProcessor.cpp \
camera2/CallbackProcessor.cpp \
camera2/ZslProcessor.cpp \
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index a83977f..33e0b56 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -24,7 +24,6 @@
#include <cutils/properties.h>
#include <gui/SurfaceTextureClient.h>
#include <gui/Surface.h>
-#include <media/hardware/MetadataBufferType.h>
#include "camera2/Parameters.h"
#include "Camera2Client.h"
@@ -52,10 +51,7 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
Client(cameraService, cameraClient,
cameraId, cameraFacing, clientPid),
mSharedCameraClient(cameraClient),
- mParameters(cameraId, cameraFacing),
- mPreviewStreamId(NO_STREAM),
- mRecordingStreamId(NO_STREAM),
- mRecordingHeapCount(kDefaultRecordingHeapCount)
+ mParameters(cameraId, cameraFacing)
{
ATRACE_CALL();
ALOGI("Camera %d: Opened", cameraId);
@@ -101,6 +97,8 @@ status_t Camera2Client::initialize(camera_module_t *module)
String8 threadName;
+ mStreamingProcessor = new StreamingProcessor(this);
+
mFrameProcessor = new FrameProcessor(this);
threadName = String8::format("C2-%d-FrameProc",
mCameraId);
@@ -140,9 +138,12 @@ Camera2Client::~Camera2Client() {
mDestructionStarted = true;
- // Rewrite mClientPid to allow shutdown by CameraService
- mClientPid = getCallingPid();
- disconnect();
+ SharedParameters::Lock l(mParameters);
+ if (l.mParameters.state != Parameters::DISCONNECTED) {
+ // Rewrite mClientPid to allow shutdown by CameraService
+ mClientPid = getCallingPid();
+ disconnect();
+ }
ALOGI("Camera %d: Closed", mCameraId);
}
@@ -314,25 +315,9 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
getCaptureStreamId());
result.appendFormat(" Recording stream ID: %d\n",
getRecordingStreamId());
+ write(fd, result.string(), result.size());
- result.append(" Current requests:\n");
- if (mPreviewRequest.entryCount() != 0) {
- result.append(" Preview request:\n");
- write(fd, result.string(), result.size());
- mPreviewRequest.dump(fd, 2, 6);
- } else {
- result.append(" Preview request: undefined\n");
- write(fd, result.string(), result.size());
- }
-
- if (mRecordingRequest.entryCount() != 0) {
- result = " Recording request:\n";
- write(fd, result.string(), result.size());
- mRecordingRequest.dump(fd, 2, 6);
- } else {
- result = " Recording request: undefined\n";
- write(fd, result.string(), result.size());
- }
+ mStreamingProcessor->dump(fd, args);
mCaptureSequencer->dump(fd, args);
@@ -373,20 +358,10 @@ void Camera2Client::disconnect() {
l.mParameters.state = Parameters::DISCONNECTED;
}
- if (mPreviewStreamId != NO_STREAM) {
- mDevice->deleteStream(mPreviewStreamId);
- mPreviewStreamId = NO_STREAM;
- }
-
+ mStreamingProcessor->deletePreviewStream();
+ mStreamingProcessor->deleteRecordingStream();
mJpegProcessor->deleteStream();
-
- if (mRecordingStreamId != NO_STREAM) {
- mDevice->deleteStream(mRecordingStreamId);
- mRecordingStreamId = NO_STREAM;
- }
-
mCallbackProcessor->deleteStream();
-
mZslProcessor->deleteStream();
mFrameProcessor->requestExit();
@@ -546,24 +521,13 @@ status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
break;
}
- if (mPreviewStreamId != NO_STREAM) {
- res = mDevice->waitUntilDrained();
- if (res != OK) {
- ALOGE("%s: Error waiting for preview to drain: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
- res = mDevice->deleteStream(mPreviewStreamId);
- if (res != OK) {
- ALOGE("%s: Unable to delete old preview stream: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
- mPreviewStreamId = NO_STREAM;
- }
-
mPreviewSurface = binder;
- mPreviewWindow = window;
+ res = mStreamingProcessor->setPreviewWindow(window);
+ if (res != OK) {
+ ALOGE("%s: Unable to set new preview window: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
if (l.mParameters.state == Parameters::WAITING_FOR_PREVIEW_WINDOW) {
return startPreviewL(l.mParameters, false);
@@ -624,25 +588,33 @@ status_t Camera2Client::startPreview() {
status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
ATRACE_CALL();
status_t res;
- if (params.state >= Parameters::PREVIEW && !restart) {
+ if (params.state == Parameters::PREVIEW && !restart) {
+ // Succeed attempt to re-enter preview state
+ ALOGI("%s: Not starting preview; already in preview state.",
+ __FUNCTION__);
+ return OK;
+ }
+ if (params.state > Parameters::PREVIEW && !restart) {
ALOGE("%s: Can't start preview in state %s",
__FUNCTION__,
Parameters::getStateName(params.state));
return INVALID_OPERATION;
}
- if (mPreviewWindow == 0) {
+ if (!mStreamingProcessor->haveValidPreviewWindow()) {
params.state = Parameters::WAITING_FOR_PREVIEW_WINDOW;
return OK;
}
params.state = Parameters::STOPPED;
- res = updatePreviewStream(params);
+ res = mStreamingProcessor->updatePreviewStream(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update preview stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+
+ Vector<uint8_t> outputStreams;
bool callbacksEnabled = params.previewCallbackFlags &
CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
if (callbacksEnabled) {
@@ -652,6 +624,7 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+ outputStreams.push(getCallbackStreamId());
}
if (params.zslMode && !params.recordingHint) {
res = mZslProcessor->updateStream(params);
@@ -660,38 +633,28 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+ outputStreams.push(getZslStreamId());
}
- CameraMetadata *request;
+ outputStreams.push(getPreviewStreamId());
+
if (!params.recordingHint) {
- if (mPreviewRequest.entryCount() == 0) {
- res = updatePreviewRequest(params);
+ if (!restart) {
+ res = mStreamingProcessor->updatePreviewRequest(params);
if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create preview request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
+ ALOGE("%s: Camera %d: Can't set up preview request: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
return res;
}
}
- request = &mPreviewRequest;
+ res = mStreamingProcessor->startStream(StreamingProcessor::PREVIEW,
+ outputStreams);
} else {
// With recording hint set, we're going to be operating under the
// assumption that the user will record video. To optimize recording
// startup time, create the necessary output streams for recording and
// video snapshot now if they don't already exist.
- if (mRecordingRequest.entryCount() == 0) {
- res = updateRecordingRequest(params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create recording preview "
- "request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
- request = &mRecordingRequest;
-
- // TODO: Re-enable recording stream creation/update here once issues are
- // resolved
-
res = mJpegProcessor->updateStream(params);
if (res != OK) {
ALOGE("%s: Camera %d: Can't pre-configure still image "
@@ -699,43 +662,26 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- }
-
- Vector<uint8_t> outputStreams;
- outputStreams.push(getPreviewStreamId());
-
- if (callbacksEnabled) {
- outputStreams.push(getCallbackStreamId());
- }
- if (params.zslMode && !params.recordingHint) {
- outputStreams.push(getZslStreamId());
- }
- res = request->update(
- ANDROID_REQUEST_OUTPUT_STREAMS,
- outputStreams);
-
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
+ if (!restart) {
+ res = mStreamingProcessor->updateRecordingRequest(params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't set up preview request with "
+ "record hint: %s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
+ }
+ res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+ outputStreams);
}
- res = request->sort();
if (res != OK) {
- ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)",
+ ALOGE("%s: Camera %d: Unable to start streaming preview: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- res = mDevice->setStreamingRequest(*request);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set preview request to start preview: "
- "%s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
params.state = Parameters::PREVIEW;
-
return OK;
}
@@ -770,8 +716,7 @@ void Camera2Client::stopPreviewL() {
case Parameters::RECORD:
// no break - identical to preview
case Parameters::PREVIEW:
- mDevice->clearStreamingRequest();
- mDevice->waitUntilDrained();
+ mStreamingProcessor->stopStream();
// no break
case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
SharedParameters::Lock l(mParameters);
@@ -860,14 +805,24 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
return INVALID_OPERATION;
}
- mCameraService->playSound(CameraService::SOUND_RECORDING);
+ if (!restart) {
+ mCameraService->playSound(CameraService::SOUND_RECORDING);
+ mStreamingProcessor->updateRecordingRequest(params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+ }
- res = updateRecordingStream(params);
+ res = mStreamingProcessor->updateRecordingStream(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+
+ Vector<uint8_t> outputStreams;
bool callbacksEnabled = params.previewCallbackFlags &
CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
if (callbacksEnabled) {
@@ -877,54 +832,19 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
+ outputStreams.push(getCallbackStreamId());
}
+ outputStreams.push(getPreviewStreamId());
+ outputStreams.push(getRecordingStreamId());
- if (mRecordingRequest.entryCount() == 0) {
- res = updateRecordingRequest(params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create recording request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- if (callbacksEnabled) {
- uint8_t outputStreams[3] ={
- getPreviewStreamId(),
- getRecordingStreamId(),
- getCallbackStreamId()
- };
- res = mRecordingRequest.update(
- ANDROID_REQUEST_OUTPUT_STREAMS,
- outputStreams, 3);
- } else {
- uint8_t outputStreams[2] = {
- getPreviewStreamId(),
- getRecordingStreamId()
- };
- res = mRecordingRequest.update(
- ANDROID_REQUEST_OUTPUT_STREAMS,
- outputStreams, 2);
- }
+ res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+ outputStreams);
if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set up recording request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- res = mRecordingRequest.sort();
- if (res != OK) {
- ALOGE("%s: Camera %d: Error sorting recording request: %s (%d)",
+ ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- res = mDevice->setStreamingRequest(mRecordingRequest);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set recording request to start "
- "recording: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
if (params.state < Parameters::RECORD) {
params.state = Parameters::RECORD;
}
@@ -985,60 +905,9 @@ bool Camera2Client::recordingEnabledL() {
void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) {
ATRACE_CALL();
Mutex::Autolock icl(mICameraLock);
- status_t res;
if ( checkPid(__FUNCTION__) != OK) return;
- SharedParameters::Lock l(mParameters);
-
- // Make sure this is for the current heap
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
- if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
- ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
- "(got %x, expected %x)", __FUNCTION__, mCameraId,
- heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
- return;
- }
- uint8_t *data = (uint8_t*)heap->getBase() + offset;
- uint32_t type = *(uint32_t*)data;
- if (type != kMetadataBufferTypeGrallocSource) {
- ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
- __FUNCTION__, mCameraId, type, kMetadataBufferTypeGrallocSource);
- return;
- }
-
- // Release the buffer back to the recording queue
-
- buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);
-
- size_t itemIndex;
- for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
- const BufferItemConsumer::BufferItem item = mRecordingBuffers[itemIndex];
- if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
- item.mGraphicBuffer->handle == imgHandle) {
- break;
- }
- }
- if (itemIndex == mRecordingBuffers.size()) {
- ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
- "outstanding buffers", __FUNCTION__, mCameraId, imgHandle);
- return;
- }
-
- ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__, mCameraId,
- imgHandle);
-
- res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to free recording frame (buffer_handle_t: %p):"
- "%s (%d)",
- __FUNCTION__, mCameraId, imgHandle, strerror(-res), res);
- return;
- }
- mRecordingBuffers.replaceAt(itemIndex);
-
- mRecordingHeapFree++;
+ mStreamingProcessor->releaseRecordingFrame(mem);
}
status_t Camera2Client::autoFocus() {
@@ -1216,8 +1085,8 @@ status_t Camera2Client::commandSetDisplayOrientationL(int degrees) {
}
SharedParameters::Lock l(mParameters);
if (transform != l.mParameters.previewTransform &&
- mPreviewStreamId != NO_STREAM) {
- mDevice->setStreamTransform(mPreviewStreamId, transform);
+ getPreviewStreamId() != NO_STREAM) {
+ mDevice->setStreamTransform(getPreviewStreamId(), transform);
}
l.mParameters.previewTransform = transform;
return OK;
@@ -1330,23 +1199,7 @@ status_t Camera2Client::commandSetVideoBufferCountL(size_t count) {
return INVALID_OPERATION;
}
- // 32 is the current upper limit on the video buffer count for BufferQueue
- if (count > 32) {
- ALOGE("%s: Camera %d: Error setting %d as video buffer count value",
- __FUNCTION__, mCameraId, count);
- return BAD_VALUE;
- }
-
- // Need to reallocate memory for heap
- if (mRecordingHeapCount != count) {
- if (mRecordingHeap != 0) {
- mRecordingHeap.clear();
- mRecordingHeap = NULL;
- }
- mRecordingHeapCount = count;
- }
-
- return OK;
+ return mStreamingProcessor->setRecordingBufferCount(count);
}
/** Device-related methods */
@@ -1497,7 +1350,7 @@ camera2::SharedParameters& Camera2Client::getParameters() {
}
int Camera2Client::getPreviewStreamId() const {
- return mPreviewStreamId;
+ return mStreamingProcessor->getPreviewStreamId();
}
int Camera2Client::getCaptureStreamId() const {
@@ -1509,7 +1362,7 @@ int Camera2Client::getCallbackStreamId() const {
}
int Camera2Client::getRecordingStreamId() const {
- return mRecordingStreamId;
+ return mStreamingProcessor->getRecordingStreamId();
}
int Camera2Client::getZslStreamId() const {
@@ -1555,117 +1408,18 @@ const int32_t Camera2Client::kPreviewRequestId;
const int32_t Camera2Client::kRecordRequestId;
const int32_t Camera2Client::kFirstCaptureRequestId;
-void Camera2Client::onRecordingFrameAvailable() {
- ATRACE_CALL();
- status_t res;
- sp<Camera2Heap> recordingHeap;
- size_t heapIdx = 0;
- nsecs_t timestamp;
- {
- SharedParameters::Lock l(mParameters);
-
- BufferItemConsumer::BufferItem imgBuffer;
- res = mRecordingConsumer->acquireBuffer(&imgBuffer);
- if (res != OK) {
- ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return;
- }
- timestamp = imgBuffer.mTimestamp;
-
- mRecordingFrameCount++;
- ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
-
- // TODO: Signal errors here upstream
- if (l.mParameters.state != Parameters::RECORD &&
- l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
- ALOGV("%s: Camera %d: Discarding recording image buffers received after "
- "recording done",
- __FUNCTION__, mCameraId);
- mRecordingConsumer->releaseBuffer(imgBuffer);
- return;
- }
-
- if (mRecordingHeap == 0) {
- const size_t bufferSize = 4 + sizeof(buffer_handle_t);
- ALOGV("%s: Camera %d: Creating recording heap with %d buffers of "
- "size %d bytes", __FUNCTION__, mCameraId,
- mRecordingHeapCount, bufferSize);
-
- mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
- "Camera2Client::RecordingHeap");
- if (mRecordingHeap->mHeap->getSize() == 0) {
- ALOGE("%s: Camera %d: Unable to allocate memory for recording",
- __FUNCTION__, mCameraId);
- mRecordingConsumer->releaseBuffer(imgBuffer);
- return;
- }
- for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
- if (mRecordingBuffers[i].mBuf !=
- BufferItemConsumer::INVALID_BUFFER_SLOT) {
- ALOGE("%s: Camera %d: Non-empty recording buffers list!",
- __FUNCTION__, mCameraId);
- }
- }
- mRecordingBuffers.clear();
- mRecordingBuffers.setCapacity(mRecordingHeapCount);
- mRecordingBuffers.insertAt(0, mRecordingHeapCount);
-
- mRecordingHeapHead = 0;
- mRecordingHeapFree = mRecordingHeapCount;
- }
-
- if ( mRecordingHeapFree == 0) {
- ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
- __FUNCTION__, mCameraId);
- mRecordingConsumer->releaseBuffer(imgBuffer);
- return;
- }
-
- heapIdx = mRecordingHeapHead;
- mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
- mRecordingHeapFree--;
-
- ALOGV("%s: Camera %d: Timestamp %lld",
- __FUNCTION__, mCameraId, timestamp);
-
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap =
- mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
- &size);
-
- uint8_t *data = (uint8_t*)heap->getBase() + offset;
- uint32_t type = kMetadataBufferTypeGrallocSource;
- *((uint32_t*)data) = type;
- *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
- ALOGV("%s: Camera %d: Sending out buffer_handle_t %p",
- __FUNCTION__, mCameraId, imgBuffer.mGraphicBuffer->handle);
- mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
- recordingHeap = mRecordingHeap;
- }
-
- // Call outside locked parameters to allow re-entrancy from notification
- SharedCameraClient::Lock l(mSharedCameraClient);
- if (l.mCameraClient != 0) {
- l.mCameraClient->dataCallbackTimestamp(timestamp,
- CAMERA_MSG_VIDEO_FRAME,
- recordingHeap->mBuffers[heapIdx]);
- }
-}
-
/** Utility methods */
status_t Camera2Client::updateRequests(Parameters &params) {
status_t res;
- res = updatePreviewRequest(params);
+ res = mStreamingProcessor->updatePreviewRequest(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update preview request: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- res = updateRecordingRequest(params);
+ res = mStreamingProcessor->updateRecordingRequest(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1681,7 +1435,7 @@ status_t Camera2Client::updateRequests(Parameters &params) {
}
} else if (params.state == Parameters::RECORD ||
params.state == Parameters::VIDEO_SNAPSHOT) {
- res = mDevice->setStreamingRequest(mRecordingRequest);
+ res = startRecordingL(params, true);
if (res != OK) {
ALOGE("%s: Camera %d: Error streaming new record request: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1691,172 +1445,6 @@ status_t Camera2Client::updateRequests(Parameters &params) {
return res;
}
-status_t Camera2Client::updatePreviewStream(const Parameters &params) {
- ATRACE_CALL();
- status_t res;
-
- if (mPreviewStreamId != NO_STREAM) {
- // Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- res = mDevice->getStreamInfo(mPreviewStreamId,
- &currentWidth, &currentHeight, 0);
- if (res != OK) {
- ALOGE("%s: Camera %d: Error querying preview stream info: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- if (currentWidth != (uint32_t)params.previewWidth ||
- currentHeight != (uint32_t)params.previewHeight) {
- ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
- __FUNCTION__, mCameraId, currentWidth, currentHeight,
- params.previewWidth, params.previewHeight);
- res = mDevice->waitUntilDrained();
- if (res != OK) {
- ALOGE("%s: Camera %d: Error waiting for preview to drain: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- res = mDevice->deleteStream(mPreviewStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for preview: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
- mPreviewStreamId = NO_STREAM;
- }
- }
-
- if (mPreviewStreamId == NO_STREAM) {
- res = mDevice->createStream(mPreviewWindow,
- params.previewWidth, params.previewHeight,
- CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
- &mPreviewStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- res = mDevice->setStreamTransform(mPreviewStreamId,
- params.previewTransform);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set preview stream transform: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
-
- return OK;
-}
-
-status_t Camera2Client::updatePreviewRequest(const Parameters &params) {
- ATRACE_CALL();
- status_t res;
- if (mPreviewRequest.entryCount() == 0) {
- res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
- &mPreviewRequest);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create default preview request: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- res = params.updateRequest(&mPreviewRequest);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update common entries of preview "
- "request: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
-
- res = mPreviewRequest.update(ANDROID_REQUEST_ID,
- &kPreviewRequestId, 1);
-
- return OK;
-}
-
-status_t Camera2Client::updateRecordingRequest(const Parameters &params) {
- ATRACE_CALL();
- status_t res;
- if (mRecordingRequest.entryCount() == 0) {
- res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
- &mRecordingRequest);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create default recording request:"
- " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- res = params.updateRequest(&mRecordingRequest);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update common entries of recording "
- "request: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
-
- return OK;
-}
-
-status_t Camera2Client::updateRecordingStream(const Parameters &params) {
- status_t res;
-
- if (mRecordingConsumer == 0) {
- // Create CPU buffer queue endpoint. We need one more buffer here so that we can
- // always acquire and free a buffer when the heap is full; otherwise the consumer
- // will have buffers in flight we'll never clear out.
- mRecordingConsumer = new BufferItemConsumer(
- GRALLOC_USAGE_HW_VIDEO_ENCODER,
- mRecordingHeapCount + 1,
- true);
- mRecordingConsumer->setFrameAvailableListener(new RecordingWaiter(this));
- mRecordingConsumer->setName(String8("Camera2Client::RecordingConsumer"));
- mRecordingWindow = new SurfaceTextureClient(
- mRecordingConsumer->getProducerInterface());
- // Allocate memory later, since we don't know buffer size until receipt
- }
-
- if (mRecordingStreamId != NO_STREAM) {
- // Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- res = mDevice->getStreamInfo(mRecordingStreamId,
- &currentWidth, &currentHeight, 0);
- if (res != OK) {
- ALOGE("%s: Camera %d: Error querying recording output stream info: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- if (currentWidth != (uint32_t)params.videoWidth ||
- currentHeight != (uint32_t)params.videoHeight) {
- // TODO: Should wait to be sure previous recording has finished
- res = mDevice->deleteStream(mRecordingStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for recording: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
- mRecordingStreamId = NO_STREAM;
- }
- }
-
- if (mRecordingStreamId == NO_STREAM) {
- mRecordingFrameCount = 0;
- res = mDevice->createStream(mRecordingWindow,
- params.videoWidth, params.videoHeight,
- CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't create output stream for recording: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- return OK;
-}
size_t Camera2Client::calculateBufferSize(int width, int height,
int format, int stride) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index 1eb024a..3a9d307 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -21,17 +21,15 @@
#include "CameraService.h"
#include "camera2/Parameters.h"
#include "camera2/FrameProcessor.h"
+#include "camera2/StreamingProcessor.h"
#include "camera2/JpegProcessor.h"
#include "camera2/ZslProcessor.h"
#include "camera2/CaptureSequencer.h"
#include "camera2/CallbackProcessor.h"
-#include <binder/MemoryBase.h>
-#include <binder/MemoryHeapBase.h>
-#include <gui/CpuConsumer.h>
-#include <gui/BufferItemConsumer.h>
namespace android {
+class IMemory;
/**
* Implements the android.hardware.camera API on top of
* camera device HAL version 2.
@@ -184,15 +182,10 @@ private:
sp<camera2::FrameProcessor> mFrameProcessor;
- /* Preview related members */
+ /* Preview/Recording related members */
- int mPreviewStreamId;
- CameraMetadata mPreviewRequest;
sp<IBinder> mPreviewSurface;
- sp<ANativeWindow> mPreviewWindow;
-
- status_t updatePreviewRequest(const Parameters &params);
- status_t updatePreviewStream(const Parameters &params);
+ sp<camera2::StreamingProcessor> mStreamingProcessor;
/** Preview callback related members */
@@ -204,35 +197,6 @@ private:
sp<camera2::JpegProcessor> mJpegProcessor;
sp<camera2::ZslProcessor> mZslProcessor;
- /* Recording related members */
-
- int mRecordingStreamId;
- int mRecordingFrameCount;
- sp<BufferItemConsumer> mRecordingConsumer;
- sp<ANativeWindow> mRecordingWindow;
- // Simple listener that forwards frame available notifications from
- // a CPU consumer to the recording notification
- class RecordingWaiter: public BufferItemConsumer::FrameAvailableListener {
- public:
- RecordingWaiter(Camera2Client *parent) : mParent(parent) {}
- void onFrameAvailable() { mParent->onRecordingFrameAvailable(); }
- private:
- Camera2Client *mParent;
- };
- sp<RecordingWaiter> mRecordingWaiter;
- CameraMetadata mRecordingRequest;
- sp<camera2::Camera2Heap> mRecordingHeap;
-
- static const size_t kDefaultRecordingHeapCount = 8;
- size_t mRecordingHeapCount;
- Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
- size_t mRecordingHeapHead, mRecordingHeapFree;
- // Handle new recording image buffers
- void onRecordingFrameAvailable();
-
- status_t updateRecordingRequest(const Parameters &params);
- status_t updateRecordingStream(const Parameters &params);
-
/** Notification-related members */
bool mAfInMotion;
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
index 1fa56bd..7b368fa 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -20,6 +20,8 @@
#include <netinet/in.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -233,10 +235,8 @@ status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
}
// Find size of JPEG image
- uint8_t *jpegStart; // points to start of buffer in imgBuffer.data
- size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width, &jpegStart);
+ size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width);
if (jpegSize == 0) { // failed to find size, default to whole buffer
- jpegStart = imgBuffer.data;
jpegSize = imgBuffer.width;
}
size_t heapSize = mCaptureHeap->getSize();
@@ -250,7 +250,7 @@ status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
// TODO: Optimize this to avoid memcopy
sp<MemoryBase> captureBuffer = new MemoryBase(mCaptureHeap, 0, jpegSize);
void* captureMemory = mCaptureHeap->getBase();
- memcpy(captureMemory, jpegStart, jpegSize);
+ memcpy(captureMemory, imgBuffer.data, jpegSize);
mCaptureConsumer->unlockBuffer(imgBuffer);
@@ -322,24 +322,20 @@ uint8_t checkJpegMarker(uint8_t *buf) {
}
// Return the size of the JPEG, 0 indicates failure
-size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer,
- size_t maxSize,
- uint8_t** jpegStart) {
- uint8_t *start;
+size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) {
size_t size;
- // First check for JPEG transport header
- struct camera2_jpeg_blob *blob = (struct camera2_jpeg_blob*)(jpegBuffer);
+ // First check for JPEG transport header at the end of the buffer
+ uint8_t *header = jpegBuffer + (maxSize - sizeof(struct camera2_jpeg_blob));
+ struct camera2_jpeg_blob *blob = (struct camera2_jpeg_blob*)(header);
if (blob->jpeg_blob_id == CAMERA2_JPEG_BLOB_ID) {
size = blob->jpeg_size;
if (size > 0 && size <= maxSize - sizeof(struct camera2_jpeg_blob)) {
// Verify SOI and EOI markers
- uint8_t *start = blob->jpeg_data;
size_t offset = size - MARKER_LENGTH;
- uint8_t *end = blob->jpeg_data + offset;
- if (checkJpegStart(start) && checkJpegEnd(end)) {
+ uint8_t *end = jpegBuffer + offset;
+ if (checkJpegStart(jpegBuffer) && checkJpegEnd(end)) {
ALOGV("Found JPEG transport header, img size %d", size);
- *jpegStart = start;
return size;
} else {
ALOGW("Found JPEG transport header with bad Image Start/End");
@@ -349,30 +345,16 @@ size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer,
}
}
- // Find Start of Image
- // This lets us handle malformed transport headers by skipping them
- bool foundStart = false;
- for (size = 0; size <= sizeof(struct camera2_jpeg_blob); size++) {
- if ( checkJpegStart(jpegBuffer + size) ) {
- foundStart = true;
- start = jpegBuffer + size;
- maxSize = maxSize - size; // adjust accordingly
- break;
- }
- }
- if (!foundStart) {
+ // Check Start of Image
+ if ( !checkJpegStart(jpegBuffer) ) {
ALOGE("Could not find start of JPEG marker");
return 0;
}
- if (size != 0) { // Image starts at offset from beginning
- // We want the jpeg to start at the first byte; so emit warning
- ALOGW("JPEG Image starts at offset %d", size);
- }
// Read JFIF segment markers, skip over segment data
size = 0;
while (size <= maxSize - MARKER_LENGTH) {
- segment_t *segment = (segment_t*)(start + size);
+ segment_t *segment = (segment_t*)(jpegBuffer + size);
uint8_t type = checkJpegMarker(segment->marker);
if (type == 0) { // invalid marker, no more segments, begin JPEG data
ALOGV("JPEG stream found beginning at offset %d", size);
@@ -390,8 +372,8 @@ size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer,
// Find End of Image
// Scan JPEG buffer until End of Image (EOI)
bool foundEnd = false;
- for (size; size <= maxSize; size++) {
- if ( checkJpegEnd(start + size) ) {
+ for (size; size <= maxSize - MARKER_LENGTH; size++) {
+ if ( checkJpegEnd(jpegBuffer + size) ) {
foundEnd = true;
size += MARKER_LENGTH;
break;
@@ -406,8 +388,7 @@ size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer,
ALOGW("JPEG size %d too large, reducing to maxSize %d", size, maxSize);
size = maxSize;
}
- ALOGV("Final JPEG size %d, starting at %p", size, start);
- *jpegStart = start;
+ ALOGV("Final JPEG size %d", size);
return size;
}
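
The reworked findJpegSize() above assumes the HAL appends the camera2_jpeg_blob transport header at the very end of the capture buffer and that the compressed image begins at byte 0, which is why the jpegStart out-parameter could be dropped. A minimal sketch of that trailing-blob lookup, kept separate from the patch and reusing only the blob fields shown above (jpeg_blob_id, jpeg_size); the helper name is illustrative, not part of the source:

    // Sketch: read the JPEG size from a trailing camera2_jpeg_blob, if one is present.
    // Returns 0 when no valid transport header is found, mirroring findJpegSize().
    size_t jpegSizeFromTrailingBlob(const uint8_t* buffer, size_t bufferSize) {
        if (bufferSize < sizeof(struct camera2_jpeg_blob)) return 0;
        const struct camera2_jpeg_blob* blob = (const struct camera2_jpeg_blob*)
                (buffer + bufferSize - sizeof(struct camera2_jpeg_blob));
        if (blob->jpeg_blob_id != CAMERA2_JPEG_BLOB_ID) return 0;  // no header appended
        if (blob->jpeg_size == 0 ||
                blob->jpeg_size > bufferSize - sizeof(struct camera2_jpeg_blob)) {
            return 0;  // caller falls back to scanning for SOI/EOI markers
        }
        return blob->jpeg_size;  // image data starts at buffer[0]
    }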
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.h b/services/camera/libcameraservice/camera2/JpegProcessor.h
index da09178..836bd02 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.h
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.h
@@ -25,11 +25,11 @@
#include <gui/CpuConsumer.h>
#include "Parameters.h"
#include "CameraMetadata.h"
-#include "Camera2Heap.h"
namespace android {
class Camera2Client;
+class MemoryHeapBase;
namespace camera2 {
@@ -72,9 +72,7 @@ class JpegProcessor:
virtual bool threadLoop();
status_t processNewCapture(sp<Camera2Client> &client);
- size_t findJpegSize(uint8_t* jpegBuffer,
- size_t maxSize,
- uint8_t** jpegStart);
+ size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
};
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index 6383434..8ae390d 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -1331,10 +1331,6 @@ status_t Parameters::set(const String8& params) {
ALOGE("%s: Video stabilization not supported", __FUNCTION__);
}
- // LIGHTFX
- validatedParams.lightFx = lightFxStringToEnum(
- newParams.get(CameraParameters::KEY_LIGHTFX));
-
/** Update internal parameters */
validatedParams.paramsFlattened = params;
@@ -1746,18 +1742,6 @@ Parameters::Parameters::focusMode_t Parameters::focusModeStringToEnum(
Parameters::FOCUS_MODE_INVALID;
}
-Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
- const char *lightFxMode) {
- return
- !lightFxMode ?
- Parameters::LIGHTFX_NONE :
- !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
- Parameters::LIGHTFX_LOWLIGHT :
- !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
- Parameters::LIGHTFX_HDR :
- Parameters::LIGHTFX_NONE;
-}
-
status_t Parameters::parseAreas(const char *areasCStr,
Vector<Parameters::Area> *areas) {
static const size_t NUM_FIELDS = 5;
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index af23a4e..f830e21 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -213,7 +213,6 @@ struct Parameters {
static int sceneModeStringToEnum(const char *sceneMode);
static flashMode_t flashModeStringToEnum(const char *flashMode);
static focusMode_t focusModeStringToEnum(const char *focusMode);
- static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
static status_t parseAreas(const char *areasCStr,
Vector<Area> *areas);
static status_t validateAreas(const Vector<Area> &areas,
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
new file mode 100644
index 0000000..140138d
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2-StreamingProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/SurfaceTextureClient.h>
+#include <media/hardware/MetadataBufferType.h>
+
+#include "StreamingProcessor.h"
+#include "Camera2Heap.h"
+#include "../Camera2Client.h"
+#include "../Camera2Device.h"
+
+namespace android {
+namespace camera2 {
+
+StreamingProcessor::StreamingProcessor(wp<Camera2Client> client):
+ mClient(client),
+ mPreviewStreamId(NO_STREAM),
+ mRecordingStreamId(NO_STREAM),
+ mRecordingHeapCount(kDefaultRecordingHeapCount)
+{
+
+}
+
+StreamingProcessor::~StreamingProcessor() {
+ deletePreviewStream();
+ deleteRecordingStream();
+}
+
+status_t StreamingProcessor::setPreviewWindow(sp<ANativeWindow> window) {
+ ATRACE_CALL();
+ status_t res;
+
+ res = deletePreviewStream();
+ if (res != OK) return res;
+
+ Mutex::Autolock m(mMutex);
+
+ mPreviewWindow = window;
+
+ return OK;
+}
+
+bool StreamingProcessor::haveValidPreviewWindow() const {
+ Mutex::Autolock m(mMutex);
+ return mPreviewWindow != 0;
+}
+
+status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
+ ATRACE_CALL();
+ status_t res;
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+
+ Mutex::Autolock m(mMutex);
+ if (mPreviewRequest.entryCount() == 0) {
+ res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
+ &mPreviewRequest);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to create default preview request: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ }
+
+ res = params.updateRequest(&mPreviewRequest);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update common entries of preview "
+ "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+
+ res = mPreviewRequest.update(ANDROID_REQUEST_ID,
+ &Camera2Client::kPreviewRequestId, 1);
+
+ return OK;
+}
+
+status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
+ ATRACE_CALL();
+ Mutex::Autolock m(mMutex);
+
+ status_t res;
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ if (mPreviewStreamId != NO_STREAM) {
+ // Check if stream parameters have to change
+ uint32_t currentWidth, currentHeight;
+ res = device->getStreamInfo(mPreviewStreamId,
+ &currentWidth, &currentHeight, 0);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error querying preview stream info: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ if (currentWidth != (uint32_t)params.previewWidth ||
+ currentHeight != (uint32_t)params.previewHeight) {
+ ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
+ __FUNCTION__, client->getCameraId(), currentWidth, currentHeight,
+ params.previewWidth, params.previewHeight);
+ res = device->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error waiting for preview to drain: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ res = device->deleteStream(mPreviewStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for preview: %s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ mPreviewStreamId = NO_STREAM;
+ }
+ }
+
+ if (mPreviewStreamId == NO_STREAM) {
+ res = device->createStream(mPreviewWindow,
+ params.previewWidth, params.previewHeight,
+ CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
+ &mPreviewStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ }
+
+ res = device->setStreamTransform(mPreviewStreamId,
+ params.previewTransform);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set preview stream transform: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t StreamingProcessor::deletePreviewStream() {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock m(mMutex);
+
+ if (mPreviewStreamId != NO_STREAM) {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ res = device->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Error waiting for preview to drain: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ res = device->deleteStream(mPreviewStreamId);
+ if (res != OK) {
+ ALOGE("%s: Unable to delete old preview stream: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ mPreviewStreamId = NO_STREAM;
+ }
+ return OK;
+}
+
+int StreamingProcessor::getPreviewStreamId() const {
+ Mutex::Autolock m(mMutex);
+ return mPreviewStreamId;
+}
+
+status_t StreamingProcessor::setRecordingBufferCount(size_t count) {
+ ATRACE_CALL();
+ // 32 is the current upper limit on the video buffer count for BufferQueue
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+ if (count > 32) {
+ ALOGE("%s: Camera %d: Error setting %d as video buffer count value",
+ __FUNCTION__, client->getCameraId(), count);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock m(mMutex);
+
+ // Need to reallocate memory for heap
+ if (mRecordingHeapCount != count) {
+ if (mRecordingHeap != 0) {
+ mRecordingHeap.clear();
+ mRecordingHeap = NULL;
+ }
+ mRecordingHeapCount = count;
+ }
+
+ return OK;
+}
+
+status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
+ ATRACE_CALL();
+ status_t res;
+ Mutex::Autolock m(mMutex);
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+
+ if (mRecordingRequest.entryCount() == 0) {
+ res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD,
+ &mRecordingRequest);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to create default recording request:"
+ " %s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ }
+
+ res = params.updateRequest(&mRecordingRequest);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update common entries of recording "
+ "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t StreamingProcessor::updateRecordingStream(const Parameters &params) {
+ ATRACE_CALL();
+ status_t res;
+ Mutex::Autolock m(mMutex);
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ if (mRecordingConsumer == 0) {
+ // Create CPU buffer queue endpoint. We need one more buffer here so that we can
+ // always acquire and free a buffer when the heap is full; otherwise the consumer
+ // will have buffers in flight we'll never clear out.
+ mRecordingConsumer = new BufferItemConsumer(
+ GRALLOC_USAGE_HW_VIDEO_ENCODER,
+ mRecordingHeapCount + 1,
+ true);
+ mRecordingConsumer->setFrameAvailableListener(this);
+ mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
+ mRecordingWindow = new SurfaceTextureClient(
+ mRecordingConsumer->getProducerInterface());
+ // Allocate memory later, since we don't know buffer size until receipt
+ }
+
+ if (mRecordingStreamId != NO_STREAM) {
+ // Check if stream parameters have to change
+ uint32_t currentWidth, currentHeight;
+ res = device->getStreamInfo(mRecordingStreamId,
+ &currentWidth, &currentHeight, 0);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error querying recording output stream info: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ if (currentWidth != (uint32_t)params.videoWidth ||
+ currentHeight != (uint32_t)params.videoHeight) {
+ // TODO: Should wait to be sure previous recording has finished
+ res = device->deleteStream(mRecordingStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for recording: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mRecordingStreamId = NO_STREAM;
+ }
+ }
+
+ if (mRecordingStreamId == NO_STREAM) {
+ mRecordingFrameCount = 0;
+ res = device->createStream(mRecordingWindow,
+ params.videoWidth, params.videoHeight,
+ CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't create output stream for recording: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ return OK;
+}
+
+status_t StreamingProcessor::deleteRecordingStream() {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock m(mMutex);
+
+ if (mRecordingStreamId != NO_STREAM) {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ res = device->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Error waiting for HAL to drain: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ res = device->deleteStream(mRecordingStreamId);
+ if (res != OK) {
+ ALOGE("%s: Unable to delete recording stream: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ mRecordingStreamId = NO_STREAM;
+ }
+ return OK;
+}
+
+int StreamingProcessor::getRecordingStreamId() const {
+ return mRecordingStreamId;
+}
+
+status_t StreamingProcessor::startStream(StreamType type,
+ const Vector<uint8_t> &outputStreams) {
+ ATRACE_CALL();
+ status_t res;
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+
+ Mutex::Autolock m(mMutex);
+
+ CameraMetadata &request = (type == PREVIEW) ?
+ mPreviewRequest : mRecordingRequest;
+
+ res = request.update(
+ ANDROID_REQUEST_OUTPUT_STREAMS,
+ outputStreams);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+
+ res = request.sort();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+
+ res = client->getCameraDevice()->setStreamingRequest(request);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set preview request to start preview: "
+ "%s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t StreamingProcessor::stopStream() {
+ ATRACE_CALL();
+ status_t res;
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return INVALID_OPERATION;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ res = device->clearStreamingRequest();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ res = device->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ return OK;
+}
+
+void StreamingProcessor::onFrameAvailable() {
+ ATRACE_CALL();
+ status_t res;
+ sp<Camera2Heap> recordingHeap;
+ size_t heapIdx = 0;
+ nsecs_t timestamp;
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return;
+
+ {
+ Mutex::Autolock m(mMutex);
+ BufferItemConsumer::BufferItem imgBuffer;
+ res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return;
+ }
+ timestamp = imgBuffer.mTimestamp;
+
+ mRecordingFrameCount++;
+ ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
+
+ {
+ SharedParameters::Lock l(client->getParameters());
+ // TODO: Signal errors here upstream
+ if (l.mParameters.state != Parameters::RECORD &&
+ l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
+ ALOGV("%s: Camera %d: Discarding recording image buffers "
+ "received after recording done", __FUNCTION__,
+ client->getCameraId());
+ mRecordingConsumer->releaseBuffer(imgBuffer);
+ return;
+ }
+ }
+
+ if (mRecordingHeap == 0) {
+ const size_t bufferSize = 4 + sizeof(buffer_handle_t);
+ ALOGV("%s: Camera %d: Creating recording heap with %d buffers of "
+ "size %d bytes", __FUNCTION__, client->getCameraId(),
+ mRecordingHeapCount, bufferSize);
+
+ mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
+ "Camera2Client::RecordingHeap");
+ if (mRecordingHeap->mHeap->getSize() == 0) {
+ ALOGE("%s: Camera %d: Unable to allocate memory for recording",
+ __FUNCTION__, client->getCameraId());
+ mRecordingConsumer->releaseBuffer(imgBuffer);
+ return;
+ }
+ for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
+ if (mRecordingBuffers[i].mBuf !=
+ BufferItemConsumer::INVALID_BUFFER_SLOT) {
+ ALOGE("%s: Camera %d: Non-empty recording buffers list!",
+ __FUNCTION__, client->getCameraId());
+ }
+ }
+ mRecordingBuffers.clear();
+ mRecordingBuffers.setCapacity(mRecordingHeapCount);
+ mRecordingBuffers.insertAt(0, mRecordingHeapCount);
+
+ mRecordingHeapHead = 0;
+ mRecordingHeapFree = mRecordingHeapCount;
+ }
+
+ if ( mRecordingHeapFree == 0) {
+ ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
+ __FUNCTION__, client->getCameraId());
+ mRecordingConsumer->releaseBuffer(imgBuffer);
+ return;
+ }
+
+ heapIdx = mRecordingHeapHead;
+ mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
+ mRecordingHeapFree--;
+
+ ALOGV("%s: Camera %d: Timestamp %lld",
+ __FUNCTION__, client->getCameraId(), timestamp);
+
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap =
+ mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
+ &size);
+
+ uint8_t *data = (uint8_t*)heap->getBase() + offset;
+ uint32_t type = kMetadataBufferTypeGrallocSource;
+ *((uint32_t*)data) = type;
+ *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
+ ALOGV("%s: Camera %d: Sending out buffer_handle_t %p",
+ __FUNCTION__, client->getCameraId(),
+ imgBuffer.mGraphicBuffer->handle);
+ mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
+ recordingHeap = mRecordingHeap;
+ }
+
+ // Call outside locked parameters to allow re-entrancy from notification
+ Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient);
+ if (l.mCameraClient != 0) {
+ l.mCameraClient->dataCallbackTimestamp(timestamp,
+ CAMERA_MSG_VIDEO_FRAME,
+ recordingHeap->mBuffers[heapIdx]);
+ }
+}
+
+void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
+ ATRACE_CALL();
+ status_t res;
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return;
+
+ Mutex::Autolock m(mMutex);
+ // Make sure this is for the current heap
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+ if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
+ ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
+ "(got %x, expected %x)", __FUNCTION__, client->getCameraId(),
+ heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
+ return;
+ }
+ uint8_t *data = (uint8_t*)heap->getBase() + offset;
+ uint32_t type = *(uint32_t*)data;
+ if (type != kMetadataBufferTypeGrallocSource) {
+ ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
+ __FUNCTION__, client->getCameraId(), type,
+ kMetadataBufferTypeGrallocSource);
+ return;
+ }
+
+ // Release the buffer back to the recording queue
+
+ buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);
+
+ size_t itemIndex;
+ for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
+ const BufferItemConsumer::BufferItem item =
+ mRecordingBuffers[itemIndex];
+ if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
+ item.mGraphicBuffer->handle == imgHandle) {
+ break;
+ }
+ }
+ if (itemIndex == mRecordingBuffers.size()) {
+ ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
+ "outstanding buffers", __FUNCTION__, client->getCameraId(),
+ imgHandle);
+ return;
+ }
+
+ ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
+ client->getCameraId(), imgHandle);
+
+ res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to free recording frame "
+ "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
+ client->getCameraId(), imgHandle, strerror(-res), res);
+ return;
+ }
+ mRecordingBuffers.replaceAt(itemIndex);
+
+ mRecordingHeapFree++;
+}
+
+
+status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) {
+ String8 result;
+
+ result.append(" Current requests:\n");
+ if (mPreviewRequest.entryCount() != 0) {
+ result.append(" Preview request:\n");
+ write(fd, result.string(), result.size());
+ mPreviewRequest.dump(fd, 2, 6);
+ } else {
+ result.append(" Preview request: undefined\n");
+ write(fd, result.string(), result.size());
+ }
+
+ if (mRecordingRequest.entryCount() != 0) {
+ result = " Recording request:\n";
+ write(fd, result.string(), result.size());
+ mRecordingRequest.dump(fd, 2, 6);
+ } else {
+ result = " Recording request: undefined\n";
+ write(fd, result.string(), result.size());
+ }
+
+ return OK;
+}
+
+}; // namespace camera2
+}; // namespace android
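
Note that onFrameAvailable() above never copies pixel data: each slot of the recording heap carries only a four-byte buffer type followed by the gralloc handle, and releaseRecordingFrame() later reads that same layout back to find which BufferItem to return to the consumer. A small sketch of the packing, standing apart from the patch itself (helper names are illustrative only):

    // Sketch: per-frame payload layout handed to the encoder via dataCallbackTimestamp,
    // matching the writes in onFrameAvailable() and the reads in releaseRecordingFrame().
    void packRecordingFrame(uint8_t* data, buffer_handle_t handle) {
        *((uint32_t*)data) = kMetadataBufferTypeGrallocSource;  // 4-byte buffer type tag
        *((buffer_handle_t*)(data + 4)) = handle;               // gralloc handle, not pixels
    }
    buffer_handle_t unpackRecordingFrame(uint8_t* data) {
        return *((buffer_handle_t*)(data + 4));
    }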
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/camera2/StreamingProcessor.h
new file mode 100644
index 0000000..ac58614
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_STREAMINGPROCESSOR_H
+
+#include <utils/Mutex.h>
+#include <utils/String16.h>
+#include <gui/BufferItemConsumer.h>
+
+#include "Parameters.h"
+#include "CameraMetadata.h"
+
+namespace android {
+
+class Camera2Client;
+class IMemory;
+
+namespace camera2 {
+
+class Camera2Heap;
+
+/**
+ * Management and processing for preview and recording streams
+ */
+class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener {
+ public:
+ StreamingProcessor(wp<Camera2Client> client);
+ ~StreamingProcessor();
+
+ status_t setPreviewWindow(sp<ANativeWindow> window);
+
+ bool haveValidPreviewWindow() const;
+
+ status_t updatePreviewRequest(const Parameters &params);
+ status_t updatePreviewStream(const Parameters &params);
+ status_t deletePreviewStream();
+ int getPreviewStreamId() const;
+
+ status_t setRecordingBufferCount(size_t count);
+ status_t updateRecordingRequest(const Parameters &params);
+ status_t updateRecordingStream(const Parameters &params);
+ status_t deleteRecordingStream();
+ int getRecordingStreamId() const;
+
+ enum StreamType {
+ PREVIEW,
+ RECORD
+ };
+ status_t startStream(StreamType type,
+ const Vector<uint8_t> &outputStreams);
+
+ status_t stopStream();
+
+ // Callback for new recording frames from HAL
+ virtual void onFrameAvailable();
+ // Callback from stagefright which returns used recording frames
+ void releaseRecordingFrame(const sp<IMemory>& mem);
+
+ status_t dump(int fd, const Vector<String16>& args);
+
+ private:
+ mutable Mutex mMutex;
+
+ enum {
+ NO_STREAM = -1
+ };
+
+ wp<Camera2Client> mClient;
+
+ // Preview-related members
+ int mPreviewStreamId;
+ CameraMetadata mPreviewRequest;
+ sp<ANativeWindow> mPreviewWindow;
+
+ // Recording-related members
+ int mRecordingStreamId;
+ int mRecordingFrameCount;
+ sp<BufferItemConsumer> mRecordingConsumer;
+ sp<ANativeWindow> mRecordingWindow;
+ CameraMetadata mRecordingRequest;
+ sp<camera2::Camera2Heap> mRecordingHeap;
+
+ static const size_t kDefaultRecordingHeapCount = 8;
+ size_t mRecordingHeapCount;
+ Vector<BufferItemConsumer::BufferItem> mRecordingBuffers;
+ size_t mRecordingHeapHead, mRecordingHeapFree;
+
+};
+
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
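
Taken together with the Camera2Client.h changes earlier in the patch, the client is expected to drive preview roughly as follows; the exact call order shown here is an assumption for illustration, not part of this commit, and uses only the StreamingProcessor API declared above:

    // Sketch: rough preview start/stop sequence from the client side.
    // 'client', 'window', and 'params' stand in for the Camera2Client, preview
    // surface, and locked Parameters the real code already holds.
    sp<camera2::StreamingProcessor> processor =
            new camera2::StreamingProcessor(client);     // client: wp<Camera2Client>
    processor->setPreviewWindow(window);                 // window: sp<ANativeWindow>
    processor->updatePreviewRequest(params);             // params: const Parameters&
    processor->updatePreviewStream(params);
    Vector<uint8_t> outputs;
    outputs.push(processor->getPreviewStreamId());
    processor->startStream(camera2::StreamingProcessor::PREVIEW, outputs);
    // ... preview runs; recording would add the recording stream id to 'outputs'
    //     and use the RECORD request instead ...
    processor->stopStream();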