Diffstat (limited to 'services')
-rw-r--r--  services/audioflinger/Android.mk                                  |   2
-rw-r--r--  services/audioflinger/AudioFlinger.cpp                            |  28
-rw-r--r--  services/audioflinger/AudioFlinger.h                              |   7
-rw-r--r--  services/audioflinger/AudioPolicyService.h                        |   5
-rw-r--r--  services/audioflinger/AudioResampler.h                            |   3
-rw-r--r--  services/audioflinger/PlaybackTracks.h                            |   7
-rw-r--r--  services/audioflinger/RecordTracks.h                              |   1
-rw-r--r--  services/audioflinger/Threads.cpp                                 |  65
-rw-r--r--  services/audioflinger/TrackBase.h                                 |   2
-rw-r--r--  services/audioflinger/Tracks.cpp                                  | 276
-rw-r--r--  services/camera/libcameraservice/Camera2Client.cpp                |  95
-rw-r--r--  services/camera/libcameraservice/Camera2Client.h                  |   3
-rw-r--r--  services/camera/libcameraservice/Camera3Device.cpp                |  29
-rw-r--r--  services/camera/libcameraservice/CameraClient.cpp                 |   6
-rw-r--r--  services/camera/libcameraservice/CameraClient.h                   |   2
-rw-r--r--  services/camera/libcameraservice/CameraService.h                  |   2
-rw-r--r--  services/camera/libcameraservice/camera2/CallbackProcessor.cpp    |  44
-rw-r--r--  services/camera/libcameraservice/camera2/CallbackProcessor.h      |   5
-rw-r--r--  services/camera/libcameraservice/camera2/JpegCompressor.cpp       |   3
-rw-r--r--  services/camera/libcameraservice/camera2/Parameters.cpp           | 108
-rw-r--r--  services/camera/libcameraservice/camera2/Parameters.h             |   1
-rw-r--r--  services/camera/libcameraservice/camera3/Camera3OutputStream.cpp  |   9
-rw-r--r--  services/camera/libcameraservice/camera3/Camera3Stream.cpp        |   2
-rw-r--r--  services/camera/libcameraservice/gui/RingBufferConsumer.cpp       |  13
24 files changed, 417 insertions, 301 deletions
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 061a079..714854e 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -81,6 +81,8 @@ else
LOCAL_CFLAGS += -DANDROID_SMP=0
endif
+LOCAL_CFLAGS += -fvisibility=hidden
+
include $(BUILD_SHARED_LIBRARY)
#
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 87eb6aa..a6edb77 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1453,10 +1453,18 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
}
mPlaybackThreads.add(id, thread);
- if (pSamplingRate != NULL) *pSamplingRate = config.sample_rate;
- if (pFormat != NULL) *pFormat = config.format;
- if (pChannelMask != NULL) *pChannelMask = config.channel_mask;
- if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
+ if (pSamplingRate != NULL) {
+ *pSamplingRate = config.sample_rate;
+ }
+ if (pFormat != NULL) {
+ *pFormat = config.format;
+ }
+ if (pChannelMask != NULL) {
+ *pChannelMask = config.channel_mask;
+ }
+ if (pLatencyMs != NULL) {
+ *pLatencyMs = thread->latency();
+ }
// notify client processes of the new output creation
thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
@@ -1698,9 +1706,15 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
);
mRecordThreads.add(id, thread);
ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
- if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
- if (pFormat != NULL) *pFormat = config.format;
- if (pChannelMask != NULL) *pChannelMask = reqChannels;
+ if (pSamplingRate != NULL) {
+ *pSamplingRate = reqSamplingRate;
+ }
+ if (pFormat != NULL) {
+ *pFormat = config.format;
+ }
+ if (pChannelMask != NULL) {
+ *pChannelMask = reqChannels;
+ }
// notify client processes of the new input creation
thread->audioConfigChanged_l(AudioSystem::INPUT_OPENED);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index b0efef6..05dbab1 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -24,6 +24,8 @@
#include <common_time/cc_helper.h>
+#include <cutils/compiler.h>
+
#include <media/IAudioFlinger.h>
#include <media/IAudioFlingerClient.h>
#include <media/IAudioTrack.h>
@@ -54,6 +56,7 @@
#include <powermanager/IPowerManager.h>
#include <media/nbaio/NBLog.h>
+#include <private/media/AudioTrackShared.h>
namespace android {
@@ -89,7 +92,7 @@ class AudioFlinger :
{
friend class BinderService<AudioFlinger>; // for AudioFlinger()
public:
- static const char* getServiceName() { return "media.audio_flinger"; }
+ static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -278,7 +281,7 @@ private:
bool btNrecIsOff() const { return mBtNrecIsOff; }
- AudioFlinger();
+ AudioFlinger() ANDROID_API;
virtual ~AudioFlinger();
// call in any IAudioFlinger method that accesses mPrimaryHardwareDev
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 35cf368..53238fa 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -19,6 +19,7 @@
#include <cutils/misc.h>
#include <cutils/config_utils.h>
+#include <cutils/compiler.h>
#include <utils/String8.h>
#include <utils/Vector.h>
#include <utils/SortedVector.h>
@@ -44,7 +45,7 @@ class AudioPolicyService :
public:
// for BinderService
- static const char *getServiceName() { return "media.audio_policy"; }
+ static const char *getServiceName() ANDROID_API { return "media.audio_policy"; }
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -137,7 +138,7 @@ public:
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
private:
- AudioPolicyService();
+ AudioPolicyService() ANDROID_API;
virtual ~AudioPolicyService();
status_t dumpInternals(int fd);
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 2b8694f..29dc5b6 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -19,13 +19,14 @@
#include <stdint.h>
#include <sys/types.h>
+#include <cutils/compiler.h>
#include <media/AudioBufferProvider.h>
namespace android {
// ----------------------------------------------------------------------------
-class AudioResampler {
+class ANDROID_API AudioResampler {
public:
// Determines quality of SRC.
// LOW_QUALITY: linear interpolator (1st order)
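The header changes above work together with the -fvisibility=hidden flag added in Android.mk: libaudioflinger is now built with hidden symbol visibility, and only the entry points marked ANDROID_API (from <cutils/compiler.h>) stay exported from the shared library. A minimal sketch of that pattern, using an illustrative macro rather than the real header:

    // Illustrative stand-in; in AOSP, ANDROID_API in <cutils/compiler.h> expands to
    // (roughly) this default-visibility attribute.
    #define EXPORT_API __attribute__((visibility("default")))

    class EXPORT_API Resampler {            // whole class stays visible outside the .so
    public:
        static Resampler* create(int quality);
    };

    EXPORT_API const char* serviceName() {  // a single exported free function
        return "media.audio_flinger";
    }

    int internalHelper() { return 1; }      // no attribute: hidden by -fvisibility=hidden
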
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a749d7a..b1286d3 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -46,6 +46,8 @@ public:
void destroy();
int name() const { return mName; }
+ virtual uint32_t sampleRate() const;
+
audio_stream_type_t streamType() const {
return mStreamType;
}
@@ -139,6 +141,7 @@ private:
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
bool mIsInvalid; // non-resettable latch, set by invalidate()
+ AudioTrackServerProxy* mAudioTrackServerProxy;
}; // end of Track
class TimedTrack : public Track {
@@ -255,10 +258,6 @@ public:
private:
- enum {
- NO_MORE_BUFFERS = 0x80000001, // same in AudioTrack.h, ok to be different value
- };
-
status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
uint32_t waitTimeMs);
void clearBufferQueue();
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 6c0d1d3..ffe3e9f 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -57,4 +57,5 @@ private:
// releaseBuffer() not overridden
bool mOverflow; // overflow on most recent attempt to fill client buffer
+ AudioRecordServerProxy* mAudioRecordServerProxy;
};
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 97f66f4..0773534 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -139,7 +139,7 @@ static const int kPriorityFastMixer = 3;
// FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
// N-buffering, so AudioFlinger could allocate the right amount of memory.
// See the client's minBufCount and mNotificationFramesAct calculations for details.
-static const int kFastTrackMultiplier = 2;
+static const int kFastTrackMultiplier = 1;
// ----------------------------------------------------------------------------
@@ -495,7 +495,8 @@ void AudioFlinger::ThreadBase::acquireWakeLock_l()
sp<IBinder> binder = new BBinder();
status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
- String16(mName));
+ String16(mName),
+ String16("media"));
if (status == NO_ERROR) {
mWakeLockToken = binder;
}
@@ -1326,7 +1327,7 @@ status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
// the track is newly added, make sure it fills up all its
// buffers before playing. This is to ensure the client will
// effectively get the latency it requested.
- track->mFillingUpStatus = Track::FS_FILLING;
+ track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
@@ -1713,7 +1714,7 @@ void AudioFlinger::PlaybackThread::cacheParameters_l()
void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
{
- ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+ ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
this, streamType, mTracks.size());
Mutex::Autolock _l(mLock);
@@ -2595,24 +2596,35 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
// app does not call stop() and relies on underrun to stop:
// hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
// during last round
+ size_t desiredFrames;
+ if (t->sampleRate() == mSampleRate) {
+ desiredFrames = mNormalFrameCount;
+ } else {
+ // +1 for rounding and +1 for additional sample needed for interpolation
+ desiredFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
+ // add frames already consumed but not yet released by the resampler
+ // because cblk->framesReady() will include these frames
+ desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+ // the minimum track buffer size is normally twice the number of frames necessary
+ // to fill one buffer and the resampler should not leave more than one buffer worth
+ // of unreleased frames after each pass, but just in case...
+ ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
+ }
uint32_t minFrames = 1;
if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
(mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
- if (t->sampleRate() == mSampleRate) {
- minFrames = mNormalFrameCount;
- } else {
- // +1 for rounding and +1 for additional sample needed for interpolation
- minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
- // add frames already consumed but not yet released by the resampler
- // because cblk->framesReady() will include these frames
- minFrames += mAudioMixer->getUnreleasedFrames(track->name());
- // the minimum track buffer size is normally twice the number of frames necessary
- // to fill one buffer and the resampler should not leave more than one buffer worth
- // of unreleased frames after each pass, but just in case...
- ALOG_ASSERT(minFrames <= cblk->frameCount_);
- }
+ minFrames = desiredFrames;
}
- if ((track->framesReady() >= minFrames) && track->isReady() &&
+ // It's not safe to call framesReady() for a static buffer track, so assume it's ready
+ size_t framesReady;
+ if (track->sharedBuffer() == 0) {
+ framesReady = track->framesReady();
+ } else if (track->isStopped()) {
+ framesReady = 0;
+ } else {
+ framesReady = 1;
+ }
+ if ((framesReady >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
{
ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
@@ -2663,7 +2675,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
// read original volumes with volume control
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = masterVolume * typeVolume;
- ServerProxy *proxy = track->mServerProxy;
+ AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
uint32_t vlr = proxy->getVolumeLR();
vl = vlr & 0xFFFF;
vr = vlr >> 16;
@@ -2736,7 +2748,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
AudioMixer::CHANNEL_MASK, (void *)track->channelMask());
// limit track sample rate to 2 x output sample rate, which changes at re-configuration
uint32_t maxSampleRate = mSampleRate * 2;
- uint32_t reqSampleRate = track->mServerProxy->getSampleRate();
+ uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
if (reqSampleRate == 0) {
reqSampleRate = mSampleRate;
} else if (reqSampleRate > maxSampleRate) {
@@ -2767,6 +2779,13 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
mixerStatus = MIXER_TRACKS_READY;
}
} else {
+ // only implemented for normal tracks, not fast tracks
+ if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
+ // we missed desiredFrames whatever the actual number of frames missing was
+ cblk->u.mStreaming.mUnderrunFrames += desiredFrames;
+ // FIXME also wake futex so that underrun is noticed more quickly
+ (void) android_atomic_or(CBLK_UNDERRUN, &cblk->flags);
+ }
// clear effect chain input buffer if an active track underruns to avoid sending
// previous audio buffer again to effects
chain = getEffectChain_l(track->sessionId());
@@ -3169,7 +3188,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep
} else {
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = mMasterVolume * typeVolume;
- uint32_t vlr = track->mServerProxy->getVolumeLR();
+ uint32_t vlr = track->mAudioTrackServerProxy->getVolumeLR();
float v_clamped = v * (vlr & 0xFFFF);
if (v_clamped > MAX_GAIN) {
v_clamped = MAX_GAIN;
@@ -3695,7 +3714,8 @@ bool AudioFlinger::RecordThread::threadLoop()
}
buffer.frameCount = mFrameCount;
- if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+ status_t status = mActiveTrack->getNextBuffer(&buffer);
+ if (CC_LIKELY(status == NO_ERROR)) {
readOnce = true;
size_t framesOut = buffer.frameCount;
if (mResampler == NULL) {
@@ -3969,6 +3989,7 @@ status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrac
ALOGV("Record started OK");
return status;
}
+
startError:
AudioSystem::stopInput(mId);
clearSyncStartEvent();
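For context on the prepareTracks_l() change above: desiredFrames is now computed for every active track, not just inside the minFrames branch, so the same value can feed the new underrun accounting further down. A small standalone sketch of the same arithmetic, with made-up rates and buffer sizes purely for illustration:

    #include <cstddef>
    #include <cstdio>

    // Mirrors the desiredFrames formula from the hunk above (names are illustrative).
    static size_t desiredFramesFor(size_t normalFrameCount, unsigned trackRate,
                                   unsigned threadRate, size_t unreleasedFrames) {
        if (trackRate == threadRate) {
            return normalFrameCount;
        }
        // +1 for rounding, +1 for the extra sample the interpolator needs, plus any
        // frames the resampler has consumed but not yet released
        return (normalFrameCount * trackRate) / threadRate + 1 + 1 + unreleasedFrames;
    }

    int main() {
        // e.g. a 44.1 kHz track on a 48 kHz mixer with 1024-frame buffers:
        // 1024 * 44100 / 48000 = 940 (integer division), +2 = 942
        std::printf("%zu\n", desiredFramesFor(1024, 44100, 48000, 0));
        return 0;
    }
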
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index fac7071..55d96fa 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -74,7 +74,7 @@ protected:
audio_channel_mask_t channelMask() const { return mChannelMask; }
- uint32_t sampleRate() const; // FIXME inline after cblk sr moved
+ virtual uint32_t sampleRate() const { return mSampleRate; }
// Return a pointer to the start of a contiguous slice of the track buffer.
// Parameter 'offset' is the requested start position, expressed in
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5ac3129..bfc197c 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -98,7 +98,7 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
// ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
size_t size = sizeof(audio_track_cblk_t);
- size_t bufferSize = frameCount * mFrameSize;
+ size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
if (sharedBuffer == 0) {
size += bufferSize;
}
@@ -124,22 +124,16 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
new(mCblk) audio_track_cblk_t();
// clear all buffers
mCblk->frameCount_ = frameCount;
-// uncomment the following lines to quickly test 32-bit wraparound
-// mCblk->user = 0xffff0000;
-// mCblk->server = 0xffff0000;
-// mCblk->userBase = 0xffff0000;
-// mCblk->serverBase = 0xffff0000;
if (sharedBuffer == 0) {
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
memset(mBuffer, 0, bufferSize);
- // Force underrun condition to avoid false underrun callback until first data is
- // written to buffer (other flags are cleared)
- mCblk->flags = CBLK_UNDERRUN;
} else {
mBuffer = sharedBuffer->pointer();
+#if 0
+ mCblk->flags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
+#endif
}
mBufferEnd = (uint8_t *)mBuffer + bufferSize;
- mServerProxy = new ServerProxy(mCblk, mBuffer, frameCount, mFrameSize, isOut);
#ifdef TEE_SINK
if (mTeeSinkTrackEnabled) {
@@ -199,51 +193,17 @@ void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buf
}
#endif
- buffer->raw = NULL;
- mStepCount = buffer->frameCount;
- // FIXME See note at getNextBuffer()
- (void) step(); // ignore return value of step()
+ ServerProxy::Buffer buf;
+ buf.mFrameCount = buffer->frameCount;
+ buf.mRaw = buffer->raw;
buffer->frameCount = 0;
-}
-
-bool AudioFlinger::ThreadBase::TrackBase::step() {
- bool result = mServerProxy->step(mStepCount);
- if (!result) {
- ALOGV("stepServer failed acquiring cblk mutex");
- mStepServerFailed = true;
- }
- return result;
+ buffer->raw = NULL;
+ mServerProxy->releaseBuffer(&buf);
}
void AudioFlinger::ThreadBase::TrackBase::reset() {
- audio_track_cblk_t* cblk = this->cblk();
-
- cblk->user = 0;
- cblk->server = 0;
- cblk->userBase = 0;
- cblk->serverBase = 0;
- mStepServerFailed = false;
ALOGV("TrackBase::reset");
-}
-
-uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
- return mServerProxy->getSampleRate();
-}
-
-void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
- audio_track_cblk_t* cblk = this->cblk();
- int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize;
- int8_t *bufferEnd = bufferStart + frames * mFrameSize;
-
- // Check validity of returned pointer in case the track control block would have been corrupted.
- ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
- "TrackBase::getBuffer buffer out of range:\n"
- " start: %p, end %p , mBuffer %p mBufferEnd %p\n"
- " server %u, serverBase %u, user %u, userBase %u, frameSize %u",
- bufferStart, bufferEnd, mBuffer, mBufferEnd,
- cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize);
-
- return bufferStart;
+ // FIXME still needed?
}
status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
@@ -362,9 +322,18 @@ AudioFlinger::PlaybackThread::Track::Track(
mFastIndex(-1),
mUnderrunCount(0),
mCachedVolume(1.0),
- mIsInvalid(false)
+ mIsInvalid(false),
+ mAudioTrackServerProxy(NULL)
{
if (mCblk != NULL) {
+ if (sharedBuffer == 0) {
+ mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
+ mFrameSize);
+ } else {
+ mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
+ mFrameSize);
+ }
+ mServerProxy = mAudioTrackServerProxy;
// to avoid leaking a track name, do not allocate one unless there is an mCblk
mName = thread->getTrackName_l(channelMask, sessionId);
mCblk->mName = mName;
@@ -374,6 +343,7 @@ AudioFlinger::PlaybackThread::Track::Track(
}
// only allocate a fast track index if we were able to allocate a normal track name
if (flags & IAudioFlinger::TRACK_FAST) {
+ mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
int i = __builtin_ctz(thread->mFastTrackAvailMask);
ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
@@ -432,12 +402,12 @@ void AudioFlinger::PlaybackThread::Track::destroy()
/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
result.append(" Name Client Type Fmt Chn mask Session StpCnt fCount S F SRate "
- "L dB R dB Server User Main buf Aux Buf Flags Underruns\n");
+ "L dB R dB Server Main buf Aux Buf Flags Underruns\n");
}
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
{
- uint32_t vlr = mServerProxy->getVolumeLR();
+ uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
if (isFastTrack()) {
sprintf(buffer, " F %2d", mFastIndex);
} else {
@@ -496,7 +466,7 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
break;
}
snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g "
- "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
+ "0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
(mClient == 0) ? getpid_cached : mClient->pid(),
mStreamType,
mFormat,
@@ -506,11 +476,10 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
mFrameCount,
stateChar,
mFillingUpStatus,
- mServerProxy->getSampleRate(),
+ mAudioTrackServerProxy->getSampleRate(),
20.0 * log10((vlr & 0xFFFF) / 4096.0),
20.0 * log10((vlr >> 16) / 4096.0),
mCblk->server,
- mCblk->user,
(int)mMainBuffer,
(int)mAuxBuffer,
mCblk->flags,
@@ -518,53 +487,27 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
nowInUnderrun);
}
+uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
+ return mAudioTrackServerProxy->getSampleRate();
+}
+
// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
AudioBufferProvider::Buffer* buffer, int64_t pts)
{
- audio_track_cblk_t* cblk = this->cblk();
- uint32_t framesReady;
- uint32_t framesReq = buffer->frameCount;
-
- // Check if last stepServer failed, try to step now
- if (mStepServerFailed) {
- // FIXME When called by fast mixer, this takes a mutex with tryLock().
- // Since the fast mixer is higher priority than client callback thread,
- // it does not result in priority inversion for client.
- // But a non-blocking solution would be preferable to avoid
- // fast mixer being unable to tryLock(), and
- // to avoid the extra context switches if the client wakes up,
- // discovers the mutex is locked, then has to wait for fast mixer to unlock.
- if (!step()) goto getNextBuffer_exit;
- ALOGV("stepServer recovered");
- mStepServerFailed = false;
+ ServerProxy::Buffer buf;
+ size_t desiredFrames = buffer->frameCount;
+ buf.mFrameCount = desiredFrames;
+ status_t status = mServerProxy->obtainBuffer(&buf);
+ buffer->frameCount = buf.mFrameCount;
+ buffer->raw = buf.mRaw;
+ if (buf.mFrameCount == 0) {
+ // only implemented so far for normal tracks, not fast tracks
+ mCblk->u.mStreaming.mUnderrunFrames += desiredFrames;
+ // FIXME also wake futex so that underrun is noticed more quickly
+ (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
}
-
- // FIXME Same as above
- framesReady = mServerProxy->framesReady();
-
- if (CC_LIKELY(framesReady)) {
- uint32_t s = cblk->server;
- uint32_t bufferEnd = cblk->serverBase + mFrameCount;
-
- bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
- if (framesReq > framesReady) {
- framesReq = framesReady;
- }
- if (framesReq > bufferEnd - s) {
- framesReq = bufferEnd - s;
- }
-
- buffer->raw = getBuffer(s, framesReq);
- buffer->frameCount = framesReq;
- return NO_ERROR;
- }
-
-getNextBuffer_exit:
- buffer->raw = NULL;
- buffer->frameCount = 0;
- ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
- return NOT_ENOUGH_DATA;
+ return status;
}
// Note that framesReady() takes a mutex on the control block using tryLock().
@@ -576,7 +519,7 @@ getNextBuffer_exit:
// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
- return mServerProxy->framesReady();
+ return mAudioTrackServerProxy->framesReady();
}
// Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -732,7 +675,6 @@ void AudioFlinger::PlaybackThread::Track::reset()
// Force underrun condition to avoid false underrun callback until first data is
// written to buffer
android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
- android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
mFillingUpStatus = FS_FILLING;
mResetDone = true;
if (mState == FLUSHED) {
@@ -833,7 +775,7 @@ uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
{
// called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
- uint32_t vlr = mServerProxy->getVolumeLR();
+ uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
uint32_t vl = vlr & 0xFFFF;
uint32_t vr = vlr >> 16;
// track volumes come from shared memory, so can't be trusted and must be clamped
@@ -870,9 +812,12 @@ status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>&
void AudioFlinger::PlaybackThread::Track::invalidate()
{
- // FIXME should use proxy
- android_atomic_or(CBLK_INVALID, &mCblk->flags);
- mCblk->cv.signal();
+ // FIXME should use proxy, and needs work
+ audio_track_cblk_t* cblk = mCblk;
+ android_atomic_or(CBLK_INVALID, &cblk->flags);
+ android_atomic_release_store(0x40000000, &cblk->mFutex);
+ // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
+ (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
mIsInvalid = true;
}
@@ -1418,6 +1363,8 @@ AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
mClientProxy->setSendLevel(0.0);
mClientProxy->setSampleRate(sampleRate);
+ mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
+ true /*clientInServer*/);
} else {
ALOGW("Error creating output track on thread %p", playbackThread);
}
@@ -1477,7 +1424,7 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
mBufferQueue.add(pInBuffer);
} else {
- ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
+ ALOGW("OutputTrack::write() %p no more buffers in queue", this);
}
}
}
@@ -1498,9 +1445,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
if (mOutBuffer.frameCount == 0) {
mOutBuffer.frameCount = pInBuffer->frameCount;
nsecs_t startTime = systemTime();
- if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
- ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
- mThread.unsafe_get());
+ status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
+ if (status != NO_ERROR) {
+ ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
+ mThread.unsafe_get(), status);
outputBufferFull = true;
break;
}
@@ -1515,7 +1463,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
pInBuffer->frameCount;
memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
- mClientProxy->stepUser(outFrames);
+ Proxy::Buffer buf;
+ buf.mFrameCount = outFrames;
+ buf.mRaw = NULL;
+ mClientProxy->releaseBuffer(&buf);
pInBuffer->frameCount -= outFrames;
pInBuffer->i16 += outFrames * channelCount;
mOutBuffer.frameCount -= outFrames;
@@ -1559,8 +1510,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
// If no more buffers are pending, fill output track buffer to make sure it is started
// by output mixer.
if (frames == 0 && mBufferQueue.size() == 0) {
- if (mCblk->user < mFrameCount) {
- frames = mFrameCount - mCblk->user;
+ // FIXME broken, replace by getting framesReady() from proxy
+ size_t user = 0; // was mCblk->user
+ if (user < mFrameCount) {
+ frames = mFrameCount - user;
pInBuffer = new Buffer;
pInBuffer->mBuffer = new int16_t[frames * channelCount];
pInBuffer->frameCount = frames;
@@ -1578,46 +1531,17 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
- audio_track_cblk_t* cblk = mCblk;
- uint32_t framesReq = buffer->frameCount;
-
- ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
- buffer->frameCount = 0;
-
- size_t framesAvail;
- {
- Mutex::Autolock _l(cblk->lock);
-
- // read the server count again
- while (!(framesAvail = mClientProxy->framesAvailable_l())) {
- if (CC_UNLIKELY(!mActive)) {
- ALOGV("Not active and NO_MORE_BUFFERS");
- return NO_MORE_BUFFERS;
- }
- status_t result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
- if (result != NO_ERROR) {
- return NO_MORE_BUFFERS;
- }
- }
- }
-
- if (framesReq > framesAvail) {
- framesReq = framesAvail;
- }
-
- uint32_t u = cblk->user;
- uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
- if (framesReq > bufferEnd - u) {
- framesReq = bufferEnd - u;
- }
-
- buffer->frameCount = framesReq;
- buffer->raw = mClientProxy->buffer(u);
- return NO_ERROR;
+ ClientProxy::Buffer buf;
+ buf.mFrameCount = buffer->frameCount;
+ struct timespec timeout;
+ timeout.tv_sec = waitTimeMs / 1000;
+ timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
+ status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
+ buffer->frameCount = buf.mFrameCount;
+ buffer->raw = buf.mRaw;
+ return status;
}
-
void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
{
size_t size = mBufferQueue.size();
@@ -1688,6 +1612,11 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack(
mOverflow(false)
{
ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+ if (mCblk != NULL) {
+ mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
+ mFrameSize);
+ mServerProxy = mAudioRecordServerProxy;
+ }
}
AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
@@ -1699,42 +1628,16 @@ AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
int64_t pts)
{
- audio_track_cblk_t* cblk = this->cblk();
- uint32_t framesAvail;
- uint32_t framesReq = buffer->frameCount;
-
- // Check if last stepServer failed, try to step now
- if (mStepServerFailed) {
- if (!step()) {
- goto getNextBuffer_exit;
- }
- ALOGV("stepServer recovered");
- mStepServerFailed = false;
+ ServerProxy::Buffer buf;
+ buf.mFrameCount = buffer->frameCount;
+ status_t status = mServerProxy->obtainBuffer(&buf);
+ buffer->frameCount = buf.mFrameCount;
+ buffer->raw = buf.mRaw;
+ if (buf.mFrameCount == 0) {
+ // FIXME also wake futex so that overrun is noticed more quickly
+ (void) android_atomic_or(CBLK_OVERRUN, &mCblk->flags);
}
-
- // FIXME lock is not actually held, so overrun is possible
- framesAvail = mServerProxy->framesAvailableIn_l();
-
- if (CC_LIKELY(framesAvail)) {
- uint32_t s = cblk->server;
- uint32_t bufferEnd = cblk->serverBase + mFrameCount;
-
- if (framesReq > framesAvail) {
- framesReq = framesAvail;
- }
- if (framesReq > bufferEnd - s) {
- framesReq = bufferEnd - s;
- }
-
- buffer->raw = getBuffer(s, framesReq);
- buffer->frameCount = framesReq;
- return NO_ERROR;
- }
-
-getNextBuffer_exit:
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return NOT_ENOUGH_DATA;
+ return status;
}
status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
@@ -1790,12 +1693,12 @@ void AudioFlinger::RecordThread::RecordTrack::destroy()
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append(" Clien Fmt Chn mask Session Step S Serv User FrameCount\n");
+ result.append(" Clien Fmt Chn mask Session Step S Serv FrameCount\n");
}
void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
{
- snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %08x %08x %05d\n",
+ snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %08x %05d\n",
(mClient == 0) ? getpid_cached : mClient->pid(),
mFormat,
mChannelMask,
@@ -1803,7 +1706,6 @@ void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
mStepCount,
mState,
mCblk->server,
- mCblk->user,
mFrameCount);
}
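The getNextBuffer()/releaseBuffer() rewrites in this file drop the old cblk stepUser()/stepServer() bookkeeping in favor of the ServerProxy obtainBuffer()/releaseBuffer() pair. The toy proxy below is only a sketch of that calling pattern; the real AudioTrackShared proxies operate lock-free on a control block shared with the client process:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Buffer {
        size_t   frameCount;   // in: frames wanted, out: frames granted
        int16_t* raw;          // out: pointer to the granted frames
    };

    // Toy single-threaded stand-in for the server-side proxy.
    class ToyServerProxy {
    public:
        ToyServerProxy(int16_t* buffer, size_t frames) : mBuffer(buffer), mFrames(frames) {}
        void clientWrote(size_t frames) { mWritten += frames; }   // pretend the client filled data
        bool obtainBuffer(Buffer* buf) {
            size_t avail  = mWritten - mReleased;
            size_t offset = mReleased % mFrames;
            size_t contig = mFrames - offset;                     // stop at the wrap point
            if (buf->frameCount > avail)  buf->frameCount = avail;
            if (buf->frameCount > contig) buf->frameCount = contig;
            buf->raw = (buf->frameCount != 0) ? mBuffer + offset : nullptr;
            return buf->frameCount != 0;
        }
        void releaseBuffer(Buffer* buf) { mReleased += buf->frameCount; buf->frameCount = 0; }
    private:
        int16_t* mBuffer;
        size_t   mFrames;
        size_t   mWritten  = 0;
        size_t   mReleased = 0;
    };

    int main() {
        std::vector<int16_t> storage(256);
        ToyServerProxy proxy(storage.data(), storage.size());
        proxy.clientWrote(100);

        Buffer buf;
        buf.frameCount = 64;                  // ask for up to 64 frames, as the mixer would
        if (proxy.obtainBuffer(&buf)) {
            std::printf("mixing %zu frames\n", buf.frameCount);
            proxy.releaseBuffer(&buf);        // hand them back once consumed
        } else {
            std::printf("underrun\n");        // the real code also raises CBLK_UNDERRUN here
        }
        return 0;
    }
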
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index a1971e3..d1ab7eb 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -616,27 +616,94 @@ void Camera2Client::setPreviewCallbackFlagL(Parameters &params, int flag) {
params.previewCallbackOneShot = true;
}
if (params.previewCallbackFlags != (uint32_t)flag) {
+
+ if (flag != CAMERA_FRAME_CALLBACK_FLAG_NOOP) {
+ // Disable any existing preview callback window when enabling
+ // preview callback flags
+ res = mCallbackProcessor->setCallbackWindow(NULL);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to clear preview callback surface:"
+ " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
+ return;
+ }
+ params.previewCallbackSurface = false;
+ }
+
params.previewCallbackFlags = flag;
+
switch(params.state) {
+ case Parameters::PREVIEW:
+ res = startPreviewL(params, true);
+ break;
+ case Parameters::RECORD:
+ case Parameters::VIDEO_SNAPSHOT:
+ res = startRecordingL(params, true);
+ break;
+ default:
+ break;
+ }
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+ __FUNCTION__, mCameraId,
+ Parameters::getStateName(params.state));
+ }
+ }
+
+}
+
+status_t Camera2Client::setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) {
+ ATRACE_CALL();
+ ALOGV("%s: E", __FUNCTION__);
+ Mutex::Autolock icl(mBinderSerializationLock);
+ status_t res;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ sp<ANativeWindow> window;
+ if (callbackProducer != 0) {
+ window = new Surface(callbackProducer);
+ }
+
+ res = mCallbackProcessor->setCallbackWindow(window);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set preview callback surface: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+
+ SharedParameters::Lock l(mParameters);
+
+ if (window != NULL) {
+ // Disable traditional callbacks when a valid callback target is given
+ l.mParameters.previewCallbackFlags = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+ l.mParameters.previewCallbackOneShot = false;
+ l.mParameters.previewCallbackSurface = true;
+ } else {
+ // Disable callback target if given a NULL interface.
+ l.mParameters.previewCallbackSurface = false;
+ }
+
+ switch(l.mParameters.state) {
case Parameters::PREVIEW:
- res = startPreviewL(params, true);
+ res = startPreviewL(l.mParameters, true);
break;
case Parameters::RECORD:
case Parameters::VIDEO_SNAPSHOT:
- res = startRecordingL(params, true);
+ res = startRecordingL(l.mParameters, true);
break;
default:
break;
- }
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to refresh request in state %s",
- __FUNCTION__, mCameraId,
- Parameters::getStateName(params.state));
- }
+ }
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+ __FUNCTION__, mCameraId,
+ Parameters::getStateName(l.mParameters.state));
}
+ return OK;
}
+
status_t Camera2Client::startPreview() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
@@ -699,8 +766,10 @@ status_t Camera2Client::startPreviewL(Parameters &params, bool restart) {
}
Vector<uint8_t> outputStreams;
- bool callbacksEnabled = params.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
+ bool callbacksEnabled = (params.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+ params.previewCallbackSurface;
+
if (callbacksEnabled) {
res = mCallbackProcessor->updateStream(params);
if (res != OK) {
@@ -909,8 +978,10 @@ status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
}
Vector<uint8_t> outputStreams;
- bool callbacksEnabled = params.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
+ bool callbacksEnabled = (params.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+ params.previewCallbackSurface;
+
if (callbacksEnabled) {
res = mCallbackProcessor->updateStream(params);
if (res != OK) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index 8ab46b1..078e3a3 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -51,6 +51,9 @@ public:
virtual status_t setPreviewTexture(
const sp<IGraphicBufferProducer>& bufferProducer);
virtual void setPreviewCallbackFlag(int flag);
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer);
+
virtual status_t startPreview();
virtual void stopPreview();
virtual bool previewEnabled();
diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/Camera3Device.cpp
index cc7802b..73bf30c 100644
--- a/services/camera/libcameraservice/Camera3Device.cpp
+++ b/services/camera/libcameraservice/Camera3Device.cpp
@@ -128,7 +128,10 @@ status_t Camera3Device::initialize(camera_module_t *module)
/** Initialize device with callback functions */
+ ATRACE_BEGIN("camera3->initialize");
res = device->ops->initialize(device, this);
+ ATRACE_END();
+
if (res != OK) {
SET_ERR_L("Unable to initialize HAL device: %s (%d)",
strerror(-res), res);
@@ -140,7 +143,9 @@ status_t Camera3Device::initialize(camera_module_t *module)
mVendorTagOps.get_camera_vendor_section_name = NULL;
+ ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps);
+ ATRACE_END();
if (mVendorTagOps.get_camera_vendor_section_name != NULL) {
res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps);
@@ -736,7 +741,7 @@ status_t Camera3Device::deleteReprocessStream(int id) {
status_t Camera3Device::createDefaultRequest(int templateId,
CameraMetadata *request) {
ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
+ ALOGV("%s: for template %d", __FUNCTION__, templateId);
Mutex::Autolock l(mLock);
switch (mStatus) {
@@ -756,8 +761,10 @@ status_t Camera3Device::createDefaultRequest(int templateId,
}
const camera_metadata_t *rawRequest;
+ ATRACE_BEGIN("camera3->construct_default_request_settings");
rawRequest = mHal3Device->ops->construct_default_request_settings(
mHal3Device, templateId);
+ ATRACE_END();
if (rawRequest == NULL) {
SET_ERR_L("HAL is unable to construct default settings for template %d",
templateId);
@@ -1068,8 +1075,9 @@ status_t Camera3Device::configureStreamsLocked() {
// Do the HAL configuration; will potentially touch stream
// max_buffers, usage, priv fields.
-
+ ATRACE_BEGIN("camera3->configure_streams");
res = mHal3Device->ops->configure_streams(mHal3Device, &config);
+ ATRACE_END();
if (res != OK) {
SET_ERR_L("Unable to configure streams with HAL: %s (%d)",
@@ -1224,6 +1232,7 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
}
if (request.haveResultMetadata && request.numBuffersLeft == 0) {
+ ATRACE_ASYNC_END("frame capture", frameNumber);
mInFlightMap.removeItemsAt(idx, 1);
}
@@ -1274,8 +1283,7 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
if (entry.count == 0) {
SET_ERR("No timestamp provided by HAL for frame %d!",
frameNumber);
- }
- if (timestamp != entry.data.i64[0]) {
+ } else if (timestamp != entry.data.i64[0]) {
SET_ERR("Timestamp mismatch between shutter notify and result"
" metadata for frame %d (%lld vs %lld respectively)",
frameNumber, timestamp, entry.data.i64[0]);
@@ -1374,6 +1382,7 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
}
void Camera3Device::notify(const camera3_notify_msg *msg) {
+ ATRACE_CALL();
NotificationListener *listener;
{
Mutex::Autolock l(mOutputLock);
@@ -1394,6 +1403,9 @@ void Camera3Device::notify(const camera3_notify_msg *msg) {
msg->message.error.error_stream);
streamId = stream->getId();
}
+ ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
+ mId, __FUNCTION__, msg->message.error.frame_number,
+ streamId, msg->message.error.error_code);
if (listener != NULL) {
listener->notifyError(msg->message.error.error_code,
msg->message.error.frame_number, streamId);
@@ -1429,7 +1441,8 @@ void Camera3Device::notify(const camera3_notify_msg *msg) {
frameNumber);
break;
}
-
+ ALOGVV("Camera %d: %s: Shutter fired for frame %d at %lld",
+ mId, __FUNCTION__, frameNumber, timestamp);
// Call listener, if any
if (listener != NULL) {
listener->notifyShutter(frameNumber, timestamp);
@@ -1550,6 +1563,7 @@ void Camera3Device::RequestThread::setPaused(bool paused) {
}
status_t Camera3Device::RequestThread::waitUntilPaused(nsecs_t timeout) {
+ ATRACE_CALL();
status_t res;
Mutex::Autolock l(mPauseLock);
while (!mPaused) {
@@ -1696,8 +1710,11 @@ bool Camera3Device::RequestThread::threadLoop() {
}
// Submit request and block until ready for next one
-
+ ATRACE_ASYNC_BEGIN("frame capture", request.frame_number);
+ ATRACE_BEGIN("camera3->process_capture_request");
res = mHal3Device->ops->process_capture_request(mHal3Device, &request);
+ ATRACE_END();
+
if (res != OK) {
SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
" device: %s (%d)", request.frame_number, strerror(-res), res);
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/CameraClient.cpp
index e577fa3..be78f69 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/CameraClient.cpp
@@ -347,6 +347,12 @@ void CameraClient::setPreviewCallbackFlag(int callback_flag) {
}
}
+status_t CameraClient::setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) {
+ ALOGE("%s: Unimplemented!", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
// start preview mode
status_t CameraClient::startPreview() {
LOG1("startPreview (pid %d)", getCallingPid());
diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/CameraClient.h
index 7f0cb29..abde75a 100644
--- a/services/camera/libcameraservice/CameraClient.h
+++ b/services/camera/libcameraservice/CameraClient.h
@@ -40,6 +40,8 @@ public:
virtual status_t setPreviewDisplay(const sp<Surface>& surface);
virtual status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer);
virtual void setPreviewCallbackFlag(int flag);
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer);
virtual status_t startPreview();
virtual void stopPreview();
virtual bool previewEnabled();
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 710f164..eaa316a 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -190,6 +190,8 @@ public:
virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0;
virtual status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer)=0;
virtual void setPreviewCallbackFlag(int flag) = 0;
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) = 0;
virtual status_t startPreview() = 0;
virtual void stopPreview() = 0;
virtual bool previewEnabled() = 0;
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 4987ab6..78210ab 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -37,6 +37,7 @@ CallbackProcessor::CallbackProcessor(sp<Camera2Client> client):
mDevice(client->getCameraDevice()),
mId(client->getCameraId()),
mCallbackAvailable(false),
+ mCallbackToApp(false),
mCallbackStreamId(NO_STREAM) {
}
@@ -53,6 +54,35 @@ void CallbackProcessor::onFrameAvailable() {
}
}
+status_t CallbackProcessor::setCallbackWindow(
+ sp<ANativeWindow> callbackWindow) {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return OK;
+ sp<CameraDeviceBase> device = client->getCameraDevice();
+
+ // If the window is changing, clear out stream if it already exists
+ if (mCallbackWindow != callbackWindow && mCallbackStreamId != NO_STREAM) {
+ res = device->deleteStream(mCallbackStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old stream "
+ "for callbacks: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mCallbackStreamId = NO_STREAM;
+ mCallbackConsumer.clear();
+ }
+ mCallbackWindow = callbackWindow;
+ mCallbackToApp = (mCallbackWindow != NULL);
+
+ return OK;
+}
+
status_t CallbackProcessor::updateStream(const Parameters &params) {
ATRACE_CALL();
status_t res;
@@ -67,14 +97,18 @@ status_t CallbackProcessor::updateStream(const Parameters &params) {
// If possible, use the flexible YUV format
int32_t callbackFormat = params.previewFormat;
- if (params.fastInfo.useFlexibleYuv &&
+ if (mCallbackToApp) {
+ // TODO: etalvala: This should use the flexible YUV format as well, but
+ // need to reconcile HAL2/HAL3 requirements.
+ callbackFormat = HAL_PIXEL_FORMAT_YV12;
+ } else if(params.fastInfo.useFlexibleYuv &&
(params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) {
callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
}
- if (mCallbackConsumer == 0) {
- // Create CPU buffer queue endpoint
+ if (!mCallbackToApp && mCallbackConsumer == 0) {
+ // Create CPU buffer queue endpoint, since app hasn't given us one
mCallbackConsumer = new CpuConsumer(kCallbackHeapCount);
mCallbackConsumer->setFrameAvailableListener(this);
mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
@@ -104,8 +138,8 @@ status_t CallbackProcessor::updateStream(const Parameters &params) {
res = device->deleteStream(mCallbackStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for callbacks: %s (%d)", __FUNCTION__, mId,
- strerror(-res), res);
+ "for callbacks: %s (%d)", __FUNCTION__,
+ mId, strerror(-res), res);
return res;
}
mCallbackStreamId = NO_STREAM;
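With the change above, the callback format is chosen in three steps: callbacks routed to an app-supplied window use YV12 for now (per the TODO), otherwise flexible YUV is used when the HAL advertises it for NV21/YV12 previews, otherwise the preview format is passed through unchanged. A compact sketch of that decision, with stand-in constants rather than the real gralloc header:

    #include <cstdint>

    // Stand-in values matching HAL_PIXEL_FORMAT_YCrCb_420_SP, _YV12 and _YCbCr_420_888.
    enum : int32_t {
        FMT_NV21     = 0x11,
        FMT_YV12     = 0x32315659,
        FMT_FLEX_YUV = 0x23,
    };

    static int32_t chooseCallbackFormat(bool callbackToApp, bool useFlexibleYuv,
                                        int32_t previewFormat) {
        if (callbackToApp) {
            return FMT_YV12;        // app-provided callback surface: plain YV12 for now
        }
        if (useFlexibleYuv && (previewFormat == FMT_NV21 || previewFormat == FMT_YV12)) {
            return FMT_FLEX_YUV;    // let the HAL pick the best flexible YUV layout
        }
        return previewFormat;
    }
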
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/camera2/CallbackProcessor.h
index d851a84..17dcfb1 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.h
@@ -45,6 +45,8 @@ class CallbackProcessor:
void onFrameAvailable();
+ // Set to NULL to disable the direct-to-app callback window
+ status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
status_t updateStream(const Parameters &params);
status_t deleteStream();
int getStreamId() const;
@@ -64,6 +66,9 @@ class CallbackProcessor:
NO_STREAM = -1
};
+ // True if mCallbackWindow is a remote consumer, false if just the local
+ // mCallbackConsumer
+ bool mCallbackToApp;
int mCallbackStreamId;
static const size_t kCallbackHeapCount = 6;
sp<CpuConsumer> mCallbackConsumer;
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
index c9af71e..2f0c67d 100644
--- a/services/camera/libcameraservice/camera2/JpegCompressor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
@@ -210,7 +210,8 @@ boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr /*cinfo*/) {
return true;
}
-void JpegCompressor::jpegTermDestination(j_compress_ptr /*cinfo*/) {
+void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+ (void) cinfo; // TODO: clean up
ALOGV("%s", __FUNCTION__);
ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
__FUNCTION__, cinfo->dest->free_in_buffer);
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index a248b76..e8f3f50 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -292,8 +292,11 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::WHITE_BALANCE_AUTO);
camera_metadata_ro_entry_t availableWhiteBalanceModes =
- staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
- {
+ staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
+ if (!availableWhiteBalanceModes.count) {
+ params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+ CameraParameters::WHITE_BALANCE_AUTO);
+ } else {
String8 supportedWhiteBalance;
bool addComma = false;
for (size_t i=0; i < availableWhiteBalanceModes.count; i++) {
@@ -353,9 +356,11 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::EFFECT_NONE);
camera_metadata_ro_entry_t availableEffects =
- staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS);
- if (!availableEffects.count) return NO_INIT;
- {
+ staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS, 0, 0, false);
+ if (!availableEffects.count) {
+ params.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+ CameraParameters::EFFECT_NONE);
+ } else {
String8 supportedEffects;
bool addComma = false;
for (size_t i=0; i < availableEffects.count; i++) {
@@ -413,9 +418,11 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::ANTIBANDING_AUTO);
camera_metadata_ro_entry_t availableAntibandingModes =
- staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES);
- if (!availableAntibandingModes.count) return NO_INIT;
- {
+ staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, 0, 0, false);
+ if (!availableAntibandingModes.count) {
+ params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+ CameraParameters::ANTIBANDING_OFF);
+ } else {
String8 supportedAntibanding;
bool addComma = false;
for (size_t i=0; i < availableAntibandingModes.count; i++) {
@@ -455,9 +462,10 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::SCENE_MODE_AUTO);
camera_metadata_ro_entry_t availableSceneModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
- if (!availableSceneModes.count) return NO_INIT;
- {
+ staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
+ if (!availableSceneModes.count) {
+ params.remove(CameraParameters::KEY_SCENE_MODE);
+ } else {
String8 supportedSceneModes(CameraParameters::SCENE_MODE_AUTO);
bool addComma = true;
bool noSceneModes = false;
@@ -548,15 +556,17 @@ status_t Parameters::initialize(const CameraMetadata *info) {
}
}
+ bool isFlashAvailable = false;
camera_metadata_ro_entry_t flashAvailable =
- staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1);
- if (!flashAvailable.count) return NO_INIT;
+ staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 0, 1, false);
+ if (flashAvailable.count) {
+ isFlashAvailable = flashAvailable.data.u8[0];
+ }
camera_metadata_ro_entry_t availableAeModes =
- staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES);
- if (!availableAeModes.count) return NO_INIT;
+ staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
- if (flashAvailable.data.u8[0]) {
+ if (isFlashAvailable) {
flashMode = Parameters::FLASH_MODE_OFF;
params.set(CameraParameters::KEY_FLASH_MODE,
CameraParameters::FLASH_MODE_OFF);
@@ -585,14 +595,12 @@ status_t Parameters::initialize(const CameraMetadata *info) {
}
camera_metadata_ro_entry_t minFocusDistance =
- staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 1, 1);
- if (!minFocusDistance.count) return NO_INIT;
+ staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 1, false);
camera_metadata_ro_entry_t availableAfModes =
- staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES);
- if (!availableAfModes.count) return NO_INIT;
+ staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES, 0, 0, false);
- if (minFocusDistance.data.f[0] == 0) {
+ if (!minFocusDistance.count || minFocusDistance.data.f[0] == 0) {
// Fixed-focus lens
focusMode = Parameters::FOCUS_MODE_FIXED;
params.set(CameraParameters::KEY_FOCUS_MODE,
@@ -662,7 +670,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
focusingAreas.add(Parameters::Area(0,0,0,0,0));
camera_metadata_ro_entry_t availableFocalLengths =
- staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
+ staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
if (!availableFocalLengths.count) return NO_INIT;
float minFocalLength = availableFocalLengths.data.f[0];
@@ -768,8 +776,8 @@ status_t Parameters::initialize(const CameraMetadata *info) {
CameraParameters::FALSE);
camera_metadata_ro_entry_t availableVideoStabilizationModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
- if (!availableVideoStabilizationModes.count) return NO_INIT;
+ staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+ false);
if (availableVideoStabilizationModes.count > 1) {
params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
@@ -794,9 +802,10 @@ status_t Parameters::initialize(const CameraMetadata *info) {
previewCallbackFlags = 0;
previewCallbackOneShot = false;
+ previewCallbackSurface = false;
camera_metadata_ro_entry_t supportedHardwareLevel =
- staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
+ staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, 0, 0, false);
if (!supportedHardwareLevel.count || (supportedHardwareLevel.data.u8[0] ==
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED)) {
ALOGI("Camera %d: ZSL mode disabled for limited mode HALs", cameraId);
@@ -828,14 +837,21 @@ String8 Parameters::get() const {
status_t Parameters::buildFastInfo() {
camera_metadata_ro_entry_t activeArraySize =
- staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 2);
+ staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
if (!activeArraySize.count) return NO_INIT;
- int32_t arrayWidth = activeArraySize.data.i32[0];
- int32_t arrayHeight = activeArraySize.data.i32[1];
+ int32_t arrayWidth;
+ int32_t arrayHeight;
+ if (activeArraySize.count == 2) {
+ arrayWidth = activeArraySize.data.i32[0];
+ arrayHeight = activeArraySize.data.i32[1];
+ } else if (activeArraySize.count == 4) {
+ arrayWidth = activeArraySize.data.i32[2];
+ arrayHeight = activeArraySize.data.i32[3];
+ } else return NO_INIT;
camera_metadata_ro_entry_t availableFaceDetectModes =
- staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
- if (!availableFaceDetectModes.count) return NO_INIT;
+ staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0,
+ false);
uint8_t bestFaceDetectMode =
ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
@@ -862,19 +878,21 @@ status_t Parameters::buildFastInfo() {
}
}
+ int32_t maxFaces = 0;
camera_metadata_ro_entry_t maxFacesDetected =
- staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 1, 1);
- if (!maxFacesDetected.count) return NO_INIT;
-
- int32_t maxFaces = maxFacesDetected.data.i32[0];
+ staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 0, 1, false);
+ if (maxFacesDetected.count) {
+ maxFaces = maxFacesDetected.data.i32[0];
+ }
camera_metadata_ro_entry_t availableSceneModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+ staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
camera_metadata_ro_entry_t sceneModeOverrides =
- staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES);
+ staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, 0, 0, false);
camera_metadata_ro_entry_t minFocusDistance =
- staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
- bool fixedLens = (minFocusDistance.data.f[0] == 0);
+ staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, false);
+ bool fixedLens = minFocusDistance.count == 0 ||
+ minFocusDistance.data.f[0] == 0;
camera_metadata_ro_entry_t availableFocalLengths =
staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
@@ -1465,7 +1483,7 @@ status_t Parameters::set(const String8& paramString) {
}
if (validatedParams.wbMode != wbMode) {
camera_metadata_ro_entry_t availableWbModes =
- staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+ staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
for (i = 0; i < availableWbModes.count; i++) {
if (validatedParams.wbMode == availableWbModes.data.u8[i]) break;
}
@@ -1496,8 +1514,9 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.currentAfTriggerId = -1;
if (validatedParams.focusMode != Parameters::FOCUS_MODE_FIXED) {
camera_metadata_ro_entry_t minFocusDistance =
- staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
- if (minFocusDistance.data.f[0] == 0) {
+ staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0,
+ false);
+ if (minFocusDistance.count && minFocusDistance.data.f[0] == 0) {
ALOGE("%s: Requested focus mode \"%s\" is not available: "
"fixed focus lens",
__FUNCTION__,
@@ -1617,7 +1636,8 @@ status_t Parameters::set(const String8& paramString) {
validatedParams.videoStabilization = boolFromString(
newParams.get(CameraParameters::KEY_VIDEO_STABILIZATION) );
camera_metadata_ro_entry_t availableVideoStabilizationModes =
- staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
+ staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+ false);
if (validatedParams.videoStabilization &&
availableVideoStabilizationModes.count == 1) {
ALOGE("%s: Video stabilization not supported", __FUNCTION__);
@@ -2544,10 +2564,6 @@ status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov)
staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
if (!sensorSize.count) return NO_INIT;
- camera_metadata_ro_entry_t availableFocalLengths =
- staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
- if (!availableFocalLengths.count) return NO_INIT;
-
float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
fastInfo.arrayHeight;
float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
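Several lookups in Parameters.cpp now call staticInfo(tag, 0, 0, false), meaning the tag is optional: a missing entry falls back to a sensible default instead of failing initialize() with NO_INIT. A self-contained sketch of that fallback pattern, using a toy entry type rather than the real camera_metadata_ro_entry_t:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy stand-in for camera_metadata_ro_entry_t: just optional float data.
    struct Entry {
        std::vector<float> data;
        std::size_t count() const { return data.size(); }
    };

    // Same shape as the minimum-focus-distance handling above: an absent optional
    // tag is treated as a fixed-focus lens rather than an initialization error.
    static bool lensIsFixedFocus(const Entry& minFocusDistance) {
        return minFocusDistance.count() == 0 || minFocusDistance.data[0] == 0.0f;
    }

    int main() {
        Entry missing;                 // HAL did not publish the tag
        Entry autofocus{{0.1f}};       // nonzero minimum focus distance (diopters)
        std::printf("%d %d\n", lensIsFixedFocus(missing), lensIsFixedFocus(autofocus));
        return 0;
    }
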
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index be05b54..464830c 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -142,6 +142,7 @@ struct Parameters {
uint32_t previewCallbackFlags;
bool previewCallbackOneShot;
+ bool previewCallbackSurface;
bool zslMode;
diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
index 2efeede..f085443 100644
--- a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
@@ -301,8 +301,13 @@ status_t Camera3OutputStream::configureQueueLocked() {
return res;
}
- ALOGV("%s: Consumer wants %d buffers", __FUNCTION__,
- maxConsumerBuffers);
+ ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
+ maxConsumerBuffers, camera3_stream::max_buffers);
+ if (camera3_stream::max_buffers == 0) {
+ ALOGE("%s: Camera HAL requested no max_buffers, requires at least 1",
+ __FUNCTION__, camera3_stream::max_buffers);
+ return INVALID_OPERATION;
+ }
mTotalBufferCount = maxConsumerBuffers + camera3_stream::max_buffers;
mDequeuedBufferCount = 0;
diff --git a/services/camera/libcameraservice/camera3/Camera3Stream.cpp b/services/camera/libcameraservice/camera3/Camera3Stream.cpp
index f05658a..ab563df 100644
--- a/services/camera/libcameraservice/camera3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3Stream.cpp
@@ -312,8 +312,10 @@ status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) {
// Got all buffers, register with HAL
ALOGV("%s: Registering %d buffers with camera HAL",
__FUNCTION__, bufferCount);
+ ATRACE_BEGIN("camera3->register_stream_buffers");
res = hal3Device->ops->register_stream_buffers(hal3Device,
&bufferSet);
+ ATRACE_END();
}
// Return all valid buffers to stream, in ERROR state to indicate
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index cd39bad..dfa1066 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -214,7 +214,11 @@ status_t RingBufferConsumer::releaseOldestBufferLocked(size_t* pinnedFrames) {
// In case the object was never pinned, pass the acquire fence
// back to the release fence. If the fence was already waited on,
// it'll just be a no-op to wait on it again.
- err = addReleaseFenceLocked(item.mBuf, item.mFence);
+
+ // item.mGraphicBuffer was populated with the proper graphic-buffer
+ // at acquire even if it was previously acquired
+ err = addReleaseFenceLocked(item.mBuf,
+ item.mGraphicBuffer, item.mFence);
if (err != OK) {
BI_LOGE("Failed to add release fence to buffer "
@@ -226,7 +230,9 @@ status_t RingBufferConsumer::releaseOldestBufferLocked(size_t* pinnedFrames) {
BI_LOGV("Attempting to release buffer timestamp %lld, frame %lld",
item.mTimestamp, item.mFrameNumber);
- err = releaseBufferLocked(item.mBuf,
+ // item.mGraphicBuffer was populated with the proper graphic-buffer
+ // at acquire even if it was previously acquired
+ err = releaseBufferLocked(item.mBuf, item.mGraphicBuffer,
EGL_NO_DISPLAY,
EGL_NO_SYNC_KHR);
if (err != OK) {
@@ -310,7 +316,8 @@ void RingBufferConsumer::unpinBuffer(const BufferItem& item) {
RingBufferItem& find = *it;
if (item.mGraphicBuffer == find.mGraphicBuffer) {
- status_t res = addReleaseFenceLocked(item.mBuf, item.mFence);
+ status_t res = addReleaseFenceLocked(item.mBuf,
+ item.mGraphicBuffer, item.mFence);
if (res != OK) {
BI_LOGE("Failed to add release fence to buffer "