Diffstat (limited to 'libs/surfaceflinger_client/SharedBufferStack.cpp')
-rw-r--r-- | libs/surfaceflinger_client/SharedBufferStack.cpp | 222
1 file changed, 165 insertions, 57 deletions
diff --git a/libs/surfaceflinger_client/SharedBufferStack.cpp b/libs/surfaceflinger_client/SharedBufferStack.cpp
index a17e8ac..c42cd53 100644
--- a/libs/surfaceflinger_client/SharedBufferStack.cpp
+++ b/libs/surfaceflinger_client/SharedBufferStack.cpp
@@ -67,19 +67,47 @@ void SharedBufferStack::init(int32_t i)
     identity = i;
 }
 
+status_t SharedBufferStack::setCrop(int buffer, const Rect& crop)
+{
+    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
+        return BAD_INDEX;
+
+    buffers[buffer].crop.l = uint16_t(crop.left);
+    buffers[buffer].crop.t = uint16_t(crop.top);
+    buffers[buffer].crop.r = uint16_t(crop.right);
+    buffers[buffer].crop.b = uint16_t(crop.bottom);
+    return NO_ERROR;
+}
+
 status_t SharedBufferStack::setDirtyRegion(int buffer, const Region& dirty)
 {
     if (uint32_t(buffer) >= NUM_BUFFER_MAX)
         return BAD_INDEX;
 
-    // in the current implementation we only send a single rectangle
-    const Rect bounds(dirty.getBounds());
-    FlatRegion& reg(dirtyRegion[buffer]);
-    reg.count = 1;
-    reg.rects[0] = uint16_t(bounds.left);
-    reg.rects[1] = uint16_t(bounds.top);
-    reg.rects[2] = uint16_t(bounds.right);
-    reg.rects[3] = uint16_t(bounds.bottom);
+    FlatRegion& reg(buffers[buffer].dirtyRegion);
+    if (dirty.isEmpty()) {
+        reg.count = 0;
+        return NO_ERROR;
+    }
+
+    size_t count;
+    Rect const* r = dirty.getArray(&count);
+    if (count > FlatRegion::NUM_RECT_MAX) {
+        const Rect bounds(dirty.getBounds());
+        reg.count = 1;
+        reg.rects[0].l = uint16_t(bounds.left);
+        reg.rects[0].t = uint16_t(bounds.top);
+        reg.rects[0].r = uint16_t(bounds.right);
+        reg.rects[0].b = uint16_t(bounds.bottom);
+    } else {
+        reg.count = count;
+        for (size_t i=0 ; i<count ; i++) {
+            reg.rects[i].l = uint16_t(r[i].left);
+            reg.rects[i].t = uint16_t(r[i].top);
+            reg.rects[i].r = uint16_t(r[i].right);
+            reg.rects[i].b = uint16_t(r[i].bottom);
+        }
+    }
     return NO_ERROR;
 }
 
@@ -89,8 +117,27 @@ Region SharedBufferStack::getDirtyRegion(int buffer) const
     if (uint32_t(buffer) >= NUM_BUFFER_MAX)
         return res;
 
-    const FlatRegion& reg(dirtyRegion[buffer]);
-    res.set(Rect(reg.rects[0], reg.rects[1], reg.rects[2], reg.rects[3]));
+    const FlatRegion& reg(buffers[buffer].dirtyRegion);
+    if (reg.count > FlatRegion::NUM_RECT_MAX)
+        return res;
+
+    if (reg.count == 1) {
+        const Rect r(
+                reg.rects[0].l,
+                reg.rects[0].t,
+                reg.rects[0].r,
+                reg.rects[0].b);
+        res.set(r);
+    } else {
+        for (size_t i=0 ; i<reg.count ; i++) {
+            const Rect r(
+                    reg.rects[i].l,
+                    reg.rects[i].t,
+                    reg.rects[i].r,
+                    reg.rects[i].b);
+            res.orSelf(r);
+        }
+    }
     return res;
 }
 
@@ -132,7 +179,7 @@ String8 SharedBufferBase::dump(char const* prefix) const
     char buffer[SIZE];
     String8 result;
    SharedBufferStack& stack( *mSharedStack );
-    int tail = (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
+    int tail = computeTail();
     snprintf(buffer, SIZE,
             "%s[ head=%2d, available=%2d, queued=%2d, tail=%2d ] "
             "reallocMask=%08x, inUse=%2d, identity=%d, status=%d\n",
@@ -142,6 +189,48 @@ String8 SharedBufferBase::dump(char const* prefix) const
     return result;
 }
 
+int32_t SharedBufferBase::computeTail() const
+{
+    SharedBufferStack& stack( *mSharedStack );
+    return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
+}
+
+status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
+{
+    const SharedBufferStack& stack( *mSharedStack );
+    SharedClient& client( *mSharedClient );
+    const nsecs_t TIMEOUT = s2ns(1);
+    const int identity = mIdentity;
+
+    Mutex::Autolock _l(client.lock);
+    while ((condition()==false) &&
+            (stack.identity == identity) &&
+            (stack.status == NO_ERROR))
+    {
+        status_t err = client.cv.waitRelative(client.lock, TIMEOUT);
+        // handle errors and timeouts
+        if (CC_UNLIKELY(err != NO_ERROR)) {
+            if (err == TIMED_OUT) {
+                if (condition()) {
+                    LOGE("waitForCondition(%s) timed out (identity=%d), "
+                        "but condition is true! We recovered but it "
+                        "shouldn't happen." , condition.name(), stack.identity);
+                    break;
+                } else {
+                    LOGW("waitForCondition(%s) timed out "
+                        "(identity=%d, status=%d). "
+                        "CPU may be pegged. trying again.", condition.name(),
+                        stack.identity, stack.status);
+                }
+            } else {
+                LOGE("waitForCondition(%s) error (%s) ",
+                        condition.name(), strerror(-err));
+                return err;
+            }
+        }
+    }
+    return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
+}
 
 // ============================================================================
 // conditions and updates
 // ============================================================================
@@ -149,24 +238,34 @@ String8 SharedBufferBase::dump(char const* prefix) const
 SharedBufferClient::DequeueCondition::DequeueCondition(
         SharedBufferClient* sbc) : ConditionBase(sbc)  {
 }
-bool SharedBufferClient::DequeueCondition::operator()() {
+bool SharedBufferClient::DequeueCondition::operator()() const {
     return stack.available > 0;
 }
 
 SharedBufferClient::LockCondition::LockCondition(
         SharedBufferClient* sbc, int buf) : ConditionBase(sbc), buf(buf) {
 }
-bool SharedBufferClient::LockCondition::operator()() {
-    return (buf != stack.head ||
+bool SharedBufferClient::LockCondition::operator()() const {
+    // NOTE: if stack.head is messed up, we could crash the client
+    // or cause some drawing artifacts. This is okay, as long as it is
+    // limited to the client.
+    return (buf != stack.index[stack.head] ||
             (stack.queued > 0 && stack.inUse != buf));
 }
 
 SharedBufferServer::ReallocateCondition::ReallocateCondition(
         SharedBufferBase* sbb, int buf) : ConditionBase(sbb), buf(buf) {
 }
-bool SharedBufferServer::ReallocateCondition::operator()() {
+bool SharedBufferServer::ReallocateCondition::operator()() const {
+    int32_t head = stack.head;
+    if (uint32_t(head) >= NUM_BUFFER_MAX) {
+        // if stack.head is messed up, we cannot allow the server to
+        // crash (since stack.head is mapped on the client side)
+        stack.status = BAD_VALUE;
+        return false;
+    }
     // TODO: we should also check that buf has been dequeued
-    return (buf != stack.head);
+    return (buf != stack.index[head]);
 }
 
 // ----------------------------------------------------------------------------
@@ -208,9 +307,11 @@ SharedBufferServer::RetireUpdate::RetireUpdate(
 ssize_t SharedBufferServer::RetireUpdate::operator()() {
     // head is only written in this function, which is single-thread.
     int32_t head = stack.head;
+    if (uint32_t(head) >= NUM_BUFFER_MAX)
+        return BAD_VALUE;
 
     // Preventively lock the current buffer before updating queued.
-    android_atomic_write(head, &stack.inUse);
+    android_atomic_write(stack.index[head], &stack.inUse);
 
     // Decrement the number of queued buffers
     int32_t queued;
@@ -226,7 +327,7 @@ ssize_t SharedBufferServer::RetireUpdate::operator()() {
 
     // lock the buffer before advancing head, which automatically unlocks
     // the buffer we preventively locked upon entering this function
-    android_atomic_write(head, &stack.inUse);
+    android_atomic_write(stack.index[head], &stack.inUse);
 
     // advance head
     android_atomic_write(head, &stack.head);
@@ -250,37 +351,19 @@ ssize_t SharedBufferServer::StatusUpdate::operator()() {
 
 SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
         int surface, int num, int32_t identity)
-    : SharedBufferBase(sharedClient, surface, num, identity), tail(0)
-{
-    tail = computeTail();
-}
-
-int32_t SharedBufferClient::computeTail() const
+    : SharedBufferBase(sharedClient, surface, num, identity),
+      tail(0), undoDequeueTail(0)
 {
     SharedBufferStack& stack( *mSharedStack );
-    // we need to make sure we read available and head coherently,
-    // w.r.t RetireUpdate.
-    int32_t newTail;
-    int32_t avail;
-    int32_t head;
-    do {
-        avail = stack.available;
-        head = stack.head;
-    } while (stack.available != avail);
-    newTail = head - avail + 1;
-    if (newTail < 0) {
-        newTail += mNumBuffers;
-    } else if (newTail >= mNumBuffers) {
-        newTail -= mNumBuffers;
-    }
-    return newTail;
+    tail = computeTail();
+    queued_head = stack.head;
 }
 
 ssize_t SharedBufferClient::dequeue()
 {
     SharedBufferStack& stack( *mSharedStack );
 
-    if (stack.head == tail && stack.available == 2) {
+    if (stack.head == tail && stack.available == mNumBuffers) {
         LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
                 tail, stack.head, stack.available, stack.queued);
     }
@@ -301,9 +384,10 @@ ssize_t SharedBufferClient::dequeue()
         LOGW("dequeue probably called from multiple threads!");
     }
 
-    int dequeued = tail;
+    undoDequeueTail = tail;
+    int dequeued = stack.index[tail];
     tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
-    LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail=%d, %s",
+    LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
             dequeued, tail, dump("").string());
 
     mDequeueTime[dequeued] = dequeueTime;
@@ -313,16 +397,19 @@ ssize_t SharedBufferClient::dequeue()
 
 status_t SharedBufferClient::undoDequeue(int buf)
 {
+    // TODO: we can only undo the previous dequeue, we should
+    // enforce that in the api
     UndoDequeueUpdate update(this);
     status_t err = updateCondition( update );
     if (err == NO_ERROR) {
-        tail = computeTail();
+        tail = undoDequeueTail;
     }
     return err;
 }
 
 status_t SharedBufferClient::lock(int buf)
 {
+    SharedBufferStack& stack( *mSharedStack );
     LockCondition condition(this, buf);
     status_t err = waitForCondition(condition);
     return err;
@@ -330,26 +417,37 @@ status_t SharedBufferClient::lock(int buf)
 
 status_t SharedBufferClient::queue(int buf)
 {
+    SharedBufferStack& stack( *mSharedStack );
+
+    queued_head = ((queued_head+1 >= mNumBuffers) ?
+                        0 : queued_head+1);
+    stack.index[queued_head] = buf;
+
     QueueUpdate update(this);
     status_t err = updateCondition( update );
     LOGD_IF(DEBUG_ATOMICS, "queued=%d, %s", buf, dump("").string());
-    SharedBufferStack& stack( *mSharedStack );
+
     const nsecs_t now = systemTime(SYSTEM_TIME_THREAD);
     stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
     return err;
 }
 
-bool SharedBufferClient::needNewBuffer(int buffer) const
+bool SharedBufferClient::needNewBuffer(int buf) const
 {
     SharedBufferStack& stack( *mSharedStack );
-    const uint32_t mask = 1<<buffer;
+    const uint32_t mask = 1<<buf;
     return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
 }
 
-status_t SharedBufferClient::setDirtyRegion(int buffer, const Region& reg)
+status_t SharedBufferClient::setCrop(int buf, const Rect& crop)
 {
     SharedBufferStack& stack( *mSharedStack );
-    return stack.setDirtyRegion(buffer, reg);
+    return stack.setCrop(buf, crop);
+}
+
+status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
+{
+    SharedBufferStack& stack( *mSharedStack );
+    return stack.setDirtyRegion(buf, reg);
 }
 
 // ----------------------------------------------------------------------------
@@ -363,20 +461,30 @@ SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
     mSharedStack->available = num;
     mSharedStack->queued = 0;
     mSharedStack->reallocMask = 0;
-    memset(mSharedStack->dirtyRegion, 0, sizeof(mSharedStack->dirtyRegion));
+    memset(mSharedStack->buffers, 0, sizeof(mSharedStack->buffers));
+    for (int i=0 ; i<num ; i++) {
+        mSharedStack->index[i] = i;
+    }
 }
 
 ssize_t SharedBufferServer::retireAndLock()
 {
     RetireUpdate update(this, mNumBuffers);
     ssize_t buf = updateCondition( update );
-    LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s", int(buf), dump("").string());
+    if (buf >= 0) {
+        if (uint32_t(buf) >= NUM_BUFFER_MAX)
+            return BAD_VALUE;
+        SharedBufferStack& stack( *mSharedStack );
+        buf = stack.index[buf];
+        LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s",
+                int(buf), dump("").string());
+    }
     return buf;
 }
 
-status_t SharedBufferServer::unlock(int buffer)
+status_t SharedBufferServer::unlock(int buf)
 {
-    UnlockUpdate update(this, buffer);
+    UnlockUpdate update(this, buf);
     status_t err = updateCondition( update );
     return err;
 }
@@ -403,17 +511,17 @@ int32_t SharedBufferServer::getQueuedCount() const
     return stack.queued;
 }
 
-status_t SharedBufferServer::assertReallocate(int buffer)
+status_t SharedBufferServer::assertReallocate(int buf)
 {
-    ReallocateCondition condition(this, buffer);
+    ReallocateCondition condition(this, buf);
     status_t err = waitForCondition(condition);
     return err;
 }
 
-Region SharedBufferServer::getDirtyRegion(int buffer) const
+Region SharedBufferServer::getDirtyRegion(int buf) const
 {
     SharedBufferStack& stack( *mSharedStack );
-    return stack.getDirtyRegion(buffer);
+    return stack.getDirtyRegion(buf);
 }
 
 SharedBufferStack::Statistics SharedBufferServer::getStats() const
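The central idea of this change is an extra level of indirection between FIFO slot and buffer id: the client's queue() records which buffer occupies each slot in stack.index[], and the server's retire path translates its head slot back into a buffer id through the same array, with range checks so a corrupted head written from the client side cannot crash the server. The standalone sketch below is illustrative only and not part of the patch; SlotRing and its members are invented names, and the real bookkeeping (queued counts, inUse locking, atomics, conditions) is omitted.

#include <cstdint>
#include <cstdio>

// Minimal sketch of the slot -> buffer-id indirection (hypothetical names).
struct SlotRing {
    static const int32_t NUM_MAX = 16;   // cap, analogous to NUM_BUFFER_MAX
    int32_t numBuffers;
    int32_t head;                        // last retired slot (server side)
    int32_t queued_head;                 // last queued slot (client side)
    int32_t index[NUM_MAX];              // slot -> buffer id

    explicit SlotRing(int32_t n) : numBuffers(n), head(0), queued_head(0) {
        for (int32_t i = 0; i < NUM_MAX; i++)
            index[i] = i;                // identity mapping at init
    }

    // Client: advance the queue slot and record which buffer went into it.
    void queue(int32_t buf) {
        queued_head = (queued_head + 1 >= numBuffers) ? 0 : queued_head + 1;
        index[queued_head] = buf;
    }

    // Server: advance head, then translate the slot back into a buffer id.
    // Returns -1 on an out-of-range slot (the real code returns BAD_VALUE).
    int32_t retireAndLock() {
        int32_t h = (head + 1 >= numBuffers) ? 0 : head + 1;
        if (uint32_t(h) >= uint32_t(NUM_MAX))
            return -1;
        head = h;
        return index[head];
    }
};

int main() {
    SlotRing ring(2);
    ring.queue(1);                                        // client queues buffer 1
    std::printf("retired=%d\n", ring.retireAndLock());    // server sees buffer 1
    ring.queue(0);                                        // client queues buffer 0
    std::printf("retired=%d\n", ring.retireAndLock());    // server sees buffer 0
    return 0;
}

The point of the indirection, as this sketch suggests, is that buffers no longer have to be queued in the fixed 0..N-1 rotation: the slot ring stops doubling as the buffer numbering, while the server-side bounds checks added in RetireUpdate and ReallocateCondition keep a misbehaving client from taking the server down.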