Diffstat (limited to 'media')
-rw-r--r--  media/libmedia/AudioTrack.cpp                              5
-rw-r--r--  media/libstagefright/AudioPlayer.cpp                       2
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp                     1
-rwxr-xr-x  media/libstagefright/CameraSource.cpp                      9
-rw-r--r--  media/libstagefright/MP3Extractor.cpp                     31
-rwxr-xr-x  media/libstagefright/MPEG4Writer.cpp                       4
-rwxr-xr-x  media/libstagefright/OMXCodec.cpp                         16
-rw-r--r--  media/libstagefright/SurfaceMediaSource.cpp              178
-rw-r--r--  media/libstagefright/foundation/ALooperRoster.cpp         49
-rw-r--r--  media/libstagefright/foundation/AMessage.cpp              25
-rw-r--r--  media/libstagefright/mpeg2ts/AnotherPacketSource.cpp      32
-rw-r--r--  media/libstagefright/tests/Android.mk                      7
-rw-r--r--  media/libstagefright/tests/SurfaceMediaSource_test.cpp   769
13 files changed, 927 insertions(+), 201 deletions(-)
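One notable piece of this change set is the synchronous request/reply mechanism added to ALooperRoster and AMessage further down (postAndAwaitResponse(), postReply(), senderAwaitsResponse()). The following is a minimal sketch of how a caller and a handler might use these new calls; the kWhatQuery message and the surrounding handler are hypothetical and not part of this patch:

    // Caller side: post a message and block until the handler replies.
    sp<AMessage> msg = new AMessage(kWhatQuery, mHandlerID);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);

    // Handler side, inside onMessageReceived(const sp<AMessage> &msg):
    case kWhatQuery:
    {
        uint32_t replyID;
        CHECK(msg->senderAwaitsResponse(&replyID));

        sp<AMessage> response = new AMessage;
        response->setInt32("err", OK);
        response->postReply(replyID);
        break;
    }

As the ALooperRoster hunks below show, postAndAwaitResponse() stamps the message with a unique "replyID", posts it via postMessage_l(), and then waits on mRepliesCondition until postReply() stores the response under that ID.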
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index 3949c39..cecedb5 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -259,6 +259,7 @@ status_t AudioTrack::set(      mMarkerReached = false;      mNewPosition = 0;      mUpdatePeriod = 0; +    mFlushed = false;      mFlags = flags;      AudioSystem::acquireAudioSessionId(mSessionId); @@ -337,6 +338,7 @@ void AudioTrack::start()      audio_track_cblk_t* cblk = mCblk;      if (mActive == 0) { +        mFlushed = false;          mActive = 1;          mNewPosition = cblk->server + mUpdatePeriod;          cblk->lock.lock(); @@ -437,6 +439,7 @@ void AudioTrack::flush_l()      mUpdatePeriod = 0;      if (!mActive) { +        mFlushed = true;          mAudioTrack->flush();          // Release AudioTrack callback thread in case it was waiting for new buffers          // in AudioTrack::obtainBuffer() @@ -655,7 +658,7 @@ status_t AudioTrack::getPosition(uint32_t *position)  {      if (position == 0) return BAD_VALUE;      AutoMutex lock(mLock); -    *position = mCblk->server; +    *position = mFlushed ? 0 : mCblk->server;      return NO_ERROR;  } diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp index dd69e6b..d41ab1b 100644 --- a/media/libstagefright/AudioPlayer.cpp +++ b/media/libstagefright/AudioPlayer.cpp @@ -180,6 +180,8 @@ void AudioPlayer::pause(bool playPendingSamples) {          } else {              mAudioTrack->stop();          } + +        mNumFramesPlayed = 0;      } else {          if (mAudioSink.get() != NULL) {              mAudioSink->pause(); diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp index f2673b3..bc42a42 100644 --- a/media/libstagefright/AwesomePlayer.cpp +++ b/media/libstagefright/AwesomePlayer.cpp @@ -1318,6 +1318,7 @@ void AwesomePlayer::onRTSPSeekDone() {  status_t AwesomePlayer::seekTo_l(int64_t timeUs) {      if (mRTSPController != NULL) { +        mSeekNotificationSent = false;          mRTSPController->seekAsync(timeUs, OnRTSPSeekDoneWrapper, this);          return OK;      } diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index ac3565f..256f3ba 100755 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -635,6 +635,12 @@ status_t CameraSource::stop() {      mStarted = false;      mFrameAvailableCondition.signal(); +    int64_t token; +    bool isTokenValid = false; +    if (mCamera != 0) { +        token = IPCThreadState::self()->clearCallingIdentity(); +        isTokenValid = true; +    }      releaseQueuedFrames();      while (!mFramesBeingEncoded.empty()) {          if (NO_ERROR != @@ -645,6 +651,9 @@ status_t CameraSource::stop() {      }      stopCameraRecording();      releaseCamera(); +    if (isTokenValid) { +        IPCThreadState::self()->restoreCallingIdentity(token); +    }      if (mCollectStats) {          LOGI("Frames received/encoded/dropped: %d/%d/%d in %lld us", diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp index 92e84c2..09f91f5 100644 --- a/media/libstagefright/MP3Extractor.cpp +++ b/media/libstagefright/MP3Extractor.cpp @@ -52,7 +52,10 @@ static bool Resync(          *post_id3_pos = 0;      } +    bool resync_from_head = false;      if (*inout_pos == 0) { +        resync_from_head = true; +          // Skip an optional ID3 header if syncing at the very beginning          // of the datasource. 
@@ -137,22 +140,20 @@ static bool Resync(          uint32_t header = U32_AT(tmp); -        if (match_header != 0 && (header & kMask) != (match_header & kMask)) { -            ++pos; -            ++tmp; -            --remainingBytes; -            continue; -        } -          size_t frame_size; -        int sample_rate, num_channels, bitrate; -        if (!GetMPEGAudioFrameSize( -                    header, &frame_size, -                    &sample_rate, &num_channels, &bitrate)) { -            ++pos; -            ++tmp; -            --remainingBytes; -            continue; +        if ((match_header != 0 && (header & kMask) != (match_header & kMask)) +                || !GetMPEGAudioFrameSize(header, &frame_size)) { +            if (resync_from_head) { +                // This isn't a valid mp3 file because it failed to detect +                // a header while a valid mp3 file should have a valid +                // header here. +                break; +            } else { +                ++pos; +                ++tmp; +                --remainingBytes; +                continue; +            }          }          LOGV("found possible 1st frame at %lld (header = 0x%08x)", pos, header); diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp index 5f58090..46d87df 100755 --- a/media/libstagefright/MPEG4Writer.cpp +++ b/media/libstagefright/MPEG4Writer.cpp @@ -1173,7 +1173,7 @@ void MPEG4Writer::Track::addOneSttsTableEntry(          size_t sampleCount, int32_t duration) {      if (duration == 0) { -        LOGW("%d 0-duration samples found: %d", sampleCount); +        LOGW("0-duration samples found: %d", sampleCount);      }      SttsTableEntry sttsEntry(sampleCount, duration);      mSttsTableEntries.push_back(sttsEntry); @@ -1304,7 +1304,7 @@ void MPEG4Writer::bufferChunk(const Chunk& chunk) {  void MPEG4Writer::writeChunkToFile(Chunk* chunk) {      LOGV("writeChunkToFile: %lld from %s track", -        chunk.mTimestampUs, chunk.mTrack->isAudio()? "audio": "video"); +        chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");      int32_t isFirstSample = true;      while (!chunk->mSamples.empty()) { diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp index 27dfeab..7f09319 100755 --- a/media/libstagefright/OMXCodec.cpp +++ b/media/libstagefright/OMXCodec.cpp @@ -808,7 +808,7 @@ status_t OMXCodec::setVideoPortFormatType(          }          if (format.eCompressionFormat == compressionFormat -            && format.eColorFormat == colorFormat) { +                && format.eColorFormat == colorFormat) {              found = true;              break;          } @@ -838,6 +838,15 @@ static size_t getFrameSize(          case OMX_COLOR_FormatYUV420Planar:          case OMX_COLOR_FormatYUV420SemiPlanar:          case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar: +        /* +        * FIXME: For the Opaque color format, the frame size does not +        * need to be (w*h*3)/2. It just needs to +        * be larger than certain minimum buffer size. However, +        * currently, this opaque foramt has been tested only on +        * YUV420 formats. If that is changed, then we need to revisit +        * this part in the future +        */ +        case OMX_COLOR_FormatAndroidOpaque:              return (width * height * 3) / 2;          default: @@ -887,7 +896,7 @@ status_t OMXCodec::isColorFormatSupported(          // Make sure that omx component does not overwrite          // the incremented index (bug 2897413).          
CHECK_EQ(index, portFormat.nIndex); -        if ((portFormat.eColorFormat == colorFormat)) { +        if (portFormat.eColorFormat == colorFormat) {              LOGV("Found supported color format: %d", portFormat.eColorFormat);              return OK;  // colorFormat is supported!          } @@ -2316,6 +2325,7 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {          {              CODEC_LOGV("OMX_EventPortSettingsChanged(port=%ld, data2=0x%08lx)",                         data1, data2); +            CHECK(mFilledBuffers.empty());              if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {                  onPortSettingsChanged(data1); @@ -2923,6 +2933,7 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {      size_t offset = 0;      int32_t n = 0; +      for (;;) {          MediaBuffer *srcBuffer;          if (mSeekTimeUs >= 0) { @@ -3021,6 +3032,7 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {                  CHECK(info->mMediaBuffer == NULL);                  info->mMediaBuffer = srcBuffer;              } else { +                CHECK(srcBuffer->data() != NULL) ;                  memcpy((uint8_t *)info->mData + offset,                          (const uint8_t *)srcBuffer->data()                              + srcBuffer->range_offset(), diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp index f219fa5..91b81c2 100644 --- a/media/libstagefright/SurfaceMediaSource.cpp +++ b/media/libstagefright/SurfaceMediaSource.cpp @@ -13,7 +13,6 @@   * See the License for the specific language governing permissions and   * limitations under the License.   */ -  // #define LOG_NDEBUG 0  #define LOG_TAG "SurfaceMediaSource" @@ -47,7 +46,9 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) :                  mSynchronousMode(true),                  mConnectedApi(NO_CONNECTED_API),                  mFrameRate(30), -                mStarted(false)   { +                mNumFramesReceived(0), +                mNumFramesEncoded(0), +                mStopped(false) {      LOGV("SurfaceMediaSource::SurfaceMediaSource");      sp<ISurfaceComposer> composer(ComposerService::getComposerService());      mGraphicBufferAlloc = composer->createGraphicBufferAlloc(); @@ -55,10 +56,9 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) :  SurfaceMediaSource::~SurfaceMediaSource() {      LOGV("SurfaceMediaSource::~SurfaceMediaSource"); -    if (mStarted) { +    if (!mStopped) {          stop();      } -    freeAllBuffers();  }  size_t SurfaceMediaSource::getQueuedCount() const { @@ -139,12 +139,12 @@ status_t SurfaceMediaSource::setBufferCount(int bufferCount) {      // here we're guaranteed that the client doesn't have dequeued buffers      // and will release all of its buffer references. -    freeAllBuffers();      mBufferCount = bufferCount;      mClientBufferCount = bufferCount;      mCurrentSlot = INVALID_BUFFER_SLOT;      mQueue.clear();      mDequeueCondition.signal(); +    freeAllBuffersLocked();      return OK;  } @@ -164,7 +164,7 @@ status_t SurfaceMediaSource::requestBuffer(int slot, sp<GraphicBuffer>* buf) {  status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,                                              uint32_t format, uint32_t usage) {      LOGV("dequeueBuffer"); - +    Mutex::Autolock lock(mMutex);      // Check for the buffer size- the client should just use the      // default width and height, and not try to set those. 
@@ -186,10 +186,7 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,          }      } -    Mutex::Autolock lock(mMutex); -      status_t returnFlags(OK); -      int found, foundSync;      int dequeuedCount = 0;      bool tryAgain = true; @@ -220,6 +217,9 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,                  LOGV("Waiting for the FIFO to drain");                  mDequeueCondition.wait(mMutex);              } +            if (mStopped) { +                return NO_INIT; +            }              // need to check again since the mode could have changed              // while we were waiting              minBufferCountNeeded = mSynchronousMode ? @@ -230,7 +230,7 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,                  ((mServerBufferCount != mBufferCount) ||                          (mServerBufferCount < minBufferCountNeeded))) {              // here we're guaranteed that mQueue is empty -            freeAllBuffers(); +            freeAllBuffersLocked();              mBufferCount = mServerBufferCount;              if (mBufferCount < minBufferCountNeeded)                  mBufferCount = minBufferCountNeeded; @@ -292,9 +292,12 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,          // for for some buffers to be consumed          tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT);          if (tryAgain) { -            LOGW("Waiting..In synchronous mode and no buffer to dQ"); +            LOGV("Waiting..In synchronous mode and no buffer to dequeue");              mDequeueCondition.wait(mMutex);          } +        if (mStopped) { +            return NO_INIT; +        }      }      if (mSynchronousMode && found == INVALID_BUFFER_SLOT) { @@ -306,7 +309,7 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,          return -EBUSY;      } -    const int buf = found; +    const int bufIndex = found;      *outBuf = found;      const bool useDefaultSize = !w && !h; @@ -324,9 +327,9 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,      // buffer is now in DEQUEUED (but can also be current at the same time,      // if we're in synchronous mode) -    mSlots[buf].mBufferState = BufferSlot::DEQUEUED; +    mSlots[bufIndex].mBufferState = BufferSlot::DEQUEUED; -    const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer); +    const sp<GraphicBuffer>& buffer(mSlots[bufIndex].mGraphicBuffer);      if ((buffer == NULL) ||          (uint32_t(buffer->width)  != w) ||          (uint32_t(buffer->height) != h) || @@ -344,22 +347,25 @@ status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,              if (updateFormat) {                  mPixelFormat = format;              } -            mSlots[buf].mGraphicBuffer = graphicBuffer; -            mSlots[buf].mRequestBufferCalled = false; +            mSlots[bufIndex].mGraphicBuffer = graphicBuffer; +            mSlots[bufIndex].mRequestBufferCalled = false;              returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;      }      return returnFlags;  } +// TODO: clean this up  status_t SurfaceMediaSource::setSynchronousMode(bool enabled) {      Mutex::Autolock lock(mMutex); +    if (mStopped) { +        LOGE("setSynchronousMode: SurfaceMediaSource has been stopped!"); +        return NO_INIT; +    } -    status_t err = OK;      if (!enabled) { -        // going to asynchronous mode, drain the queue 
-        while (mSynchronousMode != enabled && !mQueue.isEmpty()) { -            mDequeueCondition.wait(mMutex); -        } +        // Async mode is not allowed +        LOGE("SurfaceMediaSource can be used only synchronous mode!"); +        return INVALID_OPERATION;      }      if (mSynchronousMode != enabled) { @@ -370,13 +376,19 @@ status_t SurfaceMediaSource::setSynchronousMode(bool enabled) {          mSynchronousMode = enabled;          mDequeueCondition.signal();      } -    return err; +    return OK;  }  status_t SurfaceMediaSource::connect(int api,          uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) {      LOGV("SurfaceMediaSource::connect");      Mutex::Autolock lock(mMutex); + +    if (mStopped) { +        LOGE("Connect: SurfaceMediaSource has been stopped!"); +        return NO_INIT; +    } +      status_t err = NO_ERROR;      switch (api) {          case NATIVE_WINDOW_API_EGL: @@ -399,9 +411,25 @@ status_t SurfaceMediaSource::connect(int api,      return err;  } +// This is called by the client side when it is done +// TODO: Currently, this also sets mStopped to true which +// is needed for unblocking the encoder which might be +// waiting to read more frames. So if on the client side, +// the same thread supplies the frames and also calls stop +// on the encoder, the client has to call disconnect before +// it calls stop. +// In the case of the camera, +// that need not be required since the thread supplying the +// frames is separate than the one calling stop.  status_t SurfaceMediaSource::disconnect(int api) {      LOGV("SurfaceMediaSource::disconnect");      Mutex::Autolock lock(mMutex); + +    if (mStopped) { +        LOGE("disconnect: SurfaceMediaSoource is already stopped!"); +        return NO_INIT; +    } +      status_t err = NO_ERROR;      switch (api) {          case NATIVE_WINDOW_API_EGL: @@ -410,6 +438,9 @@ status_t SurfaceMediaSource::disconnect(int api) {          case NATIVE_WINDOW_API_CAMERA:              if (mConnectedApi == api) {                  mConnectedApi = NO_CONNECTED_API; +                mStopped = true; +                mDequeueCondition.signal(); +                mFrameAvailableCondition.signal();              } else {                  err = -EINVAL;              } @@ -421,45 +452,47 @@ status_t SurfaceMediaSource::disconnect(int api) {      return err;  } -status_t SurfaceMediaSource::queueBuffer(int buf, int64_t timestamp, +status_t SurfaceMediaSource::queueBuffer(int bufIndex, int64_t timestamp,          uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) {      LOGV("queueBuffer");      Mutex::Autolock lock(mMutex); -    if (buf < 0 || buf >= mBufferCount) { +    if (bufIndex < 0 || bufIndex >= mBufferCount) {          LOGE("queueBuffer: slot index out of range [0, %d]: %d", -                mBufferCount, buf); +                mBufferCount, bufIndex);          return -EINVAL; -    } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) { +    } else if (mSlots[bufIndex].mBufferState != BufferSlot::DEQUEUED) {          LOGE("queueBuffer: slot %d is not owned by the client (state=%d)", -                buf, mSlots[buf].mBufferState); +                bufIndex, mSlots[bufIndex].mBufferState);          return -EINVAL; -    } else if (!mSlots[buf].mRequestBufferCalled) { +    } else if (!mSlots[bufIndex].mRequestBufferCalled) {          LOGE("queueBuffer: slot %d was enqueued without requesting a " -                "buffer", buf); +                "buffer", bufIndex);          return -EINVAL;      }      
if (mSynchronousMode) {          // in synchronous mode we queue all buffers in a FIFO -        mQueue.push_back(buf); -        LOGV("Client queued buffer on slot: %d, Q size = %d", -                                                buf, mQueue.size()); +        mQueue.push_back(bufIndex); +        mNumFramesReceived++; +        LOGV("Client queued buf# %d @slot: %d, Q size = %d, handle = %p, timestamp = %lld", +            mNumFramesReceived, bufIndex, mQueue.size(), +            mSlots[bufIndex].mGraphicBuffer->handle, timestamp);      } else {          // in asynchronous mode we only keep the most recent buffer          if (mQueue.empty()) { -            mQueue.push_back(buf); +            mQueue.push_back(bufIndex);          } else {              Fifo::iterator front(mQueue.begin());              // buffer currently queued is freed              mSlots[*front].mBufferState = BufferSlot::FREE;              // and we record the new buffer index in the queued list -            *front = buf; +            *front = bufIndex;          }      } -    mSlots[buf].mBufferState = BufferSlot::QUEUED; -    mSlots[buf].mTimestamp = timestamp; +    mSlots[bufIndex].mBufferState = BufferSlot::QUEUED; +    mSlots[bufIndex].mTimestamp = timestamp;      // TODO: (Confirm) Don't want to signal dequeue here.      // May be just in asynchronous mode?      // mDequeueCondition.signal(); @@ -484,7 +517,7 @@ status_t SurfaceMediaSource::queueBuffer(int buf, int64_t timestamp,  // wait to hear from StageFrightRecorder to set the buffer FREE  // Make sure this is called when the mutex is locked  status_t SurfaceMediaSource::onFrameReceivedLocked() { -    LOGV("On Frame Received"); +    LOGV("On Frame Received locked");      // Signal the encoder that a new frame has arrived      mFrameAvailableCondition.signal(); @@ -503,19 +536,19 @@ status_t SurfaceMediaSource::onFrameReceivedLocked() {  } -void SurfaceMediaSource::cancelBuffer(int buf) { +void SurfaceMediaSource::cancelBuffer(int bufIndex) {      LOGV("SurfaceMediaSource::cancelBuffer");      Mutex::Autolock lock(mMutex); -    if (buf < 0 || buf >= mBufferCount) { +    if (bufIndex < 0 || bufIndex >= mBufferCount) {          LOGE("cancelBuffer: slot index out of range [0, %d]: %d", -                mBufferCount, buf); +                mBufferCount, bufIndex);          return; -    } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) { +    } else if (mSlots[bufIndex].mBufferState != BufferSlot::DEQUEUED) {          LOGE("cancelBuffer: slot %d is not owned by the client (state=%d)", -                buf, mSlots[buf].mBufferState); +                bufIndex, mSlots[bufIndex].mBufferState);          return;      } -    mSlots[buf].mBufferState = BufferSlot::FREE; +    mSlots[bufIndex].mBufferState = BufferSlot::FREE;      mDequeueCondition.signal();  } @@ -533,8 +566,8 @@ void SurfaceMediaSource::setFrameAvailableListener(      mFrameAvailableListener = listener;  } -void SurfaceMediaSource::freeAllBuffers() { -    LOGV("freeAllBuffers"); +void SurfaceMediaSource::freeAllBuffersLocked() { +    LOGV("freeAllBuffersLocked");      for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {          mSlots[i].mGraphicBuffer = 0;          mSlots[i].mBufferState = BufferSlot::FREE; @@ -650,10 +683,7 @@ int32_t SurfaceMediaSource::getFrameRate( ) const {  status_t SurfaceMediaSource::start(MetaData *params)  { -    LOGV("start"); -    Mutex::Autolock lock(mMutex); -    CHECK(!mStarted); -    mStarted = true; +    LOGV("started!");      return OK;  } @@ -664,8 +694,11 @@ status_t 
SurfaceMediaSource::stop()      Mutex::Autolock lock(mMutex);      // TODO: Add waiting on mFrameCompletedCondition here? -    mStarted = false; +    mStopped = true;      mFrameAvailableCondition.signal(); +    mDequeueCondition.signal(); +    mQueue.clear(); +    freeAllBuffersLocked();      return OK;  } @@ -690,23 +723,25 @@ sp<MetaData> SurfaceMediaSource::getFormat()  }  status_t SurfaceMediaSource::read( MediaBuffer **buffer, -                                const ReadOptions *options) +                                    const ReadOptions *options)  { +    Mutex::Autolock autoLock(mMutex) ; +      LOGV("Read. Size of queued buffer: %d", mQueue.size());      *buffer = NULL; -    Mutex::Autolock autoLock(mMutex) ;      // If the recording has started and the queue is empty, then just      // wait here till the frames come in from the client side -    while (mStarted && mQueue.empty()) { +    while (!mStopped && mQueue.empty()) {          LOGV("NO FRAMES! Recorder waiting for FrameAvailableCondition");          mFrameAvailableCondition.wait(mMutex);      }      // If the loop was exited as a result of stopping the recording,      // it is OK -    if (!mStarted) { -        return OK; +    if (mStopped) { +        LOGV("Read: SurfaceMediaSource is stopped. Returning NO_INIT;"); +        return NO_INIT;      }      // Update the current buffer info @@ -714,15 +749,20 @@ status_t SurfaceMediaSource::read( MediaBuffer **buffer,      // can be more than one "current" slots.      Fifo::iterator front(mQueue.begin());      mCurrentSlot = *front; +    mQueue.erase(front);      mCurrentBuf = mSlots[mCurrentSlot].mGraphicBuffer; +    int64_t prevTimeStamp = mCurrentTimestamp;      mCurrentTimestamp = mSlots[mCurrentSlot].mTimestamp; - +    mNumFramesEncoded++;      // Pass the data to the MediaBuffer. Pass in only the metadata      passMetadataBufferLocked(buffer);      (*buffer)->setObserver(this);      (*buffer)->add_ref(); -    (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp); +    (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp / 1000); +    LOGV("Frames encoded = %d, timestamp = %lld, time diff = %lld", +            mNumFramesEncoded, mCurrentTimestamp / 1000, +            mCurrentTimestamp / 1000 - prevTimeStamp / 1000);      return OK;  } @@ -745,15 +785,17 @@ void SurfaceMediaSource::passMetadataBufferLocked(MediaBuffer **buffer) {          new MediaBuffer(4 + sizeof(buffer_handle_t));      char *data = (char *)tempBuffer->data();      if (data == NULL) { -        LOGE("Cannot allocate memory for passing buffer metadata!"); +        LOGE("Cannot allocate memory for metadata buffer!");          return;      }      OMX_U32 type = kMetadataBufferTypeGrallocSource;      memcpy(data, &type, 4);      memcpy(data + 4, &(mCurrentBuf->handle), sizeof(buffer_handle_t));      *buffer = tempBuffer; -} +    LOGV("handle = %p, , offset = %d, length = %d", +            mCurrentBuf->handle, (*buffer)->range_length(), (*buffer)->range_offset()); +}  void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {      LOGV("signalBufferReturned"); @@ -761,16 +803,19 @@ void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {      bool foundBuffer = false;      Mutex::Autolock autoLock(mMutex); -    if (!mStarted) { -        LOGW("signalBufferReturned: mStarted = false! Nothing to do!"); +    if (mStopped) { +        LOGV("signalBufferReturned: mStopped = true! 
Nothing to do!");          return;      } -    for (Fifo::iterator it = mQueue.begin(); it != mQueue.end(); ++it) { -        CHECK(mSlots[*it].mGraphicBuffer != NULL); -        if (checkBufferMatchesSlot(*it, buffer)) { -            mSlots[*it].mBufferState = BufferSlot::FREE; -            mQueue.erase(it); +    for (int id = 0; id < NUM_BUFFER_SLOTS; id++) { +        if (mSlots[id].mGraphicBuffer == NULL) { +            continue; +        } +        if (checkBufferMatchesSlot(id, buffer)) { +            LOGV("Slot %d returned, matches handle = %p", id, +                    mSlots[id].mGraphicBuffer->handle); +            mSlots[id].mBufferState = BufferSlot::FREE;              buffer->setObserver(0);              buffer->release();              mDequeueCondition.signal(); @@ -794,5 +839,4 @@ bool SurfaceMediaSource::checkBufferMatchesSlot(int slot, MediaBuffer *buffer) {      return mSlots[slot].mGraphicBuffer->handle  ==  bufferHandle;  } -  } // end of namespace android diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp index 8aa1b15..e399f2f 100644 --- a/media/libstagefright/foundation/ALooperRoster.cpp +++ b/media/libstagefright/foundation/ALooperRoster.cpp @@ -27,7 +27,8 @@  namespace android {  ALooperRoster::ALooperRoster() -    : mNextHandlerID(1) { +    : mNextHandlerID(1), +      mNextReplyID(1) {  }  ALooper::handler_id ALooperRoster::registerHandler( @@ -70,15 +71,19 @@ void ALooperRoster::unregisterHandler(ALooper::handler_id handlerID) {      mHandlers.removeItemsAt(index);  } -void ALooperRoster::postMessage( +status_t ALooperRoster::postMessage(          const sp<AMessage> &msg, int64_t delayUs) {      Mutex::Autolock autoLock(mLock); +    return postMessage_l(msg, delayUs); +} +status_t ALooperRoster::postMessage_l( +        const sp<AMessage> &msg, int64_t delayUs) {      ssize_t index = mHandlers.indexOfKey(msg->target());      if (index < 0) {          LOGW("failed to post message. 
Target handler not registered."); -        return; +        return -ENOENT;      }      const HandlerInfo &info = mHandlers.valueAt(index); @@ -91,10 +96,12 @@ void ALooperRoster::postMessage(               msg->target());          mHandlers.removeItemsAt(index); -        return; +        return -ENOENT;      }      looper->post(msg, delayUs); + +    return OK;  }  void ALooperRoster::deliverMessage(const sp<AMessage> &msg) { @@ -145,4 +152,38 @@ sp<ALooper> ALooperRoster::findLooper(ALooper::handler_id handlerID) {      return looper;  } +status_t ALooperRoster::postAndAwaitResponse( +        const sp<AMessage> &msg, sp<AMessage> *response) { +    Mutex::Autolock autoLock(mLock); + +    uint32_t replyID = mNextReplyID++; + +    msg->setInt32("replyID", replyID); + +    status_t err = postMessage_l(msg, 0 /* delayUs */); + +    if (err != OK) { +        response->clear(); +        return err; +    } + +    ssize_t index; +    while ((index = mReplies.indexOfKey(replyID)) < 0) { +        mRepliesCondition.wait(mLock); +    } + +    *response = mReplies.valueAt(index); +    mReplies.removeItemsAt(index); + +    return OK; +} + +void ALooperRoster::postReply(uint32_t replyID, const sp<AMessage> &reply) { +    Mutex::Autolock autoLock(mLock); + +    CHECK(mReplies.indexOfKey(replyID) < 0); +    mReplies.add(replyID, reply); +    mRepliesCondition.broadcast(); +} +  }  // namespace android diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp index b592c3f..582bdba 100644 --- a/media/libstagefright/foundation/AMessage.cpp +++ b/media/libstagefright/foundation/AMessage.cpp @@ -27,6 +27,8 @@  namespace android { +extern ALooperRoster gLooperRoster; +  AMessage::AMessage(uint32_t what, ALooper::handler_id target)      : mWhat(what),        mTarget(target), @@ -227,11 +229,30 @@ bool AMessage::findRect(  }  void AMessage::post(int64_t delayUs) { -    extern ALooperRoster gLooperRoster; -      gLooperRoster.postMessage(this, delayUs);  } +status_t AMessage::postAndAwaitResponse(sp<AMessage> *response) { +    return gLooperRoster.postAndAwaitResponse(this, response); +} + +void AMessage::postReply(uint32_t replyID) { +    gLooperRoster.postReply(replyID, this); +} + +bool AMessage::senderAwaitsResponse(uint32_t *replyID) const { +    int32_t tmp; +    bool found = findInt32("replyID", &tmp); + +    if (!found) { +        return false; +    } + +    *replyID = static_cast<uint32_t>(tmp); + +    return true; +} +  sp<AMessage> AMessage::dup() const {      sp<AMessage> msg = new AMessage(mWhat, mTarget);      msg->mNumItems = mNumItems; diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp index 2e66a2c..ce07e32 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp @@ -136,24 +136,28 @@ void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) {  void AnotherPacketSource::queueDiscontinuity(          ATSParser::DiscontinuityType type,          const sp<AMessage> &extra) { -    sp<ABuffer> buffer = new ABuffer(0); -    buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type)); -    buffer->meta()->setMessage("extra", extra); -      Mutex::Autolock autoLock(mLock); -#if 0 -    if (type == ATSParser::DISCONTINUITY_SEEK -            || type == ATSParser::DISCONTINUITY_FORMATCHANGE) { -        // XXX Fix this: This will also clear any pending discontinuities, -        // If there's a pending 
DISCONTINUITY_FORMATCHANGE and the new -        // discontinuity is "just" a DISCONTINUITY_SEEK, this will effectively -        // downgrade the type of discontinuity received by the client. +    // Leave only discontinuities in the queue. +    List<sp<ABuffer> >::iterator it = mBuffers.begin(); +    while (it != mBuffers.end()) { +        sp<ABuffer> oldBuffer = *it; + +        int32_t oldDiscontinuityType; +        if (!oldBuffer->meta()->findInt32( +                    "discontinuity", &oldDiscontinuityType)) { +            it = mBuffers.erase(it); +            continue; +        } -        mBuffers.clear(); -        mEOSResult = OK; +        ++it;      } -#endif + +    mEOSResult = OK; + +    sp<ABuffer> buffer = new ABuffer(0); +    buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type)); +    buffer->meta()->setMessage("extra", extra);      mBuffers.push_back(buffer);      mCondition.signal(); diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk index 3ea8f39..357feb1 100644 --- a/media/libstagefright/tests/Android.mk +++ b/media/libstagefright/tests/Android.mk @@ -19,12 +19,13 @@ LOCAL_SHARED_LIBRARIES := \  	libbinder \  	libcutils \  	libgui \ -	libstlport \ -	libui \ -	libutils \ +	libmedia \  	libstagefright \  	libstagefright_omx \  	libstagefright_foundation \ +	libstlport \ +	libui \ +	libutils \  LOCAL_STATIC_LIBRARIES := \  	libgtest \ diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp index 5b32b68..d643a0b 100644 --- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp +++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp @@ -14,14 +14,17 @@   * limitations under the License.   */ -#define LOG_TAG "SurfaceMediaSource_test"  // #define LOG_NDEBUG 0 +#define LOG_TAG "SurfaceMediaSource_test"  #include <gtest/gtest.h>  #include <utils/String8.h>  #include <utils/Errors.h> +#include <fcntl.h> +#include <unistd.h>  #include <media/stagefright/SurfaceMediaSource.h> +#include <media/mediarecorder.h>  #include <gui/SurfaceTextureClient.h>  #include <ui/GraphicBuffer.h> @@ -33,24 +36,322 @@  #include <ui/FramebufferNativeWindow.h>  #include <media/stagefright/MediaDebug.h> +#include <media/stagefright/MediaBufferGroup.h>  #include <media/stagefright/MediaDefs.h>  #include <media/stagefright/MetaData.h> -#include <media/stagefright/MPEG4Writer.h>  #include <media/stagefright/OMXClient.h>  #include <media/stagefright/OMXCodec.h>  #include <OMX_Component.h>  #include "DummyRecorder.h" +  namespace android { +class GLTest : public ::testing::Test { +protected: + +    GLTest(): +            mEglDisplay(EGL_NO_DISPLAY), +            mEglSurface(EGL_NO_SURFACE), +            mEglContext(EGL_NO_CONTEXT) { +    } + +    virtual void SetUp() { +        LOGV("GLTest::SetUp()"); +        mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +        ASSERT_NE(EGL_NO_DISPLAY, mEglDisplay); + +        EGLint majorVersion; +        EGLint minorVersion; +        EXPECT_TRUE(eglInitialize(mEglDisplay, &majorVersion, &minorVersion)); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +        RecordProperty("EglVersionMajor", majorVersion); +        RecordProperty("EglVersionMajor", minorVersion); + +        EGLint numConfigs = 0; +        EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mGlConfig, +                1, &numConfigs)); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); + +        char* displaySecsEnv 
= getenv("GLTEST_DISPLAY_SECS"); +        if (displaySecsEnv != NULL) { +            mDisplaySecs = atoi(displaySecsEnv); +            if (mDisplaySecs < 0) { +                mDisplaySecs = 0; +            } +        } else { +            mDisplaySecs = 0; +        } + +        if (mDisplaySecs > 0) { +            mComposerClient = new SurfaceComposerClient; +            ASSERT_EQ(NO_ERROR, mComposerClient->initCheck()); + +            mSurfaceControl = mComposerClient->createSurface( +                    String8("Test Surface"), 0, +                    getSurfaceWidth(), getSurfaceHeight(), +                    PIXEL_FORMAT_RGB_888, 0); + +            ASSERT_TRUE(mSurfaceControl != NULL); +            ASSERT_TRUE(mSurfaceControl->isValid()); + +            SurfaceComposerClient::openGlobalTransaction(); +            ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF)); +            ASSERT_EQ(NO_ERROR, mSurfaceControl->show()); +            SurfaceComposerClient::closeGlobalTransaction(); + +            sp<ANativeWindow> window = mSurfaceControl->getSurface(); +            mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig, +                    window.get(), NULL); +        } else { +            EGLint pbufferAttribs[] = { +                EGL_WIDTH, getSurfaceWidth(), +                EGL_HEIGHT, getSurfaceHeight(), +                EGL_NONE }; + +            mEglSurface = eglCreatePbufferSurface(mEglDisplay, mGlConfig, +                    pbufferAttribs); +        } +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +        ASSERT_NE(EGL_NO_SURFACE, mEglSurface); + +        mEglContext = eglCreateContext(mEglDisplay, mGlConfig, EGL_NO_CONTEXT, +                getContextAttribs()); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +        ASSERT_NE(EGL_NO_CONTEXT, mEglContext); + +        EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, +                mEglContext)); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); + +        EGLint w, h; +        EXPECT_TRUE(eglQuerySurface(mEglDisplay, mEglSurface, EGL_WIDTH, &w)); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +        EXPECT_TRUE(eglQuerySurface(mEglDisplay, mEglSurface, EGL_HEIGHT, &h)); +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +        RecordProperty("EglSurfaceWidth", w); +        RecordProperty("EglSurfaceHeight", h); + +        glViewport(0, 0, w, h); +        ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +    } + +    virtual void TearDown() { +        // Display the result +        if (mDisplaySecs > 0 && mEglSurface != EGL_NO_SURFACE) { +            eglSwapBuffers(mEglDisplay, mEglSurface); +            sleep(mDisplaySecs); +        } + +        if (mComposerClient != NULL) { +            mComposerClient->dispose(); +        } +        if (mEglContext != EGL_NO_CONTEXT) { +            eglDestroyContext(mEglDisplay, mEglContext); +        } +        if (mEglSurface != EGL_NO_SURFACE) { +            eglDestroySurface(mEglDisplay, mEglSurface); +        } +        if (mEglDisplay != EGL_NO_DISPLAY) { +            eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, +                    EGL_NO_CONTEXT); +            eglTerminate(mEglDisplay); +        } +        ASSERT_EQ(EGL_SUCCESS, eglGetError()); +    } + +    virtual EGLint const* getConfigAttribs() { +        LOGV("GLTest getConfigAttribs"); +        static EGLint sDefaultConfigAttribs[] = { +            EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, +            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, +            EGL_RED_SIZE, 8, +       
     EGL_GREEN_SIZE, 8, +            EGL_BLUE_SIZE, 8, +            EGL_ALPHA_SIZE, 8, +            EGL_DEPTH_SIZE, 16, +            EGL_STENCIL_SIZE, 8, +            EGL_NONE }; + +        return sDefaultConfigAttribs; +    } + +    virtual EGLint const* getContextAttribs() { +        static EGLint sDefaultContextAttribs[] = { +            EGL_CONTEXT_CLIENT_VERSION, 2, +            EGL_NONE }; + +        return sDefaultContextAttribs; +    } + +    virtual EGLint getSurfaceWidth() { +        return 512; +    } + +    virtual EGLint getSurfaceHeight() { +        return 512; +    } + +    void loadShader(GLenum shaderType, const char* pSource, GLuint* outShader) { +        GLuint shader = glCreateShader(shaderType); +        ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +        if (shader) { +            glShaderSource(shader, 1, &pSource, NULL); +            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +            glCompileShader(shader); +            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +            GLint compiled = 0; +            glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled); +            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +            if (!compiled) { +                GLint infoLen = 0; +                glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen); +                ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +                if (infoLen) { +                    char* buf = (char*) malloc(infoLen); +                    if (buf) { +                        glGetShaderInfoLog(shader, infoLen, NULL, buf); +                        printf("Shader compile log:\n%s\n", buf); +                        free(buf); +                        FAIL(); +                    } +                } else { +                    char* buf = (char*) malloc(0x1000); +                    if (buf) { +                        glGetShaderInfoLog(shader, 0x1000, NULL, buf); +                        printf("Shader compile log:\n%s\n", buf); +                        free(buf); +                        FAIL(); +                    } +                } +                glDeleteShader(shader); +                shader = 0; +            } +        } +        ASSERT_TRUE(shader != 0); +        *outShader = shader; +    } + +    void createProgram(const char* pVertexSource, const char* pFragmentSource, +            GLuint* outPgm) { +        GLuint vertexShader, fragmentShader; +        { +            SCOPED_TRACE("compiling vertex shader"); +            loadShader(GL_VERTEX_SHADER, pVertexSource, &vertexShader); +            if (HasFatalFailure()) { +                return; +            } +        } +        { +            SCOPED_TRACE("compiling fragment shader"); +            loadShader(GL_FRAGMENT_SHADER, pFragmentSource, &fragmentShader); +            if (HasFatalFailure()) { +                return; +            } +        } + +        GLuint program = glCreateProgram(); +        ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +        if (program) { +            glAttachShader(program, vertexShader); +            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +            glAttachShader(program, fragmentShader); +            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError()); +            glLinkProgram(program); +            GLint linkStatus = GL_FALSE; +            glGetProgramiv(program, GL_LINK_STATUS, &linkStatus); +            if (linkStatus != GL_TRUE) { +                GLint bufLength = 0; +                glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength); +                if 
(bufLength) { +                    char* buf = (char*) malloc(bufLength); +                    if (buf) { +                        glGetProgramInfoLog(program, bufLength, NULL, buf); +                        printf("Program link log:\n%s\n", buf); +                        free(buf); +                        FAIL(); +                    } +                } +                glDeleteProgram(program); +                program = 0; +            } +        } +        glDeleteShader(vertexShader); +        glDeleteShader(fragmentShader); +        ASSERT_TRUE(program != 0); +        *outPgm = program; +    } +    static int abs(int value) { +        return value > 0 ? value : -value; +    } + +    ::testing::AssertionResult checkPixel(int x, int y, int r, +            int g, int b, int a, int tolerance=2) { +        GLubyte pixel[4]; +        String8 msg; +        glReadPixels(x, y, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel); +        GLenum err = glGetError(); +        if (err != GL_NO_ERROR) { +            msg += String8::format("error reading pixel: %#x", err); +            while ((err = glGetError()) != GL_NO_ERROR) { +                msg += String8::format(", %#x", err); +            } +            fprintf(stderr, "pixel check failure: %s\n", msg.string()); +            return ::testing::AssertionFailure( +                    ::testing::Message(msg.string())); +        } +        if (r >= 0 && abs(r - int(pixel[0])) > tolerance) { +            msg += String8::format("r(%d isn't %d)", pixel[0], r); +        } +        if (g >= 0 && abs(g - int(pixel[1])) > tolerance) { +            if (!msg.isEmpty()) { +                msg += " "; +            } +            msg += String8::format("g(%d isn't %d)", pixel[1], g); +        } +        if (b >= 0 && abs(b - int(pixel[2])) > tolerance) { +            if (!msg.isEmpty()) { +                msg += " "; +            } +            msg += String8::format("b(%d isn't %d)", pixel[2], b); +        } +        if (a >= 0 && abs(a - int(pixel[3])) > tolerance) { +            if (!msg.isEmpty()) { +                msg += " "; +            } +            msg += String8::format("a(%d isn't %d)", pixel[3], a); +        } +        if (!msg.isEmpty()) { +            fprintf(stderr, "pixel check failure: %s\n", msg.string()); +            return ::testing::AssertionFailure( +                    ::testing::Message(msg.string())); +        } else { +            return ::testing::AssertionSuccess(); +        } +    } + +    int mDisplaySecs; +    sp<SurfaceComposerClient> mComposerClient; +    sp<SurfaceControl> mSurfaceControl; + +    EGLDisplay mEglDisplay; +    EGLSurface mEglSurface; +    EGLContext mEglContext; +    EGLConfig  mGlConfig; +}; + +/////////////////////////////////////////////////////////////////////// +//    Class for  the NON-GL tests +///////////////////////////////////////////////////////////////////////  class SurfaceMediaSourceTest : public ::testing::Test {  public: -    SurfaceMediaSourceTest( ): mYuvTexWidth(64), mYuvTexHeight(66) { } -    sp<MPEG4Writer>  setUpWriter(OMXClient &client ); +    SurfaceMediaSourceTest( ): mYuvTexWidth(176), mYuvTexHeight(144) { }      void oneBufferPass(int width, int height ); +    void oneBufferPassNoFill(int width, int height );      static void fillYV12Buffer(uint8_t* buf, int w, int h, int stride) ;      static void fillYV12BufferRect(uint8_t* buf, int w, int h,                          int stride, const android_native_rect_t& rect) ; @@ -62,27 +363,156 @@ protected:          
mSMS->setSynchronousMode(true);          mSTC = new SurfaceTextureClient(mSMS);          mANW = mSTC; +    } +    virtual void TearDown() { +        mSMS.clear(); +        mSTC.clear(); +        mANW.clear();      } +    const int mYuvTexWidth; +    const int mYuvTexHeight; + +    sp<SurfaceMediaSource> mSMS; +    sp<SurfaceTextureClient> mSTC; +    sp<ANativeWindow> mANW; +}; + +/////////////////////////////////////////////////////////////////////// +//    Class for  the GL tests +/////////////////////////////////////////////////////////////////////// +class SurfaceMediaSourceGLTest : public GLTest { +public: + +    SurfaceMediaSourceGLTest( ): mYuvTexWidth(176), mYuvTexHeight(144) { } +    virtual EGLint const* getConfigAttribs(); +    void oneBufferPassGL(int num = 0); +    static sp<MediaRecorder> setUpMediaRecorder(int fileDescriptor, int videoSource, +        int outputFormat, int videoEncoder, int width, int height, int fps); +protected: + +    virtual void SetUp() { +        LOGV("SMS-GLTest::SetUp()"); +        android::ProcessState::self()->startThreadPool(); +        mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight); +        mSTC = new SurfaceTextureClient(mSMS); +        mANW = mSTC; + +        // Doing the setup related to the GL Side +        GLTest::SetUp(); +    }      virtual void TearDown() {          mSMS.clear();          mSTC.clear();          mANW.clear(); +        GLTest::TearDown(); +        eglDestroySurface(mEglDisplay, mSmsEglSurface);      } +    void setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr); +      const int mYuvTexWidth;      const int mYuvTexHeight;      sp<SurfaceMediaSource> mSMS;      sp<SurfaceTextureClient> mSTC;      sp<ANativeWindow> mANW; - +    EGLConfig  mSMSGlConfig; +    EGLSurface  mSmsEglSurface;  }; +///////////////////////////////////////////////////////////////////// +// Methods in SurfaceMediaSourceGLTest +///////////////////////////////////////////////////////////////////// +EGLint const* SurfaceMediaSourceGLTest::getConfigAttribs() { +        LOGV("SurfaceMediaSourceGLTest getConfigAttribs"); +    static EGLint sDefaultConfigAttribs[] = { +        EGL_SURFACE_TYPE, EGL_WINDOW_BIT, +        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, +        EGL_RED_SIZE, 8, +        EGL_GREEN_SIZE, 8, +        EGL_BLUE_SIZE, 8, +        EGL_RECORDABLE_ANDROID, EGL_TRUE, +        EGL_NONE }; + +    return sDefaultConfigAttribs; +} + +// One pass of dequeuing and queuing a GLBuffer +void SurfaceMediaSourceGLTest::oneBufferPassGL(int num) { +    int d = num % 50; +    float f = 0.2f; // 0.1f * d; + +    glClearColor(0, 0.3, 0, 0.6); +    glClear(GL_COLOR_BUFFER_BIT); + +    glEnable(GL_SCISSOR_TEST); +    glScissor(4 + d, 4 + d, 4, 4); +    glClearColor(1.0 - f, f, f, 1.0); +    glClear(GL_COLOR_BUFFER_BIT); + +    glScissor(24 + d, 48 + d, 4, 4); +    glClearColor(f, 1.0 - f, f, 1.0); +    glClear(GL_COLOR_BUFFER_BIT); + +    glScissor(37 + d, 17 + d, 4, 4); +    glClearColor(f, f, 1.0 - f, 1.0); +    glClear(GL_COLOR_BUFFER_BIT); + +    // The following call dequeues and queues the buffer +    eglSwapBuffers(mEglDisplay, mSmsEglSurface); +    glDisable(GL_SCISSOR_TEST); +} + +// Set up the MediaRecorder which runs in the same process as mediaserver +sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int videoSource, +        int outputFormat, int videoEncoder, int width, int height, int fps) { +    sp<MediaRecorder> mr = new MediaRecorder(); +    mr->setVideoSource(videoSource); +    
mr->setOutputFormat(outputFormat); +    mr->setVideoEncoder(videoEncoder); +    mr->setOutputFile(fd, 0, 0); +    mr->setVideoSize(width, height); +    mr->setVideoFrameRate(fps); +    mr->prepare(); +    LOGV("Starting MediaRecorder..."); +    CHECK_EQ(OK, mr->start()); +    return mr; +} + +// query the mediarecorder for a surfacemeidasource and create an egl surface with that +void SurfaceMediaSourceGLTest::setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr) { +    sp<ISurfaceTexture> iST = mr->querySurfaceMediaSourceFromMediaServer(); +    mSTC = new SurfaceTextureClient(iST); +    mANW = mSTC; + +    EGLint numConfigs = 0; +    EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mSMSGlConfig, +            1, &numConfigs)); +    ASSERT_EQ(EGL_SUCCESS, eglGetError()); + +    LOGV("Native Window = %p, mSTC = %p", mANW.get(), mSTC.get()); + +    mSmsEglSurface = eglCreateWindowSurface(mEglDisplay, mSMSGlConfig, +                                mANW.get(), NULL); +    ASSERT_EQ(EGL_SUCCESS, eglGetError()); +    ASSERT_NE(EGL_NO_SURFACE, mSmsEglSurface) ; + +    EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mSmsEglSurface, mSmsEglSurface, +            mEglContext)); +    ASSERT_EQ(EGL_SUCCESS, eglGetError()); +} + + +///////////////////////////////////////////////////////////////////// +// Methods in SurfaceMediaSourceTest +///////////////////////////////////////////////////////////////////// + +// One pass of dequeuing and queuing the buffer. Fill it in with +// cpu YV12 buffer  void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) { -    LOGV("One Buffer Pass");      ANativeWindowBuffer* anb;      ASSERT_EQ(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));      ASSERT_TRUE(anb != NULL); @@ -99,42 +529,16 @@ void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) {      ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer()));  } -sp<MPEG4Writer> SurfaceMediaSourceTest::setUpWriter(OMXClient &client ) { -    // Writing to a file -    const char *fileName = "/sdcard/outputSurfEnc.mp4"; -    sp<MetaData> enc_meta = new MetaData; -    enc_meta->setInt32(kKeyBitRate, 300000); -    enc_meta->setInt32(kKeyFrameRate, 30); - -    enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4); - -    sp<MetaData> meta = mSMS->getFormat(); - -    int32_t width, height, stride, sliceHeight, colorFormat; -    CHECK(meta->findInt32(kKeyWidth, &width)); -    CHECK(meta->findInt32(kKeyHeight, &height)); -    CHECK(meta->findInt32(kKeyStride, &stride)); -    CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight)); -    CHECK(meta->findInt32(kKeyColorFormat, &colorFormat)); - -    enc_meta->setInt32(kKeyWidth, width); -    enc_meta->setInt32(kKeyHeight, height); -    enc_meta->setInt32(kKeyIFramesInterval, 1); -    enc_meta->setInt32(kKeyStride, stride); -    enc_meta->setInt32(kKeySliceHeight, sliceHeight); -    // TODO: overwriting the colorformat since the format set by GRAlloc -    // could be wrong or not be read by OMX -    enc_meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar); - - -    sp<MediaSource> encoder = -        OMXCodec::Create( -                client.interface(), enc_meta, true /* createEncoder */, mSMS); - -    sp<MPEG4Writer> writer = new MPEG4Writer(fileName); -    writer->addSource(encoder); +// Dequeuing and queuing the buffer without really filling it in. 
+void SurfaceMediaSourceTest::oneBufferPassNoFill(int width, int height ) { +    ANativeWindowBuffer* anb; +    ASSERT_EQ(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb)); +    ASSERT_TRUE(anb != NULL); -    return writer; +    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false)); +    // ASSERT_EQ(NO_ERROR, mANW->lockBuffer(mANW.get(), buf->getNativeBuffer())); +    // We do not fill the buffer in. Just queue it back. +    ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer()));  }  // Fill a YV12 buffer with a multi-colored checkerboard pattern @@ -216,46 +620,53 @@ struct SimpleDummyRecorder {              return OK;          }  }; -  ///////////////////////////////////////////////////////////////////  //           TESTS +// SurfaceMediaSourceTest class contains tests that fill the buffers +// using the cpu calls +// SurfaceMediaSourceGLTest class contains tests that fill the buffers +// using the GL calls. +// TODO: None of the tests actually verify the encoded images.. so at this point, +// these are mostly functionality tests + visual inspection +////////////////////////////////////////////////////////////////////// +  // Just pass one buffer from the native_window to the SurfaceMediaSource -TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotOneBufferPass) { +// Dummy Encoder +static int testId = 1; +TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotOneBufferPass) { +    LOGV("Test # %d", testId++);      LOGV("Testing OneBufferPass ******************************"); -    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(), -            0, 0, HAL_PIXEL_FORMAT_YV12)); -    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(), -            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN)); - +    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(), +            HAL_PIXEL_FORMAT_YV12));      oneBufferPass(mYuvTexWidth, mYuvTexHeight);  }  // Pass the buffer with the wrong height and weight and should not be accepted -TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) { +// Dummy Encoder +TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) { +    LOGV("Test # %d", testId++);      LOGV("Testing Wrong size BufferPass ******************************");      // setting the client side buffer size different than the server size -    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(), -             10, 10, HAL_PIXEL_FORMAT_YV12)); -    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(), -            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN)); +    ASSERT_EQ(NO_ERROR, native_window_set_buffers_dimensions(mANW.get(), +             10, 10)); +    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(), +            HAL_PIXEL_FORMAT_YV12));      ANativeWindowBuffer* anb; -    // make sure we get an error back when dequeuing! +    // Note: make sure we get an ERROR back when dequeuing!      
ASSERT_NE(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));  } -  // pass multiple buffers from the native_window the SurfaceMediaSource -// A dummy writer is used to simulate actual MPEG4Writer -TEST_F(SurfaceMediaSourceTest,  EncodingFromCpuFilledYV12BufferNpotMultiBufferPass) { +// Dummy Encoder +TEST_F(SurfaceMediaSourceTest,  DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) { +    LOGV("Test # %d", testId++);      LOGV("Testing MultiBufferPass, Dummy Recorder *********************"); -    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(), -            0, 0, HAL_PIXEL_FORMAT_YV12)); -    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(), -            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN)); +    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(), +            HAL_PIXEL_FORMAT_YV12));      SimpleDummyRecorder writer(mSMS);      writer.start(); @@ -272,14 +683,13 @@ TEST_F(SurfaceMediaSourceTest,  EncodingFromCpuFilledYV12BufferNpotMultiBufferPa  }  // Delayed pass of multiple buffers from the native_window the SurfaceMediaSource -// A dummy writer is used to simulate actual MPEG4Writer -TEST_F(SurfaceMediaSourceTest,  EncodingFromCpuFilledYV12BufferNpotMultiBufferPassLag) { +// Dummy Encoder +TEST_F(SurfaceMediaSourceTest,  DISABLED_DummyLagEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) { +    LOGV("Test # %d", testId++);      LOGV("Testing MultiBufferPass, Dummy Recorder Lagging **************"); -    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(), -            0, 0, HAL_PIXEL_FORMAT_YV12)); -    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(), -            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN)); +    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(), +            HAL_PIXEL_FORMAT_YV12));      SimpleDummyRecorder writer(mSMS);      writer.start(); @@ -299,12 +709,11 @@ TEST_F(SurfaceMediaSourceTest,  EncodingFromCpuFilledYV12BufferNpotMultiBufferPa  // pass multiple buffers from the native_window the SurfaceMediaSource  // A dummy writer (MULTITHREADED) is used to simulate actual MPEG4Writer -TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPassThreaded) { +TEST_F(SurfaceMediaSourceTest, DISABLED_DummyThreadedEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) { +    LOGV("Test # %d", testId++);      LOGV("Testing MultiBufferPass, Dummy Recorder Multi-Threaded **********"); -    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(), -            0, 0, HAL_PIXEL_FORMAT_YV12)); -    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(), -            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN)); +    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(), +            HAL_PIXEL_FORMAT_YV12));      DummyRecorder writer(mSMS);      writer.start(); @@ -318,32 +727,210 @@ TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPas      writer.stop();  } -// Test to examine the actual encoding. 
-// colorformat and encoding from GRAlloc data is resolved
-TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuFilledYV12BufferNpotWrite) {
-    LOGV("Testing the whole pipeline with actual Recorder");
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-            0, 0, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
-    OMXClient client;
-    CHECK_EQ(OK, client.connect());
-
-    sp<MPEG4Writer> writer = setUpWriter(client);
-    int64_t start = systemTime();
-    CHECK_EQ(OK, writer->start());
+// Test to examine actual encoding using MediaRecorder
+// We use the mediaserver to create a MediaRecorder and send
+// it back to us. So SurfaceMediaSource lives in the same process
+// as the mediaserver.
+// Very close to the actual camera, except that the
+// buffers are filled and queued by the CPU instead of GL.
+TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuYV12BufferNpotWriteMediaServer) {
+    LOGV("Test # %d", testId++);
+    LOGV("************** Testing the whole pipeline with actual MediaRecorder ***********");
+    LOGV("************** SurfaceMediaSource is same process as mediaserver    ***********");
+
+    const char *fileName = "/sdcard/outputSurfEncMSource.mp4";
+    int fd = open(fileName, O_RDWR | O_CREAT, 0744);
+    if (fd < 0) {
+        LOGE("ERROR: Could not open the file %s, fd = %d !!", fileName, fd);
+    }
+    CHECK(fd >= 0);
+
+    sp<MediaRecorder> mr = SurfaceMediaSourceGLTest::setUpMediaRecorder(fd,
+            VIDEO_SOURCE_GRALLOC_BUFFER,
+            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth,
+            mYuvTexHeight, 30);
+    // get the reference to the SurfaceMediaSource living in
+    // mediaserver that is created by StagefrightRecorder
+    sp<ISurfaceTexture> iST = mr->querySurfaceMediaSourceFromMediaServer();
+    mSTC = new SurfaceTextureClient(iST);
+    mANW = mSTC;
+    ASSERT_EQ(NO_ERROR, native_window_api_connect(mANW.get(), NATIVE_WINDOW_API_CPU));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+                                                HAL_PIXEL_FORMAT_YV12));
     int32_t nFramesCount = 0;
     while (nFramesCount <= 300) {
-        oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+        oneBufferPassNoFill(mYuvTexWidth, mYuvTexHeight);
         nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
     }
-    CHECK_EQ(OK, writer->stop());
-    writer.clear();
-    int64_t end = systemTime();
-    client.disconnect();
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_CPU));
+    LOGV("Stopping MediaRecorder...");
+    CHECK_EQ(OK, mr->stop());
+    mr.clear();
+    close(fd);
 }
+//////////////////////////////////////////////////////////////////////
+// GL tests
+/////////////////////////////////////////////////////////////////////
+
+// Test to examine whether we can choose the Recordable Android GLConfig
+// DummyRecorder used - no real encoding here
+TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWrite) {
+    LOGV("Test # %d", testId++);
+    LOGV("Test to verify creating a surface w/ right config *********");
+
+    mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
+    mSTC = new SurfaceTextureClient(mSMS);
+    mANW = mSTC;
+
+    DummyRecorder writer(mSMS);
+    writer.start();
+
+    EGLint numConfigs = 0;
+    EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mSMSGlConfig,
+            1, &numConfigs));
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+
+    mSmsEglSurface = eglCreateWindowSurface(mEglDisplay, mSMSGlConfig,
+                                mANW.get(), NULL);
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+    ASSERT_NE(EGL_NO_SURFACE, mSmsEglSurface);
+
+    EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mSmsEglSurface, mSmsEglSurface,
+            mEglContext));
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPassGL();
+        nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
+    }
+
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_EGL));
+    writer.stop();
+}
+// Test to examine whether we can render GL buffers into the surface
+// created with the native window handle
+TEST_F(SurfaceMediaSourceGLTest, RenderingToRecordableEGLSurfaceWorks) {
+    LOGV("Test # %d", testId++);
+    LOGV("RenderingToRecordableEGLSurfaceWorks *********************");
+    // Do the producer side of things
+    glClearColor(0.6, 0.6, 0.6, 0.6);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glEnable(GL_SCISSOR_TEST);
+    glScissor(4, 4, 4, 4);
+    glClearColor(1.0, 0.0, 0.0, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glScissor(24, 48, 4, 4);
+    glClearColor(0.0, 1.0, 0.0, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glScissor(37, 17, 4, 4);
+    glClearColor(0.0, 0.0, 1.0, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    EXPECT_TRUE(checkPixel( 0,  0, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(63,  0, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(63, 63, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 0, 63, 153, 153, 153, 153));
+
+    EXPECT_TRUE(checkPixel( 4,  7, 255,   0,   0, 255));
+    EXPECT_TRUE(checkPixel(25, 51,   0, 255,   0, 255));
+    EXPECT_TRUE(checkPixel(40, 19,   0,   0, 255, 255));
+    EXPECT_TRUE(checkPixel(29, 51, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 5, 32, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(13,  8, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(46,  3, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(30, 33, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 6, 52, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(55, 33, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(16, 29, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 1, 30, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(41, 37, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(46, 29, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(15, 25, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 3, 52, 153, 153, 153, 153));
+}
+
+// Test to examine the actual encoding with GL buffers
+// Actual encoder, Actual GL Buffers Filled SurfaceMediaSource
+// The same pattern is rendered every frame
+TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaSameImageEachBufNpotWrite) {
+    LOGV("Test # %d", testId++);
+    LOGV("************** Testing the whole pipeline with actual Recorder ***********");
+    LOGV("************** GL Filling the buffers ***********");
+    // Note: No need to set the colorformat for the buffers. The colorformat is
+    // in the gralloc buffers themselves.
+
+    const char *fileName = "/sdcard/outputSurfEncMSourceGL.mp4";
+    int fd = open(fileName, O_RDWR | O_CREAT, 0744);
+    if (fd < 0) {
+        LOGE("ERROR: Could not open the file %s, fd = %d !!", fileName, fd);
+    }
+    CHECK(fd >= 0);
+
+    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
+
+    // get the reference to the SurfaceMediaSource living in
+    // mediaserver that is created by StagefrightRecorder
+    setUpEGLSurfaceFromMediaRecorder(mr);
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPassGL();
+        nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
+    }
+
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_EGL));
+    LOGV("Stopping MediaRecorder...");
+    CHECK_EQ(OK, mr->stop());
+    mr.clear();
+    close(fd);
+}
+
+// Test to examine the actual encoding from the GL Buffers
+// Actual encoder, Actual GL Buffers Filled SurfaceMediaSource
+// A different pattern is rendered every frame
+TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaDiffImageEachBufNpotWrite) {
+    LOGV("Test # %d", testId++);
+    LOGV("************** Testing the whole pipeline with actual Recorder ***********");
+    LOGV("************** Diff GL Filling the buffers ***********");
+    // Note: No need to set the colorformat for the buffers. The colorformat is
+    // in the gralloc buffers themselves.
+
+    const char *fileName = "/sdcard/outputSurfEncMSourceGLDiff.mp4";
+    int fd = open(fileName, O_RDWR | O_CREAT, 0744);
+    if (fd < 0) {
+        LOGE("ERROR: Could not open the file %s, fd = %d !!", fileName, fd);
+    }
+    CHECK(fd >= 0);
+
+    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
+
+    // get the reference to the SurfaceMediaSource living in
+    // mediaserver that is created by StagefrightRecorder
+    setUpEGLSurfaceFromMediaRecorder(mr);
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPassGL(nFramesCount);
+        nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
+    }
+
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_EGL));
+    LOGV("Stopping MediaRecorder...");
+    CHECK_EQ(OK, mr->stop());
+    mr.clear();
+    close(fd);
+}
 } // namespace android
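Note: the CPU-side tests in this hunk queue YV12 buffers prepared by a checkerboard fill helper that is declared earlier in the test file and not shown here. The following is a minimal sketch of one way such a fill can work, assuming a planar YV12 layout (full-resolution Y plane followed by quarter-resolution V and U planes) and a chroma stride of yStride / 2; the function name, block sizes, and sample values are illustrative, not the test's actual helper.

    #include <stdint.h>

    // Hypothetical helper (not the test's fillYV12Buffer): writes a simple
    // block pattern into a YV12 buffer. Y plane is h * yStride bytes, then the
    // V (Cr) plane, then the U (Cb) plane, each (h/2) * (yStride/2) bytes.
    static void fillYV12Checkerboard(uint8_t* buf, int w, int h, int yStride) {
        uint8_t* y = buf;
        uint8_t* v = buf + yStride * h;
        uint8_t* u = v + (yStride / 2) * (h / 2);

        for (int row = 0; row < h; row++) {
            for (int col = 0; col < w; col++) {
                bool light = ((row / 8) + (col / 8)) % 2 == 0;  // 8x8 checker blocks
                y[row * yStride + col] = light ? 235 : 16;      // video-range luma
            }
        }
        for (int row = 0; row < h / 2; row++) {
            for (int col = 0; col < w / 2; col++) {
                bool light = ((row / 4) + (col / 4)) % 2 == 0;  // chroma is quarter resolution
                u[row * (yStride / 2) + col] = light ? 200 : 64;
                v[row * (yStride / 2) + col] = light ? 64 : 200;
            }
        }
    }

A helper like this would be called between dequeueBuffer() and queueBuffer() in the CPU-fill path, after locking the gralloc buffer for software writes.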
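Note: ChooseAndroidRecordableEGLConfigDummyWrite relies on getConfigAttribs(), defined earlier in the test file and not part of this hunk. Below is a plausible sketch of an attribute list that asks EGL for a window config whose buffers an encoder can consume, via the EGL_ANDROID_recordable extension; the exact attributes the test passes are an assumption here.

    #include <EGL/egl.h>
    #include <EGL/eglext.h>

    // Hypothetical attribute list for eglChooseConfig(). EGL_RECORDABLE_ANDROID
    // (from EGL_ANDROID_recordable, declared in <EGL/eglext.h>) requests configs
    // that can be recorded by the video encoder.
    static const EGLint kRecordableConfigAttribs[] = {
        EGL_SURFACE_TYPE,       EGL_WINDOW_BIT,
        EGL_RENDERABLE_TYPE,    EGL_OPENGL_ES2_BIT,
        EGL_RED_SIZE,           8,
        EGL_GREEN_SIZE,         8,
        EGL_BLUE_SIZE,          8,
        EGL_RECORDABLE_ANDROID, EGL_TRUE,
        EGL_NONE
    };

An array of this shape is what gets passed as the second argument of eglChooseConfig() in the test above, before the chosen config is used to create the window surface over the SurfaceMediaSource's ANativeWindow.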
