Diffstat (limited to 'media/libstagefright')
-rw-r--r--  media/libstagefright/Android.mk                             |   4
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp                      | 120
-rw-r--r--  media/libstagefright/CameraSource.cpp                       | 507
-rw-r--r--  media/libstagefright/CameraSourceTimeLapse.cpp              | 518
-rw-r--r--  media/libstagefright/MPEG4Writer.cpp                        |   3
-rw-r--r--  media/libstagefright/MediaBuffer.cpp                        |  29
-rw-r--r--  media/libstagefright/MediaSourceSplitter.cpp                | 234
-rw-r--r--  media/libstagefright/OMXCodec.cpp                           | 335
-rw-r--r--  media/libstagefright/VideoSourceDownSampler.cpp             | 142
-rw-r--r--  media/libstagefright/colorconversion/Android.mk             |   8
-rw-r--r--  media/libstagefright/colorconversion/SoftwareRenderer.cpp   | 183
-rw-r--r--  media/libstagefright/include/AwesomePlayer.h                |   8
-rw-r--r--  media/libstagefright/include/OMX.h                          |  10
-rw-r--r--  media/libstagefright/include/OMXNodeInstance.h              |   8
-rw-r--r--  media/libstagefright/include/SoftwareRenderer.h             |  18
-rw-r--r--  media/libstagefright/omx/OMX.cpp                            |  23
-rw-r--r--  media/libstagefright/omx/OMXNodeInstance.cpp                | 130
-rw-r--r--  media/libstagefright/rtsp/ARTSPConnection.cpp               | 263
-rw-r--r--  media/libstagefright/rtsp/ARTSPConnection.h                 |  20
-rw-r--r--  media/libstagefright/rtsp/ASessionDescription.cpp           |  16
-rw-r--r--  media/libstagefright/rtsp/Android.mk                        |   1
-rw-r--r--  media/libstagefright/rtsp/MyHandler.h                       |  25
-rw-r--r--  media/libstagefright/yuv/Android.mk                         |  13
-rw-r--r--  media/libstagefright/yuv/YUVCanvas.cpp                      | 111
-rw-r--r--  media/libstagefright/yuv/YUVImage.cpp                       | 413

25 files changed, 2993 insertions(+), 149 deletions(-)
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index d674547..d1870ee 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -10,6 +10,8 @@ LOCAL_SRC_FILES:= \
AudioSource.cpp \
AwesomePlayer.cpp \
CameraSource.cpp \
+ CameraSourceTimeLapse.cpp \
+ VideoSourceDownSampler.cpp \
DataSource.cpp \
DRMExtractor.cpp \
ESDS.cpp \
@@ -25,6 +27,7 @@ LOCAL_SRC_FILES:= \
MediaDefs.cpp \
MediaExtractor.cpp \
MediaSource.cpp \
+ MediaSourceSplitter.cpp \
MetaData.cpp \
NuCachedSource2.cpp \
NuHTTPDataSource.cpp \
@@ -60,6 +63,7 @@ LOCAL_SHARED_LIBRARIES := \
libsonivox \
libvorbisidec \
libsurfaceflinger_client \
+ libstagefright_yuv \
libcamera_client \
libdrmframework
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 6eeab05..cf04e92 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -45,7 +45,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXCodec.h>
-#include <surfaceflinger/ISurface.h>
+#include <surfaceflinger/Surface.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -101,13 +101,14 @@ struct AwesomeLocalRenderer : public AwesomeRenderer {
bool previewOnly,
const char *componentName,
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<ISurface> &isurface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight)
: mTarget(NULL),
mLibHandle(NULL) {
init(previewOnly, componentName,
- colorFormat, surface, displayWidth,
+ colorFormat, isurface, surface, displayWidth,
displayHeight, decodedWidth, decodedHeight);
}
@@ -139,7 +140,8 @@ private:
bool previewOnly,
const char *componentName,
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<ISurface> &isurface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight);
@@ -151,7 +153,8 @@ void AwesomeLocalRenderer::init(
bool previewOnly,
const char *componentName,
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<ISurface> &isurface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight) {
if (!previewOnly) {
@@ -177,7 +180,7 @@ void AwesomeLocalRenderer::init(
if (func) {
mTarget =
- (*func)(surface, componentName, colorFormat,
+ (*func)(isurface, componentName, colorFormat,
displayWidth, displayHeight,
decodedWidth, decodedHeight);
}
@@ -191,6 +194,35 @@ void AwesomeLocalRenderer::init(
}
}
+struct AwesomeNativeWindowRenderer : public AwesomeRenderer {
+ AwesomeNativeWindowRenderer(const sp<ANativeWindow> &nativeWindow)
+ : mNativeWindow(nativeWindow) {
+ }
+
+ virtual void render(MediaBuffer *buffer) {
+ status_t err = mNativeWindow->queueBuffer(
+ mNativeWindow.get(), buffer->graphicBuffer().get());
+ if (err != 0) {
+ LOGE("queueBuffer failed with error %s (%d)", strerror(-err),
+ -err);
+ return;
+ }
+
+ sp<MetaData> metaData = buffer->meta_data();
+ metaData->setInt32(kKeyRendered, 1);
+ }
+
+protected:
+ virtual ~AwesomeNativeWindowRenderer() {}
+
+private:
+ sp<ANativeWindow> mNativeWindow;
+
+ AwesomeNativeWindowRenderer(const AwesomeNativeWindowRenderer &);
+ AwesomeNativeWindowRenderer &operator=(
+ const AwesomeNativeWindowRenderer &);
+};
+
AwesomePlayer::AwesomePlayer()
: mQueueStarted(false),
mTimeSource(NULL),
@@ -406,6 +438,12 @@ void AwesomePlayer::reset_l() {
LOGI("interrupting the connection process");
mConnectingDataSource->disconnect();
}
+
+ if (mFlags & PREPARING_CONNECTED) {
+ // We are basically done preparing; we're just buffering
+ // enough data to start playback, so we can safely interrupt that.
+ finishAsyncPrepare_l();
+ }
}
while (mFlags & PREPARING) {
@@ -806,8 +844,18 @@ status_t AwesomePlayer::play_l() {
return OK;
}
+void AwesomePlayer::notifyVideoSize_l() {
+ sp<MetaData> meta = mVideoSource->getFormat();
+
+ int32_t decodedWidth, decodedHeight;
+ CHECK(meta->findInt32(kKeyWidth, &decodedWidth));
+ CHECK(meta->findInt32(kKeyHeight, &decodedHeight));
+
+ notifyListener_l(MEDIA_SET_VIDEO_SIZE, decodedWidth, decodedHeight);
+}
+
void AwesomePlayer::initRenderer_l() {
- if (mISurface != NULL) {
+ if (mSurface != NULL || mISurface != NULL) {
sp<MetaData> meta = mVideoSource->getFormat();
int32_t format;
@@ -824,7 +872,27 @@ void AwesomePlayer::initRenderer_l() {
// before creating a new one.
IPCThreadState::self()->flushCommands();
- if (!strncmp("OMX.", component, 4)) {
+ if (mSurface != NULL) {
+ if (strncmp(component, "OMX.", 4) == 0) {
+ // Hardware decoders avoid the CPU color conversion by decoding
+ // directly to ANativeBuffers, so we must use a renderer that
+ // just pushes those buffers to the ANativeWindow.
+ mVideoRenderer = new AwesomeNativeWindowRenderer(mSurface);
+ } else {
+ // Other decoders are instantiated locally and as a consequence
+ // allocate their buffers in local address space. This renderer
+ // then performs a color conversion and copy to get the data
+ // into the ANativeBuffer.
+ mVideoRenderer = new AwesomeLocalRenderer(
+ false, // previewOnly
+ component,
+ (OMX_COLOR_FORMATTYPE)format,
+ mISurface,
+ mSurface,
+ mVideoWidth, mVideoHeight,
+ decodedWidth, decodedHeight);
+ }
+ } else {
// Our OMX codecs allocate buffers on the media_server side
// therefore they require a remote IOMXRenderer that knows how
// to display them.
@@ -834,16 +902,6 @@ void AwesomePlayer::initRenderer_l() {
(OMX_COLOR_FORMATTYPE)format,
decodedWidth, decodedHeight,
mVideoWidth, mVideoHeight));
- } else {
- // Other decoders are instantiated locally and as a consequence
- // allocate their buffers in local address space.
- mVideoRenderer = new AwesomeLocalRenderer(
- false, // previewOnly
- component,
- (OMX_COLOR_FORMATTYPE)format,
- mISurface,
- mVideoWidth, mVideoHeight,
- decodedWidth, decodedHeight);
}
}
}
@@ -894,6 +952,12 @@ void AwesomePlayer::setISurface(const sp<ISurface> &isurface) {
mISurface = isurface;
}
+void AwesomePlayer::setSurface(const sp<Surface> &surface) {
+ Mutex::Autolock autoLock(mLock);
+
+ mSurface = surface;
+}
+
void AwesomePlayer::setAudioSink(
const sp<MediaPlayerBase::AudioSink> &audioSink) {
Mutex::Autolock autoLock(mLock);
@@ -1079,7 +1143,7 @@ status_t AwesomePlayer::initVideoDecoder(uint32_t flags) {
mClient.interface(), mVideoTrack->getFormat(),
false, // createEncoder
mVideoTrack,
- NULL, flags);
+ NULL, flags, mSurface);
if (mVideoSource != NULL) {
int64_t durationUs;
@@ -1110,7 +1174,7 @@ void AwesomePlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
}
if (mAudioPlayer != NULL) {
- LOGV("seeking audio to %lld us (%.2f secs).", timeUs, timeUs / 1E6);
+ LOGV("seeking audio to %lld us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
// If we don't have a video time, seek audio to the originally
// requested seek time instead.
@@ -1184,6 +1248,8 @@ void AwesomePlayer::onVideoEvent() {
if (err == INFO_FORMAT_CHANGED) {
LOGV("VideoSource signalled format change.");
+ notifyVideoSize_l();
+
if (mVideoRenderer != NULL) {
mVideoRendererIsPreview = false;
initRenderer_l();
@@ -1628,7 +1694,7 @@ void AwesomePlayer::abortPrepare(status_t err) {
}
mPrepareResult = err;
- mFlags &= ~(PREPARING|PREPARE_CANCELLED);
+ mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
mAsyncPrepareEvent = NULL;
mPreparedCondition.broadcast();
}
@@ -1676,6 +1742,8 @@ void AwesomePlayer::onPrepareAsyncEvent() {
}
}
+ mFlags |= PREPARING_CONNECTED;
+
if (mCachedSource != NULL || mRTSPController != NULL) {
postBufferingEvent_l();
} else {
@@ -1685,17 +1753,17 @@ void AwesomePlayer::onPrepareAsyncEvent() {
void AwesomePlayer::finishAsyncPrepare_l() {
if (mIsAsyncPrepare) {
- if (mVideoWidth < 0 || mVideoHeight < 0) {
+ if (mVideoSource == NULL) {
notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
} else {
- notifyListener_l(MEDIA_SET_VIDEO_SIZE, mVideoWidth, mVideoHeight);
+ notifyVideoSize_l();
}
notifyListener_l(MEDIA_PREPARED);
}
mPrepareResult = OK;
- mFlags &= ~(PREPARING|PREPARE_CANCELLED);
+ mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
mFlags |= PREPARED;
mAsyncPrepareEvent = NULL;
mPreparedCondition.broadcast();
@@ -1810,13 +1878,14 @@ status_t AwesomePlayer::resume() {
mFlags = state->mFlags & (AUTO_LOOPING | LOOPING | AT_EOS);
- if (state->mLastVideoFrame && mISurface != NULL) {
+ if (state->mLastVideoFrame && (mSurface != NULL || mISurface != NULL)) {
mVideoRenderer =
new AwesomeLocalRenderer(
true, // previewOnly
"",
(OMX_COLOR_FORMATTYPE)state->mColorFormat,
mISurface,
+ mSurface,
state->mVideoWidth,
state->mVideoHeight,
state->mDecodedWidth,
@@ -1851,4 +1920,3 @@ void AwesomePlayer::postAudioSeekComplete() {
}
} // namespace android
-
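
A note on the renderer split above: with an application Surface set, initRenderer_l() now distinguishes hardware from software decoders. A condensed sketch of that decision, assuming a hypothetical makeRendererForSurface() helper (the committed code inlines this logic, and the legacy ISurface-only path still goes through the remote IOMXRenderer):

    // Hypothetical helper: renderer choice once a Surface has been set.
    static sp<AwesomeRenderer> makeRendererForSurface(
            const sp<Surface> &surface, const sp<ISurface> &isurface,
            const char *component, OMX_COLOR_FORMATTYPE format,
            size_t displayWidth, size_t displayHeight,
            size_t decodedWidth, size_t decodedHeight) {
        if (strncmp(component, "OMX.", 4) == 0) {
            // Hardware decoders decode directly into ANativeBuffers; the
            // renderer only has to queue those buffers to the ANativeWindow.
            return new AwesomeNativeWindowRenderer(surface);
        }
        // Software decoders allocate in local address space; the local
        // renderer color-converts and copies each frame into the window.
        return new AwesomeLocalRenderer(
                false /* previewOnly */, component, format,
                isurface, surface, displayWidth, displayHeight,
                decodedWidth, decodedHeight);
    }
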
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 89cb135..95afb1d 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -27,6 +27,7 @@
#include <media/stagefright/MetaData.h>
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
+#include <surfaceflinger/Surface.h>
#include <utils/String8.h>
#include <cutils/properties.h>
@@ -65,6 +66,11 @@ void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) {
void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr) {
LOGV("postData(%d, ptr:%p, size:%d)",
msgType, dataPtr->pointer(), dataPtr->size());
+
+ sp<CameraSource> source = mSource.promote();
+ if (source.get() != NULL) {
+ source->dataCallback(msgType, dataPtr);
+ }
}
void CameraSourceListener::postDataTimestamp(
@@ -77,6 +83,10 @@ void CameraSourceListener::postDataTimestamp(
}
static int32_t getColorFormat(const char* colorFormat) {
+ if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
+ return OMX_COLOR_FormatYUV420Planar;
+ }
+
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422SP)) {
return OMX_COLOR_FormatYUV422SemiPlanar;
}
@@ -99,72 +109,415 @@ static int32_t getColorFormat(const char* colorFormat) {
CHECK_EQ(0, "Unknown color format");
}
-// static
CameraSource *CameraSource::Create() {
- sp<Camera> camera = Camera::connect(0);
-
- if (camera.get() == NULL) {
- return NULL;
- }
+ Size size;
+ size.width = -1;
+ size.height = -1;
- return new CameraSource(camera);
+ sp<ICamera> camera;
+ return new CameraSource(camera, 0, size, -1, NULL, false);
}
// static
-CameraSource *CameraSource::CreateFromCamera(const sp<Camera> &camera) {
- if (camera.get() == NULL) {
- return NULL;
+CameraSource *CameraSource::CreateFromCamera(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<Surface>& surface,
+ bool storeMetaDataInVideoBuffers) {
+
+ CameraSource *source = new CameraSource(camera, cameraId,
+ videoSize, frameRate, surface,
+ storeMetaDataInVideoBuffers);
+
+ if (source != NULL) {
+ if (source->initCheck() != OK) {
+ delete source;
+ return NULL;
+ }
}
-
- return new CameraSource(camera);
+ return source;
}
-CameraSource::CameraSource(const sp<Camera> &camera)
- : mCamera(camera),
- mFirstFrameTimeUs(0),
- mLastFrameTimestampUs(0),
+CameraSource::CameraSource(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<Surface>& surface,
+ bool storeMetaDataInVideoBuffers)
+ : mCameraFlags(0),
+ mVideoFrameRate(-1),
+ mCamera(0),
+ mSurface(surface),
mNumFramesReceived(0),
+ mLastFrameTimestampUs(0),
+ mStarted(false),
+ mFirstFrameTimeUs(0),
mNumFramesEncoded(0),
mNumFramesDropped(0),
mNumGlitches(0),
mGlitchDurationThresholdUs(200000),
- mCollectStats(false),
- mStarted(false) {
+ mCollectStats(false) {
+
+ mVideoSize.width = -1;
+ mVideoSize.height = -1;
+
+ mInitCheck = init(camera, cameraId,
+ videoSize, frameRate,
+ storeMetaDataInVideoBuffers);
+}
+
+status_t CameraSource::initCheck() const {
+ return mInitCheck;
+}
+
+status_t CameraSource::isCameraAvailable(
+ const sp<ICamera>& camera, int32_t cameraId) {
+
+ if (camera == 0) {
+ mCamera = Camera::connect(cameraId);
+ mCameraFlags &= ~FLAGS_HOT_CAMERA;
+ } else {
+ mCamera = Camera::create(camera);
+ mCameraFlags |= FLAGS_HOT_CAMERA;
+ }
+
+ // Is camera available?
+ if (mCamera == 0) {
+ LOGE("Camera connection could not be established.");
+ return -EBUSY;
+ }
+ if (!(mCameraFlags & FLAGS_HOT_CAMERA)) {
+ mCamera->lock();
+ }
+ return OK;
+}
+
+
+/*
+ * Check to see whether the requested video width and height match
+ * one of the supported sizes.
+ * @param width the video frame width in pixels
+ * @param height the video frame height in pixels
+ * @param supportedSizes the vector of sizes that we check against
+ * @return true if the dimension (width and height) is supported.
+ */
+static bool isVideoSizeSupported(
+ int32_t width, int32_t height,
+ const Vector<Size>& supportedSizes) {
+
+ LOGV("isVideoSizeSupported");
+ for (size_t i = 0; i < supportedSizes.size(); ++i) {
+ if (width == supportedSizes[i].width &&
+ height == supportedSizes[i].height) {
+ return true;
+ }
+ }
+ return false;
+}
+/*
+ * If the preview and video output are separate, we only set
+ * the video size, and applications should set the preview size
+ * to some proper value, and the recording framework will not
+ * change the preview size; otherwise, if the video and preview
+ * output are the same, we need to set the preview size to be
+ * the same as the requested video size.
+ *
+ */
+/*
+ * Query the camera to retrieve the supported video frame sizes
+ * and also to see whether CameraParameters::setVideoSize()
+ * is supported or not.
+ * @param params CameraParameters to retrieve the information
+ * @param isSetVideoSizeSupported returns whether the method
+ * CameraParameters::setVideoSize() is supported or not.
+ * @param sizes returns the vector of Size objects for the
+ * supported video frame sizes advertised by the camera.
+ */
+static void getSupportedVideoSizes(
+ const CameraParameters& params,
+ bool *isSetVideoSizeSupported,
+ Vector<Size>& sizes) {
+
+ *isSetVideoSizeSupported = true;
+ params.getSupportedVideoSizes(sizes);
+ if (sizes.size() == 0) {
+ LOGD("Camera does not support setVideoSize()");
+ params.getSupportedPreviewSizes(sizes);
+ *isSetVideoSizeSupported = false;
+ }
+}
+
+/*
+ * Check whether the camera has the supported color format
+ * @param params CameraParameters to retrieve the information
+ * @return OK if no error.
+ */
+status_t CameraSource::isCameraColorFormatSupported(
+ const CameraParameters& params) {
+ mColorFormat = getColorFormat(params.get(
+ CameraParameters::KEY_VIDEO_FRAME_FORMAT));
+ if (mColorFormat == -1) {
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+/*
+ * Configure the camera to use the requested video size
+ * (width and height) and/or frame rate. If both width and
+ * height are -1, configuration on the video size is skipped.
+ * if frameRate is -1, configuration on the frame rate
+ * is skipped. Skipping the configuration allows one to
+ * use the current camera setting without the need to
+ * actually know the specific values (see Create() method).
+ *
+ * @param params the CameraParameters to be configured
+ * @param width the target video frame width in pixels
+ * @param height the target video frame height in pixels
+ * @param frameRate the target frame rate in frames per second.
+ * @return OK if no error.
+ */
+status_t CameraSource::configureCamera(
+ CameraParameters* params,
+ int32_t width, int32_t height,
+ int32_t frameRate) {
+
+ Vector<Size> sizes;
+ bool isSetVideoSizeSupportedByCamera = true;
+ getSupportedVideoSizes(*params, &isSetVideoSizeSupportedByCamera, sizes);
+ bool isCameraParamChanged = false;
+ if (width != -1 && height != -1) {
+ if (!isVideoSizeSupported(width, height, sizes)) {
+ LOGE("Video dimension (%dx%d) is unsupported", width, height);
+ return BAD_VALUE;
+ }
+ if (isSetVideoSizeSupportedByCamera) {
+ params->setVideoSize(width, height);
+ } else {
+ params->setPreviewSize(width, height);
+ }
+ isCameraParamChanged = true;
+ } else if ((width == -1 && height != -1) ||
+ (width != -1 && height == -1)) {
+ // If one and only one of the width and height is -1
+ // we reject such a request.
+ LOGE("Requested video size (%dx%d) is not supported", width, height);
+ return BAD_VALUE;
+ } else { // width == -1 && height == -1
+ // Do not configure the camera.
+ // Use the current width and height value setting from the camera.
+ }
+
+ if (frameRate != -1) {
+ params->setPreviewFrameRate(frameRate);
+ isCameraParamChanged = true;
+ } else { // frameRate == -1
+ // Do not configure the camera.
+ // Use the current frame rate value setting from the camera
+ }
+
+ if (isCameraParamChanged) {
+ // Either frame rate or frame size needs to be changed.
+ String8 s = params->flatten();
+ if (OK != mCamera->setParameters(s)) {
+ LOGE("Could not change settings."
+ " Someone else is using camera %p?", mCamera.get());
+ return -EBUSY;
+ }
+ }
+ return OK;
+}
+
+/*
+ * Check whether the requested video frame size
+ * has been successfully configured or not. If both width and height
+ * are -1, the check is performed on the camera's current width
+ * and height settings.
+ *
+ * @param params CameraParameters to retrieve the information
+ * @param width the target video frame width in pixels to check against
+ * @param height the target video frame height in pixels to check against
+ * @return OK if no error
+ */
+status_t CameraSource::checkVideoSize(
+ const CameraParameters& params,
+ int32_t width, int32_t height) {
+
+ // The actual video size is the same as the preview size
+ // if the camera hal does not support separate video and
+ // preview output. In this case, we retrieve the video
+ // size from preview.
+ int32_t frameWidthActual = -1;
+ int32_t frameHeightActual = -1;
+ Vector<Size> sizes;
+ params.getSupportedVideoSizes(sizes);
+ if (sizes.size() == 0) {
+ // video size is the same as preview size
+ params.getPreviewSize(&frameWidthActual, &frameHeightActual);
+ } else {
+ // video size may not be the same as preview
+ params.getVideoSize(&frameWidthActual, &frameHeightActual);
+ }
+ if (frameWidthActual < 0 || frameHeightActual < 0) {
+ LOGE("Failed to retrieve video frame size (%dx%d)",
+ frameWidthActual, frameHeightActual);
+ return UNKNOWN_ERROR;
+ }
+
+ // Check the actual video frame size against the target/requested
+ // video frame size.
+ if (width != -1 && height != -1) {
+ if (frameWidthActual != width || frameHeightActual != height) {
+ LOGE("Failed to set video frame size to %dx%d. "
+ "The actual video size is %dx%d ", width, height,
+ frameWidthActual, frameHeightActual);
+ return UNKNOWN_ERROR;
+ }
+ }
+
+ // Good now.
+ mVideoSize.width = frameWidthActual;
+ mVideoSize.height = frameHeightActual;
+ return OK;
+}
+
+/*
+ * Check whether the requested frame rate has been successfully configured
+ * or not. If the target frameRate is -1, the check is performed on the
+ * camera's current frame rate setting.
+ *
+ * @param params CameraParameters to retrieve the information
+ * @param frameRate the target video frame rate to check against
+ * @return OK if no error.
+ */
+status_t CameraSource::checkFrameRate(
+ const CameraParameters& params,
+ int32_t frameRate) {
+
+ int32_t frameRateActual = params.getPreviewFrameRate();
+ if (frameRateActual < 0) {
+ LOGE("Failed to retrieve preview frame rate (%d)", frameRateActual);
+ return UNKNOWN_ERROR;
+ }
+
+ // Check the actual video frame rate against the target/requested
+ // video frame rate.
+ if (frameRate != -1 && (frameRateActual - frameRate) != 0) {
+ LOGE("Failed to set preview frame rate to %d fps. The actual "
+ "frame rate is %d", frameRate, frameRateActual);
+ return UNKNOWN_ERROR;
+ }
+
+ // Good now.
+ mVideoFrameRate = frameRateActual;
+ return OK;
+}
+
+/*
+ * Initialize the CameraSource so that it becomes
+ * ready for providing the video input streams as requested.
+ * @param camera the camera object used for the video source
+ * @param cameraId if camera == 0, use camera with this id
+ * as the video source
+ * @param videoSize the target video frame size. If both
+ * width and height in videoSize are -1, use the current
+ * width and height settings of the camera
+ * @param frameRate the target frame rate in frames per second.
+ * If it is -1, use the current camera frame rate setting.
+ * @param storeMetaDataInVideoBuffers request to store meta
+ * data or real YUV data in video buffers. Request to
+ * store meta data in video buffers may not be honored
+ * if the source does not support this feature.
+ *
+ * @return OK if no error.
+ */
+status_t CameraSource::init(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate,
+ bool storeMetaDataInVideoBuffers) {
+
+ status_t err = OK;
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- String8 s = mCamera->getParameters();
- IPCThreadState::self()->restoreCallingIdentity(token);
- printf("params: \"%s\"\n", s.string());
+ if ((err = isCameraAvailable(camera, cameraId)) != OK) {
+ return err;
+ }
+ CameraParameters params(mCamera->getParameters());
+ if ((err = isCameraColorFormatSupported(params)) != OK) {
+ return err;
+ }
- int32_t width, height, stride, sliceHeight;
- CameraParameters params(s);
- params.getPreviewSize(&width, &height);
+ // Set the camera to use the requested video frame size
+ // and/or frame rate.
+ if ((err = configureCamera(&params,
+ videoSize.width, videoSize.height,
+ frameRate))) {
+ return err;
+ }
- // Calculate glitch duraton threshold based on frame rate
- int32_t frameRate = params.getPreviewFrameRate();
- int64_t glitchDurationUs = (1000000LL / frameRate);
+ // Check on video frame size and frame rate.
+ CameraParameters newCameraParams(mCamera->getParameters());
+ if ((err = checkVideoSize(newCameraParams,
+ videoSize.width, videoSize.height)) != OK) {
+ return err;
+ }
+ if ((err = checkFrameRate(newCameraParams, frameRate)) != OK) {
+ return err;
+ }
+
+ // This CHECK is good, since we just passed the lock/unlock
+ // check earlier by calling mCamera->setParameters().
+ CHECK_EQ(OK, mCamera->setPreviewDisplay(mSurface));
+
+ mIsMetaDataStoredInVideoBuffers = false;
+ if (storeMetaDataInVideoBuffers &&
+ OK == mCamera->storeMetaDataInBuffers(true)) {
+ mIsMetaDataStoredInVideoBuffers = true;
+ }
+
+ /*
+ * mCamera->startRecording() signals camera hal to make
+ * available the video buffers (for instance, allocation
+ * of the video buffers may be triggered when camera hal's
+ * startRecording() method is called). Making available these
+ * video buffers earlier (before calling start()) is critical,
+ * if one wants to configure omx video encoders to use these
+ * buffers for passing video frame data during video recording
+ * without the need to memcpy the video frame data stored
+ * in these buffers. Eliminating memcpy for video frame data
+ * is crucial in performance for HD quality video recording
+ * applications.
+ *
+ * Based on OMX IL spec, configuring the omx video encoders
+ * must occur in loaded state. When start() is called, omx
+ * video encoders are already in idle state, which is too
+ * late. Thus, we must call mCamera->startRecording() earlier.
+ */
+ startCameraRecording();
+
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ int64_t glitchDurationUs = (1000000LL / mVideoFrameRate);
if (glitchDurationUs > mGlitchDurationThresholdUs) {
mGlitchDurationThresholdUs = glitchDurationUs;
}
- const char *colorFormatStr = params.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
- CHECK(colorFormatStr != NULL);
- int32_t colorFormat = getColorFormat(colorFormatStr);
-
// XXX: query camera for the stride and slice height
// when the capability becomes available.
- stride = width;
- sliceHeight = height;
-
mMeta = new MetaData;
- mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
- mMeta->setInt32(kKeyColorFormat, colorFormat);
- mMeta->setInt32(kKeyWidth, width);
- mMeta->setInt32(kKeyHeight, height);
- mMeta->setInt32(kKeyStride, stride);
- mMeta->setInt32(kKeySliceHeight, sliceHeight);
-
+ mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
+ mMeta->setInt32(kKeyColorFormat, mColorFormat);
+ mMeta->setInt32(kKeyWidth, mVideoSize.width);
+ mMeta->setInt32(kKeyHeight, mVideoSize.height);
+ mMeta->setInt32(kKeyStride, mVideoSize.width);
+ mMeta->setInt32(kKeySliceHeight, mVideoSize.height);
+ return OK;
}
CameraSource::~CameraSource() {
@@ -173,8 +526,17 @@ CameraSource::~CameraSource() {
}
}
+void CameraSource::startCameraRecording() {
+ CHECK_EQ(OK, mCamera->startRecording());
+ CHECK(mCamera->recordingEnabled());
+}
+
status_t CameraSource::start(MetaData *meta) {
CHECK(!mStarted);
+ if (mInitCheck != OK) {
+ LOGE("CameraSource is not initialized yet");
+ return mInitCheck;
+ }
char value[PROPERTY_VALUE_MAX];
if (property_get("media.stagefright.record-stats", value, NULL)
@@ -190,13 +552,17 @@ status_t CameraSource::start(MetaData *meta) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
mCamera->setListener(new CameraSourceListener(this));
- CHECK_EQ(OK, mCamera->startRecording());
IPCThreadState::self()->restoreCallingIdentity(token);
mStarted = true;
return OK;
}
+void CameraSource::stopCameraRecording() {
+ mCamera->setListener(NULL);
+ mCamera->stopRecording();
+}
+
status_t CameraSource::stop() {
LOGV("stop");
Mutex::Autolock autoLock(mLock);
@@ -204,15 +570,23 @@ status_t CameraSource::stop() {
mFrameAvailableCondition.signal();
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->setListener(NULL);
- mCamera->stopRecording();
+ stopCameraRecording();
releaseQueuedFrames();
while (!mFramesBeingEncoded.empty()) {
LOGI("Waiting for outstanding frames being encoded: %d",
mFramesBeingEncoded.size());
mFrameCompleteCondition.wait(mLock);
}
- mCamera = NULL;
+
+ LOGV("Disconnect camera");
+ if ((mCameraFlags & FLAGS_HOT_CAMERA) == 0) {
+ LOGV("Camera was cold when we started, stopping preview");
+ mCamera->stopPreview();
+ }
+ mCamera->unlock();
+ mCamera.clear();
+ mCamera = 0;
+ mCameraFlags = 0;
IPCThreadState::self()->restoreCallingIdentity(token);
if (mCollectStats) {
@@ -225,11 +599,15 @@ status_t CameraSource::stop() {
return OK;
}
+void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
+ mCamera->releaseRecordingFrame(frame);
+}
+
void CameraSource::releaseQueuedFrames() {
List<sp<IMemory> >::iterator it;
while (!mFramesReceived.empty()) {
it = mFramesReceived.begin();
- mCamera->releaseRecordingFrame(*it);
+ releaseRecordingFrame(*it);
mFramesReceived.erase(it);
++mNumFramesDropped;
}
@@ -241,7 +619,7 @@ sp<MetaData> CameraSource::getFormat() {
void CameraSource::releaseOneRecordingFrame(const sp<IMemory>& frame) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->releaseRecordingFrame(frame);
+ releaseRecordingFrame(frame);
IPCThreadState::self()->restoreCallingIdentity(token);
}
@@ -251,7 +629,6 @@ void CameraSource::signalBufferReturned(MediaBuffer *buffer) {
for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
it != mFramesBeingEncoded.end(); ++it) {
if ((*it)->pointer() == buffer->data()) {
-
releaseOneRecordingFrame((*it));
mFramesBeingEncoded.erase(it);
++mNumFramesEncoded;
@@ -343,6 +720,13 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
++mNumGlitches;
}
+ // May need to skip the frame or modify the timestamp. Currently
+ // implemented by the subclass CameraSourceTimeLapse.
+ if (skipCurrentFrame(timestampUs)) {
+ releaseOneRecordingFrame(data);
+ return;
+ }
+
mLastFrameTimestampUs = timestampUs;
if (mNumFramesReceived == 0) {
mFirstFrameTimeUs = timestampUs;
@@ -367,4 +751,31 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
mFrameAvailableCondition.signal();
}
+size_t CameraSource::getNumberOfVideoBuffers() const {
+ LOGV("getNumberOfVideoBuffers");
+ size_t nBuffers = 0;
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ if (mInitCheck == OK && mCamera != 0) {
+ nBuffers = mCamera->getNumberOfVideoBuffers();
+ }
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ return nBuffers;
+}
+
+sp<IMemory> CameraSource::getVideoBuffer(size_t index) const {
+ LOGV("getVideoBuffer: %d", index);
+ sp<IMemory> buffer = 0;
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ if (mInitCheck == OK && mCamera != 0) {
+ buffer = mCamera->getVideoBuffer(index);
+ }
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ return buffer;
+}
+
+bool CameraSource::isMetaDataStoredInVideoBuffers() const {
+ LOGV("isMetaDataStoredInVideoBuffers");
+ return mIsMetaDataStoredInVideoBuffers;
+}
+
} // namespace android
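
With the reworked factory above, clients request an explicit video size and frame rate and rely on init() to validate them against the camera's capabilities. A minimal usage sketch (the 720x480 at 30 fps values and the preview surface are illustrative only):

    // Sketch: obtaining a configured CameraSource (values illustrative).
    Size videoSize;
    videoSize.width = 720;
    videoSize.height = 480;

    CameraSource *source = CameraSource::CreateFromCamera(
            NULL,            // no existing ICamera: connect by id
            0,               // cameraId
            videoSize,
            30,              // frameRate in fps
            previewSurface,  // sp<Surface> used as the preview display
            false);          // storeMetaDataInVideoBuffers

    if (source == NULL) {
        // init() failed: unsupported size or rate, or a busy camera.
    } else {
        // Note: the camera's recording path was already started inside
        // init() (see the comment there); start() only arms the listener.
        source->start();
    }
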
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
new file mode 100644
index 0000000..6fd1825
--- /dev/null
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CameraSourceTimeLapse"
+
+#include <binder/IPCThreadState.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/stagefright/CameraSource.h>
+#include <media/stagefright/CameraSourceTimeLapse.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/YUVImage.h>
+#include <media/stagefright/YUVCanvas.h>
+#include <camera/Camera.h>
+#include <camera/CameraParameters.h>
+#include <ui/Rect.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include "OMX_Video.h"
+#include <limits.h>
+
+namespace android {
+
+// static
+CameraSourceTimeLapse *CameraSourceTimeLapse::CreateFromCamera(
+ const sp<ICamera> &camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<Surface>& surface,
+ int64_t timeBetweenTimeLapseFrameCaptureUs) {
+
+ CameraSourceTimeLapse *source = new
+ CameraSourceTimeLapse(camera, cameraId,
+ videoSize, videoFrameRate, surface,
+ timeBetweenTimeLapseFrameCaptureUs);
+
+ if (source != NULL) {
+ if (source->initCheck() != OK) {
+ delete source;
+ return NULL;
+ }
+ }
+ return source;
+}
+
+CameraSourceTimeLapse::CameraSourceTimeLapse(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<Surface>& surface,
+ int64_t timeBetweenTimeLapseFrameCaptureUs)
+ : CameraSource(camera, cameraId, videoSize, videoFrameRate, surface, false),
+ mTimeBetweenTimeLapseFrameCaptureUs(timeBetweenTimeLapseFrameCaptureUs),
+ mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
+ mLastTimeLapseFrameRealTimestampUs(0),
+ mSkipCurrentFrame(false) {
+
+ LOGV("starting time lapse mode");
+ mVideoWidth = videoSize.width;
+ mVideoHeight = videoSize.height;
+
+ if (trySettingPreviewSize(videoSize.width, videoSize.height)) {
+ mUseStillCameraForTimeLapse = false;
+ } else {
+ // TODO: Add a check to see that mTimeBetweenTimeLapseFrameCaptureUs is greater
+ // than the fastest rate at which the still camera can take pictures.
+ mUseStillCameraForTimeLapse = true;
+ CHECK(setPictureSizeToClosestSupported(videoSize.width, videoSize.height));
+ mNeedCropping = computeCropRectangleOffset();
+ mMeta->setInt32(kKeyWidth, videoSize.width);
+ mMeta->setInt32(kKeyHeight, videoSize.height);
+ }
+
+ // Initialize quick stop variables.
+ mQuickStop = false;
+ mForceRead = false;
+ mLastReadBufferCopy = NULL;
+ mStopWaitingForIdleCamera = false;
+}
+
+CameraSourceTimeLapse::~CameraSourceTimeLapse() {
+}
+
+void CameraSourceTimeLapse::startQuickReadReturns() {
+ Mutex::Autolock autoLock(mQuickStopLock);
+ LOGV("Enabling quick read returns");
+
+ // Enable quick stop mode.
+ mQuickStop = true;
+
+ if (mUseStillCameraForTimeLapse) {
+ // wake up the thread right away.
+ mTakePictureCondition.signal();
+ } else {
+ // Force dataCallbackTimestamp() coming from the video camera to not skip the
+ // next frame, as we want read() to get a frame right away.
+ mForceRead = true;
+ }
+}
+
+bool CameraSourceTimeLapse::trySettingPreviewSize(int32_t width, int32_t height) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = mCamera->getParameters();
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ CameraParameters params(s);
+ Vector<Size> supportedSizes;
+ params.getSupportedPreviewSizes(supportedSizes);
+
+ bool previewSizeSupported = false;
+ for (uint32_t i = 0; i < supportedSizes.size(); ++i) {
+ int32_t pictureWidth = supportedSizes[i].width;
+ int32_t pictureHeight = supportedSizes[i].height;
+
+ if ((pictureWidth == width) && (pictureHeight == height)) {
+ previewSizeSupported = true;
+ }
+ }
+
+ if (previewSizeSupported) {
+ LOGV("Video size (%d, %d) is a supported preview size", width, height);
+ params.setPreviewSize(width, height);
+ CHECK_EQ(OK, mCamera->setParameters(params.flatten()));
+ return true;
+ }
+
+ return false;
+}
+
+bool CameraSourceTimeLapse::setPictureSizeToClosestSupported(int32_t width, int32_t height) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = mCamera->getParameters();
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ CameraParameters params(s);
+ Vector<Size> supportedSizes;
+ params.getSupportedPictureSizes(supportedSizes);
+
+ int32_t minPictureSize = INT_MAX;
+ for (uint32_t i = 0; i < supportedSizes.size(); ++i) {
+ int32_t pictureWidth = supportedSizes[i].width;
+ int32_t pictureHeight = supportedSizes[i].height;
+
+ if ((pictureWidth >= width) && (pictureHeight >= height)) {
+ int32_t pictureSize = pictureWidth*pictureHeight;
+ if (pictureSize < minPictureSize) {
+ minPictureSize = pictureSize;
+ mPictureWidth = pictureWidth;
+ mPictureHeight = pictureHeight;
+ }
+ }
+ }
+ LOGV("Picture size = (%d, %d)", mPictureWidth, mPictureHeight);
+ return (minPictureSize != INT_MAX);
+}
+
+bool CameraSourceTimeLapse::computeCropRectangleOffset() {
+ if ((mPictureWidth == mVideoWidth) && (mPictureHeight == mVideoHeight)) {
+ return false;
+ }
+
+ CHECK((mPictureWidth > mVideoWidth) && (mPictureHeight > mVideoHeight));
+
+ int32_t widthDifference = mPictureWidth - mVideoWidth;
+ int32_t heightDifference = mPictureHeight - mVideoHeight;
+
+ mCropRectStartX = widthDifference/2;
+ mCropRectStartY = heightDifference/2;
+
+ LOGV("setting crop rectangle offset to (%d, %d)", mCropRectStartX, mCropRectStartY);
+
+ return true;
+}
+
+void CameraSourceTimeLapse::signalBufferReturned(MediaBuffer* buffer) {
+ Mutex::Autolock autoLock(mQuickStopLock);
+ if (mQuickStop && (buffer == mLastReadBufferCopy)) {
+ buffer->setObserver(NULL);
+ buffer->release();
+ } else {
+ return CameraSource::signalBufferReturned(buffer);
+ }
+}
+
+void createMediaBufferCopy(const MediaBuffer& sourceBuffer, int64_t frameTime, MediaBuffer **newBuffer) {
+ size_t sourceSize = sourceBuffer.size();
+ void* sourcePointer = sourceBuffer.data();
+
+ (*newBuffer) = new MediaBuffer(sourceSize);
+ memcpy((*newBuffer)->data(), sourcePointer, sourceSize);
+
+ (*newBuffer)->meta_data()->setInt64(kKeyTime, frameTime);
+}
+
+void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBuffer& sourceBuffer) {
+ int64_t frameTime;
+ CHECK(sourceBuffer.meta_data()->findInt64(kKeyTime, &frameTime));
+ createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy);
+ mLastReadBufferCopy->add_ref();
+ mLastReadBufferCopy->setObserver(this);
+}
+
+status_t CameraSourceTimeLapse::read(
+ MediaBuffer **buffer, const ReadOptions *options) {
+ if (mLastReadBufferCopy == NULL) {
+ mLastReadStatus = CameraSource::read(buffer, options);
+
+ // mQuickStop may have turned to true while read was blocked. Make a copy of
+ // the buffer in that case.
+ Mutex::Autolock autoLock(mQuickStopLock);
+ if (mQuickStop && *buffer) {
+ fillLastReadBufferCopy(**buffer);
+ }
+ return mLastReadStatus;
+ } else {
+ (*buffer) = mLastReadBufferCopy;
+ (*buffer)->add_ref();
+ return mLastReadStatus;
+ }
+}
+
+// static
+void *CameraSourceTimeLapse::ThreadTimeLapseWrapper(void *me) {
+ CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
+ source->threadTimeLapseEntry();
+ return NULL;
+}
+
+void CameraSourceTimeLapse::threadTimeLapseEntry() {
+ while (mStarted) {
+ {
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ if (!mCameraIdle) {
+ mCameraIdleCondition.wait(mCameraIdleLock);
+ }
+ CHECK(mCameraIdle);
+ mCameraIdle = false;
+ }
+
+ // Even if mQuickStop == true we need to take one more picture
+ // as a read() may be blocked, waiting for a frame to become available.
+ // After this takePicture, if mQuickStop == true, we can safely exit
+ // this thread as read() will make a copy of this last frame and keep
+ // returning it in the quick stop mode.
+ Mutex::Autolock autoLock(mQuickStopLock);
+ CHECK_EQ(OK, mCamera->takePicture());
+ if (mQuickStop) {
+ LOGV("threadTimeLapseEntry: Exiting due to mQuickStop = true");
+ return;
+ }
+ mTakePictureCondition.waitRelative(mQuickStopLock,
+ mTimeBetweenTimeLapseFrameCaptureUs * 1000);
+ }
+ LOGV("threadTimeLapseEntry: Exiting due to mStarted = false");
+}
+
+void CameraSourceTimeLapse::startCameraRecording() {
+ if (mUseStillCameraForTimeLapse) {
+ LOGV("start time lapse recording using still camera");
+
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = mCamera->getParameters();
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ CameraParameters params(s);
+ params.setPictureSize(mPictureWidth, mPictureHeight);
+ mCamera->setParameters(params.flatten());
+ mCameraIdle = true;
+ mStopWaitingForIdleCamera = false;
+
+ // disable shutter sound and play the recording sound.
+ mCamera->sendCommand(CAMERA_CMD_ENABLE_SHUTTER_SOUND, 0, 0);
+ mCamera->sendCommand(CAMERA_CMD_PLAY_RECORDING_SOUND, 0, 0);
+
+ // create a thread which takes pictures in a loop
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ pthread_create(&mThreadTimeLapse, &attr, ThreadTimeLapseWrapper, this);
+ pthread_attr_destroy(&attr);
+ } else {
+ LOGV("start time lapse recording using video camera");
+ CHECK_EQ(OK, mCamera->startRecording());
+ }
+}
+
+void CameraSourceTimeLapse::stopCameraRecording() {
+ if (mUseStillCameraForTimeLapse) {
+ void *dummy;
+ pthread_join(mThreadTimeLapse, &dummy);
+
+ // Last takePicture may still be underway. Wait for the camera to get
+ // idle.
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ mStopWaitingForIdleCamera = true;
+ if (!mCameraIdle) {
+ mCameraIdleCondition.wait(mCameraIdleLock);
+ }
+ CHECK(mCameraIdle);
+ mCamera->setListener(NULL);
+
+ // play the recording sound.
+ mCamera->sendCommand(CAMERA_CMD_PLAY_RECORDING_SOUND, 0, 0);
+ } else {
+ mCamera->setListener(NULL);
+ mCamera->stopRecording();
+ }
+ if (mLastReadBufferCopy) {
+ mLastReadBufferCopy->release();
+ mLastReadBufferCopy = NULL;
+ }
+}
+
+void CameraSourceTimeLapse::releaseRecordingFrame(const sp<IMemory>& frame) {
+ if (!mUseStillCameraForTimeLapse) {
+ mCamera->releaseRecordingFrame(frame);
+ }
+}
+
+sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(const sp<IMemory> &source_data) {
+ size_t source_size = source_data->size();
+ void* source_pointer = source_data->pointer();
+
+ sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(source_size);
+ sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, source_size);
+ memcpy(newMemory->pointer(), source_pointer, source_size);
+ return newMemory;
+}
+
+// Allocates IMemory of final type MemoryBase with the given size.
+sp<IMemory> allocateIMemory(size_t size) {
+ sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(size);
+ sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, size);
+ return newMemory;
+}
+
+// static
+void *CameraSourceTimeLapse::ThreadStartPreviewWrapper(void *me) {
+ CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
+ source->threadStartPreview();
+ return NULL;
+}
+
+void CameraSourceTimeLapse::threadStartPreview() {
+ CHECK_EQ(OK, mCamera->startPreview());
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ mCameraIdle = true;
+ mCameraIdleCondition.signal();
+}
+
+void CameraSourceTimeLapse::restartPreview() {
+ // Start this in a different thread, so that the dataCallback can return
+ LOGV("restartPreview");
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+
+ pthread_t threadPreview;
+ pthread_create(&threadPreview, &attr, ThreadStartPreviewWrapper, this);
+ pthread_attr_destroy(&attr);
+}
+
+sp<IMemory> CameraSourceTimeLapse::cropYUVImage(const sp<IMemory> &source_data) {
+ // find the YUV format
+ int32_t srcFormat;
+ CHECK(mMeta->findInt32(kKeyColorFormat, &srcFormat));
+ YUVImage::YUVFormat yuvFormat;
+ if (srcFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ yuvFormat = YUVImage::YUV420SemiPlanar;
+ } else {
+ CHECK_EQ(srcFormat, OMX_COLOR_FormatYUV420Planar);
+ yuvFormat = YUVImage::YUV420Planar;
+ }
+
+ // allocate memory for cropped image and setup a canvas using it.
+ sp<IMemory> croppedImageMemory = allocateIMemory(
+ YUVImage::bufferSize(yuvFormat, mVideoWidth, mVideoHeight));
+ YUVImage yuvImageCropped(yuvFormat,
+ mVideoWidth, mVideoHeight,
+ (uint8_t *)croppedImageMemory->pointer());
+ YUVCanvas yuvCanvasCrop(yuvImageCropped);
+
+ YUVImage yuvImageSource(yuvFormat,
+ mPictureWidth, mPictureHeight,
+ (uint8_t *)source_data->pointer());
+ yuvCanvasCrop.CopyImageRect(
+ Rect(mCropRectStartX, mCropRectStartY,
+ mCropRectStartX + mVideoWidth,
+ mCropRectStartY + mVideoHeight),
+ 0, 0,
+ yuvImageSource);
+
+ return croppedImageMemory;
+}
+
+void CameraSourceTimeLapse::dataCallback(int32_t msgType, const sp<IMemory> &data) {
+ if (msgType == CAMERA_MSG_COMPRESSED_IMAGE) {
+ // takePicture will complete after this callback, so restart preview.
+ restartPreview();
+ return;
+ }
+ if (msgType != CAMERA_MSG_RAW_IMAGE) {
+ return;
+ }
+
+ LOGV("dataCallback for timelapse still frame");
+ CHECK_EQ(true, mUseStillCameraForTimeLapse);
+
+ int64_t timestampUs;
+ if (mNumFramesReceived == 0) {
+ timestampUs = mStartTimeUs;
+ } else {
+ timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ }
+
+ if (mNeedCropping) {
+ sp<IMemory> croppedImageData = cropYUVImage(data);
+ dataCallbackTimestamp(timestampUs, msgType, croppedImageData);
+ } else {
+ sp<IMemory> dataCopy = createIMemoryCopy(data);
+ dataCallbackTimestamp(timestampUs, msgType, dataCopy);
+ }
+}
+
+bool CameraSourceTimeLapse::skipCurrentFrame(int64_t timestampUs) {
+ if (mSkipCurrentFrame) {
+ mSkipCurrentFrame = false;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
+ if (!mUseStillCameraForTimeLapse) {
+ if (mLastTimeLapseFrameRealTimestampUs == 0) {
+ // First time lapse frame. Initialize mLastTimeLapseFrameRealTimestampUs
+ // to current time (timestampUs) and save frame data.
+ LOGV("dataCallbackTimestamp timelapse: initial frame");
+
+ mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+ return false;
+ }
+
+ {
+ Mutex::Autolock autoLock(mQuickStopLock);
+
+ // mForceRead may be set to true by startQuickReadReturns(). In that
+ // case don't skip this frame.
+ if (mForceRead) {
+ LOGV("dataCallbackTimestamp timelapse: forced read");
+ mForceRead = false;
+ *timestampUs = mLastFrameTimestampUs;
+ return false;
+ }
+ }
+
+ if (*timestampUs <
+ (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) {
+ // Skip all frames from last encoded frame until
+ // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed.
+ // Tell the camera to release its recording frame and return.
+ LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame");
+ return true;
+ } else {
+ // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time:
+ // - Reset mLastTimeLapseFrameRealTimestampUs to current time.
+ // - Artificially modify timestampUs to be one frame time (1/framerate) ahead
+ // of the last encoded frame's time stamp.
+ LOGV("dataCallbackTimestamp timelapse: got timelapse frame");
+
+ mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+ *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ return false;
+ }
+ }
+ return false;
+}
+
+void CameraSourceTimeLapse::dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
+ const sp<IMemory> &data) {
+ if (!mUseStillCameraForTimeLapse) {
+ mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
+ } else {
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ // If we are using the still camera and stop() has been called, it may
+ // be waiting for the camera to get idle. In that case return
+ // immediately. Calling CameraSource::dataCallbackTimestamp() will lead
+ // to a deadlock since it tries to access CameraSource::mLock which in
+ // this case is held by CameraSource::stop() currently waiting for the
+ // camera to get idle. And camera will not get idle until this call
+ // returns.
+ if (mStopWaitingForIdleCamera) {
+ return;
+ }
+ }
+ CameraSource::dataCallbackTimestamp(timestampUs, msgType, data);
+}
+
+} // namespace android
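
The timestamp bookkeeping in skipFrameAndModifyTimeStamp() above is easiest to follow with concrete numbers. A worked example, assuming one captured frame every 5 seconds encoded for 30 fps playback (numbers illustrative):

    // mTimeBetweenTimeLapseFrameCaptureUs = 5000000   (capture every 5 s)
    // mTimeBetweenTimeLapseVideoFramesUs  = 1E6 / 30 = 33333
    //
    // Camera frames arrive continuously, but only those at least
    // 5000000 us after the last kept frame survive the skip check:
    //   kept real timestamps:  0, 5000000, 10000000, ... us
    //   emitted timestamps:    0,   33333,    66666, ... us
    //
    // Each kept frame is re-stamped one video-frame interval after the
    // previous one, so 5 s of wall-clock time plays back in about 33 ms.
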
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index a15c274..9e79aa9 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2217,6 +2217,9 @@ void MPEG4Writer::Track::writeTrackHeader(
CHECK(mCodecSpecificData);
CHECK(mCodecSpecificDataSize > 0);
+ // Make sure all sizes encode to a single byte.
+ CHECK(mCodecSpecificDataSize + 23 < 128);
+
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt8(0x03); // ES_DescrTag
mOwner->writeInt8(23 + mCodecSpecificDataSize);
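
Why 128: MP4 descriptor sizes use the expandable-length encoding, where a single byte can only carry values 0 through 127 (the high bit marks a continuation byte). The outermost length written here is 23 + mCodecSpecificDataSize, and every nested descriptor length is smaller, so bounding that one sum keeps every length field single-byte. A worked example (sizes illustrative):

    // mCodecSpecificDataSize = 20  -> ES_Descr length byte = 43 (fits)
    // mCodecSpecificDataSize = 105 -> 23 + 105 = 128 would need a
    //                                 two-byte length, so the new CHECK
    //                                 aborts instead of writing a
    //                                 corrupt esds box.
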
diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/MediaBuffer.cpp
index b973745..cbccd31 100644
--- a/media/libstagefright/MediaBuffer.cpp
+++ b/media/libstagefright/MediaBuffer.cpp
@@ -25,6 +25,8 @@
#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MetaData.h>
+#include <ui/GraphicBuffer.h>
+
namespace android {
// XXX make this truly atomic.
@@ -61,6 +63,20 @@ MediaBuffer::MediaBuffer(size_t size)
mOriginal(NULL) {
}
+MediaBuffer::MediaBuffer(const sp<GraphicBuffer>& graphicBuffer)
+ : mObserver(NULL),
+ mNextBuffer(NULL),
+ mRefCount(0),
+ mData(NULL),
+ mSize(1),
+ mRangeOffset(0),
+ mRangeLength(mSize),
+ mGraphicBuffer(graphicBuffer),
+ mOwnsData(false),
+ mMetaData(new MetaData),
+ mOriginal(NULL) {
+}
+
void MediaBuffer::release() {
if (mObserver == NULL) {
CHECK_EQ(mRefCount, 0);
@@ -92,10 +108,12 @@ void MediaBuffer::add_ref() {
}
void *MediaBuffer::data() const {
+ CHECK(mGraphicBuffer == NULL);
return mData;
}
size_t MediaBuffer::size() const {
+ CHECK(mGraphicBuffer == NULL);
return mSize;
}
@@ -108,15 +126,19 @@ size_t MediaBuffer::range_length() const {
}
void MediaBuffer::set_range(size_t offset, size_t length) {
- if (offset + length > mSize) {
+ if ((mGraphicBuffer == NULL) && (offset + length > mSize)) {
LOGE("offset = %d, length = %d, mSize = %d", offset, length, mSize);
}
- CHECK(offset + length <= mSize);
+ CHECK((mGraphicBuffer != NULL) || (offset + length <= mSize));
mRangeOffset = offset;
mRangeLength = length;
}
+sp<GraphicBuffer> MediaBuffer::graphicBuffer() const {
+ return mGraphicBuffer;
+}
+
sp<MetaData> MediaBuffer::meta_data() {
return mMetaData;
}
@@ -158,6 +180,8 @@ int MediaBuffer::refcount() const {
}
MediaBuffer *MediaBuffer::clone() {
+ CHECK_EQ(mGraphicBuffer, NULL);
+
MediaBuffer *buffer = new MediaBuffer(mData, mSize);
buffer->set_range(mRangeOffset, mRangeLength);
buffer->mMetaData = new MetaData(*mMetaData.get());
@@ -169,4 +193,3 @@ MediaBuffer *MediaBuffer::clone() {
}
} // namespace android
-
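
The GraphicBuffer-backed variant added above deliberately has no CPU-side payload. A short sketch of the resulting contract (the buffer allocation is illustrative; real buffers would come from a hardware decoder):

    // Sketch: a MediaBuffer wrapping a GraphicBuffer (illustration only).
    sp<GraphicBuffer> gb = new GraphicBuffer(
            1280, 720, PIXEL_FORMAT_RGBA_8888,
            GraphicBuffer::USAGE_HW_TEXTURE);

    MediaBuffer *mb = new MediaBuffer(gb);
    CHECK(mb->graphicBuffer() == gb);  // the accessor for this buffer kind

    // mb->data(), mb->size() and mb->clone() all CHECK-fail here: there is
    // no CPU-addressable payload. set_range() remains legal, as the
    // relaxed CHECK in set_range() above shows.

    mb->release();
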
diff --git a/media/libstagefright/MediaSourceSplitter.cpp b/media/libstagefright/MediaSourceSplitter.cpp
new file mode 100644
index 0000000..abc7012
--- /dev/null
+++ b/media/libstagefright/MediaSourceSplitter.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaSourceSplitter"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaSourceSplitter.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+MediaSourceSplitter::MediaSourceSplitter(sp<MediaSource> mediaSource) {
+ mNumberOfClients = 0;
+ mSource = mediaSource;
+ mSourceStarted = false;
+
+ mNumberOfClientsStarted = 0;
+ mNumberOfCurrentReads = 0;
+ mCurrentReadBit = 0;
+ mLastReadCompleted = true;
+}
+
+MediaSourceSplitter::~MediaSourceSplitter() {
+}
+
+sp<MediaSource> MediaSourceSplitter::createClient() {
+ Mutex::Autolock autoLock(mLock);
+
+ sp<MediaSource> client = new Client(this, mNumberOfClients++);
+ mClientsStarted.push(false);
+ mClientsDesiredReadBit.push(0);
+ return client;
+}
+
+status_t MediaSourceSplitter::start(int clientId, MetaData *params) {
+ Mutex::Autolock autoLock(mLock);
+
+ LOGV("start client (%d)", clientId);
+ if (mClientsStarted[clientId]) {
+ return OK;
+ }
+
+ mNumberOfClientsStarted++;
+
+ if (!mSourceStarted) {
+ LOGV("Starting real source from client (%d)", clientId);
+ status_t err = mSource->start(params);
+
+ if (err == OK) {
+ mSourceStarted = true;
+ mClientsStarted.editItemAt(clientId) = true;
+ mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit;
+ }
+
+ return err;
+ } else {
+ mClientsStarted.editItemAt(clientId) = true;
+ if (mLastReadCompleted) {
+ // Last read was completed. So join in the threads for the next read.
+ mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit;
+ } else {
+ // Last read is ongoing. So join in the threads for the current read.
+ mClientsDesiredReadBit.editItemAt(clientId) = mCurrentReadBit;
+ }
+ return OK;
+ }
+}
+
+status_t MediaSourceSplitter::stop(int clientId) {
+ Mutex::Autolock autoLock(mLock);
+
+ LOGV("stop client (%d)", clientId);
+ CHECK(clientId >= 0 && clientId < mNumberOfClients);
+ CHECK(mClientsStarted[clientId]);
+
+ if (--mNumberOfClientsStarted == 0) {
+ LOGV("Stopping real source from client (%d)", clientId);
+ status_t err = mSource->stop();
+ mSourceStarted = false;
+ mClientsStarted.editItemAt(clientId) = false;
+ return err;
+ } else {
+ mClientsStarted.editItemAt(clientId) = false;
+ if (!mLastReadCompleted && (mClientsDesiredReadBit[clientId] == mCurrentReadBit)) {
+ // !mLastReadCompleted implies that a buffer has been read from the
+ // source, but not all clients have read it yet.
+ // mClientsDesiredReadBit[clientId] == mCurrentReadBit implies that this
+ // client would have wanted to read from this buffer. (i.e. it has not yet
+ // called read() for the current read buffer.)
+ // Since other threads may be waiting for all the clients' reads to complete,
+ // signal that this read has been aborted.
+ signalReadComplete_lock(true);
+ }
+ return OK;
+ }
+}
+
+sp<MetaData> MediaSourceSplitter::getFormat(int clientId) {
+ Mutex::Autolock autoLock(mLock);
+
+ LOGV("getFormat client (%d)", clientId);
+ return mSource->getFormat();
+}
+
+status_t MediaSourceSplitter::read(int clientId,
+ MediaBuffer **buffer, const MediaSource::ReadOptions *options) {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(clientId >= 0 && clientId < mNumberOfClients);
+
+ LOGV("read client (%d)", clientId);
+ *buffer = NULL;
+
+ if (!mClientsStarted[clientId]) {
+ return OK;
+ }
+
+ if (mCurrentReadBit != mClientsDesiredReadBit[clientId]) {
+ // Desired buffer has not been read from source yet.
+
+ // If the current client is the special client with clientId = 0
+ // then read from source, else wait until client 0 has finished
+ // reading from source.
+ if (clientId == 0) {
+ // Wait for all client's last read to complete first so as to not
+ // corrupt the buffer at mLastReadMediaBuffer.
+ waitForAllClientsLastRead_lock(clientId);
+
+ readFromSource_lock(options);
+ *buffer = mLastReadMediaBuffer;
+ } else {
+ waitForReadFromSource_lock(clientId);
+
+ *buffer = mLastReadMediaBuffer;
+ (*buffer)->add_ref();
+ }
+ CHECK(mCurrentReadBit == mClientsDesiredReadBit[clientId]);
+ } else {
+ // Desired buffer has already been read from source. Use the cached data.
+ CHECK(clientId != 0);
+
+ *buffer = mLastReadMediaBuffer;
+ (*buffer)->add_ref();
+ }
+
+ mClientsDesiredReadBit.editItemAt(clientId) = !mClientsDesiredReadBit[clientId];
+ signalReadComplete_lock(false);
+
+ return mLastReadStatus;
+}
+
+void MediaSourceSplitter::readFromSource_lock(const MediaSource::ReadOptions *options) {
+ mLastReadStatus = mSource->read(&mLastReadMediaBuffer, options);
+
+ mCurrentReadBit = !mCurrentReadBit;
+ mLastReadCompleted = false;
+ mReadFromSourceCondition.broadcast();
+}
+
+void MediaSourceSplitter::waitForReadFromSource_lock(int32_t clientId) {
+ mReadFromSourceCondition.wait(mLock);
+}
+
+void MediaSourceSplitter::waitForAllClientsLastRead_lock(int32_t clientId) {
+ if (mLastReadCompleted) {
+ return;
+ }
+ mAllReadsCompleteCondition.wait(mLock);
+ CHECK(mLastReadCompleted);
+}
+
+void MediaSourceSplitter::signalReadComplete_lock(bool readAborted) {
+ if (!readAborted) {
+ mNumberOfCurrentReads++;
+ }
+
+ if (mNumberOfCurrentReads == mNumberOfClientsStarted) {
+ mLastReadCompleted = true;
+ mNumberOfCurrentReads = 0;
+ mAllReadsCompleteCondition.broadcast();
+ }
+}
+
+status_t MediaSourceSplitter::pause(int clientId) {
+ return ERROR_UNSUPPORTED;
+}
+
+// Client
+
+MediaSourceSplitter::Client::Client(
+ sp<MediaSourceSplitter> splitter,
+ int32_t clientId) {
+ mSplitter = splitter;
+ mClientId = clientId;
+}
+
+status_t MediaSourceSplitter::Client::start(MetaData *params) {
+ return mSplitter->start(mClientId, params);
+}
+
+status_t MediaSourceSplitter::Client::stop() {
+ return mSplitter->stop(mClientId);
+}
+
+sp<MetaData> MediaSourceSplitter::Client::getFormat() {
+ return mSplitter->getFormat(mClientId);
+}
+
+status_t MediaSourceSplitter::Client::read(
+ MediaBuffer **buffer, const ReadOptions *options) {
+ return mSplitter->read(mClientId, buffer, options);
+}
+
+status_t MediaSourceSplitter::Client::pause() {
+ return mSplitter->pause(mClientId);
+}
+
+} // namespace android
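
A minimal two-client sketch for the splitter (using CameraSource as the illustrative real source): the real source is started by the first client start(), stopped by the last client stop(), and client 0 performs the actual reads that the other clients then share:

    // Sketch: fanning one source out to two consumers (illustration only).
    sp<MediaSource> realSource = CameraSource::Create();
    sp<MediaSourceSplitter> splitter = new MediaSourceSplitter(realSource);

    sp<MediaSource> clientA = splitter->createClient();  // clientId 0
    sp<MediaSource> clientB = splitter->createClient();  // clientId 1

    clientA->start();  // starts the real source
    clientB->start();  // joins the next read cycle

    MediaBuffer *bufA, *bufB;
    clientA->read(&bufA);  // client 0 reads from the real source
    clientB->read(&bufB);  // receives the same buffer, ref-counted
    bufA->release();
    bufB->release();

    // In practice each client runs on its own thread: a client other
    // than 0 blocks in read() until client 0 has read from the source.

    clientB->stop();
    clientA->stop();  // last stop() stops the real source
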
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 9a49a9b..2e368b6 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -39,6 +39,7 @@
#include <binder/MemoryDealer.h>
#include <binder/ProcessState.h>
#include <media/IMediaPlayerService.h>
+#include <media/stagefright/HardwareAPI.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDebug.h>
@@ -152,31 +153,38 @@ static sp<MediaSource> InstantiateSoftwareCodec(
static const CodecInfo kDecoderInfo[] = {
{ MEDIA_MIMETYPE_IMAGE_JPEG, "OMX.TI.JPEG.decode" },
+// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.Nvidia.mp3.decoder" },
// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.TI.MP3.decode" },
{ MEDIA_MIMETYPE_AUDIO_MPEG, "MP3Decoder" },
// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.PV.mp3dec" },
// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.decode" },
+// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amr.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AMR_NB, "AMRNBDecoder" },
// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.PV.amrdec" },
+// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amrwb.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.decode" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "AMRWBDecoder" },
// { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.PV.amrdec" },
+// { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.Nvidia.aac.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.decode" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "AACDecoder" },
// { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.PV.aacdec" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW, "G711Decoder" },
{ MEDIA_MIMETYPE_AUDIO_G711_MLAW, "G711Decoder" },
+// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.decode" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.decoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.decoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Decoder" },
// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.PV.mpeg4dec" },
+// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.decode" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.decoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.decoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Decoder" },
// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.PV.h263dec" },
+ { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.decode" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.decoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.decoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.Decoder" },
@@ -198,18 +206,21 @@ static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Encoder" },
// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.PV.mpeg4enc" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Encoder" },
// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.PV.h263enc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.encoder" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.SEC.AVC.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "AVCEncoder" },
// { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.PV.avcenc" },
@@ -350,6 +361,13 @@ uint32_t OMXCodec::getComponentQuirks(
const char *componentName, bool isEncoder) {
uint32_t quirks = 0;
+ if (!strcmp(componentName, "OMX.Nvidia.amr.decoder") ||
+ !strcmp(componentName, "OMX.Nvidia.amrwb.decoder") ||
+ !strcmp(componentName, "OMX.Nvidia.aac.decoder") ||
+ !strcmp(componentName, "OMX.Nvidia.mp3.decoder")) {
+ quirks |= kDecoderLiesAboutNumberOfChannels;
+ }
+
if (!strcmp(componentName, "OMX.PV.avcdec")) {
quirks |= kWantsNALFragments;
}
@@ -461,7 +479,8 @@ sp<MediaSource> OMXCodec::Create(
const sp<MetaData> &meta, bool createEncoder,
const sp<MediaSource> &source,
const char *matchComponentName,
- uint32_t flags) {
+ uint32_t flags,
+ const sp<ANativeWindow> &nativeWindow) {
const char *mime;
bool success = meta->findCString(kKeyMIMEType, &mime);
CHECK(success);
@@ -517,7 +536,7 @@ sp<MediaSource> OMXCodec::Create(
sp<OMXCodec> codec = new OMXCodec(
omx, node, quirks,
createEncoder, mime, componentName,
- source);
+ source, nativeWindow);
observer->setCodec(codec);
@@ -725,6 +744,16 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta, uint32_t flags) {
mQuirks &= ~kOutputBuffersAreUnreadable;
}
+ if (mNativeWindow != NULL
+ && !mIsEncoder
+ && !strncasecmp(mMIME, "video/", 6)
+ && !strncmp(mComponentName, "OMX.", 4)) {
+ status_t err = initNativeWindow();
+ if (err != OK) {
+ return err;
+ }
+ }
+
return OK;
}
@@ -1283,6 +1312,10 @@ status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
h264type.bMBAFF = OMX_FALSE;
h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable;
+ if (!strcasecmp("OMX.Nvidia.h264.encoder", mComponentName)) {
+ h264type.eLevel = OMX_VIDEO_AVCLevelMax;
+ }
+
err = mOMX->setParameter(
mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
CHECK_EQ(err, OK);
@@ -1408,7 +1441,8 @@ OMXCodec::OMXCodec(
bool isEncoder,
const char *mime,
const char *componentName,
- const sp<MediaSource> &source)
+ const sp<MediaSource> &source,
+ const sp<ANativeWindow> &nativeWindow)
: mOMX(omx),
mOMXLivesLocally(omx->livesLocally(getpid())),
mNode(node),
@@ -1428,7 +1462,8 @@ OMXCodec::OMXCodec(
mTargetTimeUs(-1),
mSkipTimeUs(-1),
mLeftOverBuffer(NULL),
- mPaused(false) {
+ mPaused(false),
+ mNativeWindow(nativeWindow) {
mPortStatus[kPortIndexInput] = ENABLED;
mPortStatus[kPortIndexOutput] = ENABLED;
@@ -1572,6 +1607,10 @@ status_t OMXCodec::allocateBuffers() {
}
status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
+ if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
+ return allocateOutputBuffersFromNativeWindow();
+ }
+
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
@@ -1638,6 +1677,7 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
info.mBuffer = buffer;
info.mOwnedByComponent = false;
+ info.mOwnedByNativeWindow = false;
info.mMem = mem;
info.mMediaBuffer = NULL;
@@ -1664,6 +1704,178 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
return OK;
}
+status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
+ // Get the number of buffers needed.
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+
+ status_t err = mOMX->getParameter(
+ mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ if (err != OK) {
+ return err;
+ }
+
+ // Check that the color format is in the correct range.
+ CHECK(OMX_COLOR_FormatAndroidPrivateStart <= def.format.video.eColorFormat);
+ CHECK(def.format.video.eColorFormat < OMX_COLOR_FormatAndroidPrivateEnd);
+
+ err = native_window_set_buffers_geometry(
+ mNativeWindow.get(),
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight,
+ def.format.video.eColorFormat
+ - OMX_COLOR_FormatAndroidPrivateStart);
+
+ if (err != 0) {
+ LOGE("native_window_set_buffers_geometry failed: %s (%d)",
+ strerror(-err), -err);
+ return err;
+ }
+
+ // Increase the buffer count by one to allow for the ANativeWindow to hold
+ // on to one of the buffers.
+ def.nBufferCountActual++;
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ if (err != OK) {
+ return err;
+ }
+
+ // Set up the native window.
+ // XXX TODO: Get the gralloc usage flags from the OMX plugin!
+ err = native_window_set_usage(
+ mNativeWindow.get(), GRALLOC_USAGE_HW_TEXTURE);
+ if (err != 0) {
+ LOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
+ return err;
+ }
+
+ err = native_window_set_buffer_count(
+ mNativeWindow.get(), def.nBufferCountActual);
+ if (err != 0) {
+ LOGE("native_window_set_buffer_count failed: %s (%d)", strerror(-err),
+ -err);
+ return err;
+ }
+
+ // XXX TODO: Do something so the ANativeWindow knows that we'll need to get
+ // the same set of buffers.
+
+ CODEC_LOGI("allocating %lu buffers from a native window of size %lu on "
+ "output port", def.nBufferCountActual, def.nBufferSize);
+
+ // Dequeue buffers and send them to OMX
+ OMX_U32 i;
+ for (i = 0; i < def.nBufferCountActual; i++) {
+ android_native_buffer_t* buf;
+ err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf);
+ if (err != 0) {
+ LOGE("dequeueBuffer failed: %s (%d)", strerror(-err), -err);
+ break;
+ }
+
+ sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
+ IOMX::buffer_id bufferId;
+ err = mOMX->useGraphicBuffer(mNode, kPortIndexOutput, graphicBuffer,
+ &bufferId);
+ if (err != 0) {
+ break;
+ }
+
+ CODEC_LOGV("registered graphic buffer with ID %p (pointer = %p)",
+ bufferId, graphicBuffer.get());
+
+ BufferInfo info;
+ info.mData = NULL;
+ info.mSize = def.nBufferSize;
+ info.mBuffer = bufferId;
+ info.mOwnedByComponent = false;
+ info.mOwnedByNativeWindow = false;
+ info.mMem = NULL;
+ info.mMediaBuffer = new MediaBuffer(graphicBuffer);
+ info.mMediaBuffer->setObserver(this);
+
+ mPortBuffers[kPortIndexOutput].push(info);
+ }
+
+ OMX_U32 cancelStart;
+ OMX_U32 cancelEnd;
+
+ if (err != 0) {
+ // If an error occurred while dequeuing, we need to cancel any
+ // buffers that were already dequeued.
+ cancelStart = 0;
+ cancelEnd = i;
+ } else {
+ // Return the last two buffers to the native window.
+ // XXX TODO: The number of buffers the native window owns should probably be
+ // queried from it when we put the native window in fixed buffer pool mode
+ // (which needs to be implemented). Currently it's hard-coded to 2.
+ cancelStart = def.nBufferCountActual - 2;
+ cancelEnd = def.nBufferCountActual;
+ }
+
+ for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
+ BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(i);
+ cancelBufferToNativeWindow(info);
+ }
+
+ return err;
+}
+
+status_t OMXCodec::cancelBufferToNativeWindow(BufferInfo *info) {
+ CHECK(!info->mOwnedByNativeWindow);
+ CODEC_LOGV("Calling cancelBuffer on buffer %p", info->mBuffer);
+ int err = mNativeWindow->cancelBuffer(
+ mNativeWindow.get(), info->mMediaBuffer->graphicBuffer().get());
+ if (err != 0) {
+ CODEC_LOGE("cancelBuffer failed w/ error 0x%08x", err);
+
+ setState(ERROR);
+ return err;
+ }
+ info->mOwnedByNativeWindow = true;
+ return OK;
+}
+
+OMXCodec::BufferInfo* OMXCodec::dequeueBufferFromNativeWindow() {
+ // Dequeue the next buffer from the native window.
+ android_native_buffer_t* buf;
+ int err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf);
+ if (err != 0) {
+ CODEC_LOGE("dequeueBuffer failed w/ error 0x%08x", err);
+
+ setState(ERROR);
+ return 0;
+ }
+
+ // Determine which buffer we just dequeued.
+ Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
+ BufferInfo *bufInfo = 0;
+ for (size_t i = 0; i < buffers->size(); i++) {
+ sp<GraphicBuffer> graphicBuffer = buffers->itemAt(i).
+ mMediaBuffer->graphicBuffer();
+ if (graphicBuffer->handle == buf->handle) {
+ bufInfo = &buffers->editItemAt(i);
+ break;
+ }
+ }
+
+ if (bufInfo == 0) {
+ CODEC_LOGE("dequeued unrecognized buffer: %p", buf);
+
+ setState(ERROR);
+ return 0;
+ }
+
+ // The native window no longer owns the buffer.
+ CHECK(bufInfo->mOwnedByNativeWindow);
+ bufInfo->mOwnedByNativeWindow = false;
+
+ return bufInfo;
+}
+
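// Assumed ownership cycle for one output buffer when a native window is
// attached (pieced together from the helpers above; simplified):
//
//   codec --fillBuffer()-----------------------> component owns it
//   component --FILL_BUFFER_DONE---------------> client owns the MediaBuffer
//   client --render + queueBuffer()------------> native window owns it
//   window --dequeueBufferFromNativeWindow()---> codec, fillBuffer() again
//
// cancelBufferToNativeWindow() short-circuits the render step by handing
// an unrendered buffer straight back to the window.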
void OMXCodec::on_message(const omx_message &msg) {
Mutex::Autolock autoLock(mLock);
@@ -1748,6 +1960,15 @@ void OMXCodec::on_message(const omx_message &msg) {
mOMX->freeBuffer(mNode, kPortIndexOutput, buffer);
CHECK_EQ(err, OK);
+ // Cancel the buffer if it belongs to an ANativeWindow.
+ if (info->mMediaBuffer != NULL) {
+ sp<GraphicBuffer> graphicBuffer = info->mMediaBuffer->graphicBuffer();
+ if (!info->mOwnedByNativeWindow && graphicBuffer != 0) {
+ cancelBufferToNativeWindow(info);
+ // Ignore any errors
+ }
+ }
+
buffers->removeAt(i);
#if 0
} else if (mPortStatus[kPortIndexOutput] == ENABLED
@@ -1776,8 +1997,10 @@ void OMXCodec::on_message(const omx_message &msg) {
}
MediaBuffer *buffer = info->mMediaBuffer;
+ bool isGraphicBuffer = buffer->graphicBuffer() != NULL;
- if (msg.u.extended_buffer_data.range_offset
+ if (!isGraphicBuffer
+ && msg.u.extended_buffer_data.range_offset
+ msg.u.extended_buffer_data.range_length
> buffer->size()) {
CODEC_LOGE(
@@ -1801,7 +2024,7 @@ void OMXCodec::on_message(const omx_message &msg) {
buffer->meta_data()->setInt32(kKeyIsCodecConfig, true);
}
- if (mQuirks & kOutputBuffersAreUnreadable) {
+ if (isGraphicBuffer || mQuirks & kOutputBuffersAreUnreadable) {
buffer->meta_data()->setInt32(kKeyIsUnreadable, true);
}
@@ -1871,7 +2094,32 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
case OMX_EventPortSettingsChanged:
{
- onPortSettingsChanged(data1);
+ CODEC_LOGV("OMX_EventPortSettingsChanged(port=%ld, data2=0x%08lx)",
+ data1, data2);
+
+ if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
+ onPortSettingsChanged(data1);
+ } else if (data1 == kPortIndexOutput
+ && data2 == OMX_IndexConfigCommonOutputCrop) {
+
+ OMX_CONFIG_RECTTYPE rect;
+ rect.nPortIndex = kPortIndexOutput;
+ InitOMXParams(&rect);
+
+ status_t err =
+ mOMX->getConfig(
+ mNode, OMX_IndexConfigCommonOutputCrop,
+ &rect, sizeof(rect));
+
+ if (err == OK) {
+ CODEC_LOGV(
+ "output crop (%ld, %ld, %ld, %ld)",
+ rect.nLeft, rect.nTop, rect.nWidth, rect.nHeight);
+ } else {
+ CODEC_LOGE("getConfig(OMX_IndexConfigCommonOutputCrop) "
+ "returned error 0x%08x", err);
+ }
+ }
break;
}
@@ -2201,6 +2449,15 @@ status_t OMXCodec::freeBuffersOnPort(
// Make sure nobody but us owns this buffer at this point.
CHECK_EQ(info->mMediaBuffer->refcount(), 0);
+ // Cancel the buffer if it belongs to an ANativeWindow.
+ sp<GraphicBuffer> graphicBuffer = info->mMediaBuffer->graphicBuffer();
+ if (!info->mOwnedByNativeWindow && graphicBuffer != 0) {
+ status_t err = cancelBufferToNativeWindow(info);
+ if (err != OK) {
+ stickyErr = err;
+ }
+ }
+
info->mMediaBuffer->release();
}
@@ -2261,6 +2518,7 @@ void OMXCodec::disablePortAsync(OMX_U32 portIndex) {
CHECK_EQ(mPortStatus[portIndex], ENABLED);
mPortStatus[portIndex] = DISABLING;
+ CODEC_LOGV("sending OMX_CommandPortDisable(%ld)", portIndex);
status_t err =
mOMX->sendCommand(mNode, OMX_CommandPortDisable, portIndex);
CHECK_EQ(err, OK);
@@ -2274,6 +2532,7 @@ void OMXCodec::enablePortAsync(OMX_U32 portIndex) {
CHECK_EQ(mPortStatus[portIndex], DISABLED);
mPortStatus[portIndex] = ENABLING;
+ CODEC_LOGV("sending OMX_CommandPortEnable(%ld)", portIndex);
status_t err =
mOMX->sendCommand(mNode, OMX_CommandPortEnable, portIndex);
CHECK_EQ(err, OK);
@@ -2299,7 +2558,10 @@ void OMXCodec::fillOutputBuffers() {
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
for (size_t i = 0; i < buffers->size(); ++i) {
- fillOutputBuffer(&buffers->editItemAt(i));
+ BufferInfo *info = &buffers->editItemAt(i);
+ if (!info->mOwnedByNativeWindow) {
+ fillOutputBuffer(&buffers->editItemAt(i));
+ }
}
}
@@ -2516,7 +2778,23 @@ void OMXCodec::fillOutputBuffer(BufferInfo *info) {
return;
}
- CODEC_LOGV("Calling fill_buffer on buffer %p", info->mBuffer);
+ sp<GraphicBuffer> graphicBuffer = info->mMediaBuffer->graphicBuffer();
+ if (graphicBuffer != 0) {
+ // When using a native buffer, we need to lock it before giving
+ // it to OMX.
+ CHECK(!info->mOwnedByNativeWindow);
+ CODEC_LOGV("Calling lockBuffer on %p", info->mBuffer);
+ int err = mNativeWindow->lockBuffer(mNativeWindow.get(),
+ graphicBuffer.get());
+ if (err != 0) {
+ CODEC_LOGE("lockBuffer failed w/ error 0x%08x", err);
+
+ setState(ERROR);
+ return;
+ }
+ }
+
+ CODEC_LOGV("Calling fillBuffer on buffer %p", info->mBuffer);
status_t err = mOMX->fillBuffer(mNode, info->mBuffer);
if (err != OK) {
@@ -3099,7 +3377,32 @@ void OMXCodec::signalBufferReturned(MediaBuffer *buffer) {
if (info->mMediaBuffer == buffer) {
CHECK_EQ(mPortStatus[kPortIndexOutput], ENABLED);
- fillOutputBuffer(info);
+ if (buffer->graphicBuffer() == 0) {
+ fillOutputBuffer(info);
+ } else {
+ sp<MetaData> metaData = info->mMediaBuffer->meta_data();
+ int32_t rendered = 0;
+ if (!metaData->findInt32(kKeyRendered, &rendered)) {
+ rendered = 0;
+ }
+ if (!rendered) {
+ status_t err = cancelBufferToNativeWindow(info);
+ if (err < 0) {
+ return;
+ }
+ } else {
+ info->mOwnedByNativeWindow = true;
+ }
+
+ // Dequeue the next buffer from the native window.
+ BufferInfo *nextBufInfo = dequeueBufferFromNativeWindow();
+ if (nextBufInfo == 0) {
+ return;
+ }
+
+ // Give the buffer to the OMX node to fill.
+ fillOutputBuffer(nextBufInfo);
+ }
return;
}
}
@@ -3432,6 +3735,18 @@ void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
printf("}\n");
}
+status_t OMXCodec::initNativeWindow() {
+ // Enable use of a GraphicBuffer as the output for this node. This must
+ // happen before getting the IndexParamPortDefinition parameter because it
+ // will affect the pixel format that the node reports.
+ status_t err = mOMX->enableGraphicBuffers(mNode, kPortIndexOutput, OMX_TRUE);
+ if (err != 0) {
+ return err;
+ }
+
+ return OK;
+}
+
void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
mOutputFormat = new MetaData;
mOutputFormat->setCString(kKeyDecoderComponent, mComponentName);
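Taken together, fillOutputBuffer(), signalBufferReturned() and the native-window helpers above imply a per-frame cycle roughly like the sketch below. This is an illustration only: bufferIdFor() is a hypothetical lookup and all error handling is elided.

    // Sketch of the per-frame buffer cycle with native-window output
    // (mirrors the calls introduced in this change; not authoritative).
    void cycleOneFrame(const sp<ANativeWindow> &window,
                       const sp<GraphicBuffer> &rendered) {
        // 1. Queue the rendered buffer for display; the native window
        //    now owns it.
        window->queueBuffer(window.get(), rendered->getNativeBuffer());

        // 2. Get the next free buffer back from the window ...
        android_native_buffer_t *buf;
        window->dequeueBuffer(window.get(), &buf);

        // 3. ... lock it, then hand it to the component to fill.
        window->lockBuffer(window.get(), buf);
        // mOMX->fillBuffer(mNode, bufferIdFor(buf));  // hypothetical lookup
    }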
diff --git a/media/libstagefright/VideoSourceDownSampler.cpp b/media/libstagefright/VideoSourceDownSampler.cpp
new file mode 100644
index 0000000..ea7b09a
--- /dev/null
+++ b/media/libstagefright/VideoSourceDownSampler.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoSourceDownSampler"
+
+#include <media/stagefright/VideoSourceDownSampler.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/YUVImage.h>
+#include <media/stagefright/YUVCanvas.h>
+#include "OMX_Video.h"
+
+namespace android {
+
+VideoSourceDownSampler::VideoSourceDownSampler(const sp<MediaSource> &videoSource,
+ int32_t width, int32_t height) {
+ LOGV("Construct VideoSourceDownSampler");
+ CHECK(width > 0);
+ CHECK(height > 0);
+
+ mRealVideoSource = videoSource;
+ mWidth = width;
+ mHeight = height;
+
+ mMeta = new MetaData(*(mRealVideoSource->getFormat()));
+ CHECK(mMeta->findInt32(kKeyWidth, &mRealSourceWidth));
+ CHECK(mMeta->findInt32(kKeyHeight, &mRealSourceHeight));
+
+ if ((mWidth != mRealSourceWidth) || (mHeight != mRealSourceHeight)) {
+ // Change meta data for width and height.
+ CHECK(mWidth <= mRealSourceWidth);
+ CHECK(mHeight <= mRealSourceHeight);
+
+ mNeedDownSampling = true;
+ computeDownSamplingParameters();
+ mMeta->setInt32(kKeyWidth, mWidth);
+ mMeta->setInt32(kKeyHeight, mHeight);
+ } else {
+ mNeedDownSampling = false;
+ }
+}
+
+VideoSourceDownSampler::~VideoSourceDownSampler() {
+}
+
+void VideoSourceDownSampler::computeDownSamplingParameters() {
+ mDownSampleSkipX = mRealSourceWidth / mWidth;
+ mDownSampleSkipY = mRealSourceHeight / mHeight;
+
+ mDownSampleOffsetX = mRealSourceWidth - mDownSampleSkipX * mWidth;
+ mDownSampleOffsetY = mRealSourceHeight - mDownSampleSkipY * mHeight;
+}
+
+void VideoSourceDownSampler::downSampleYUVImage(
+ const MediaBuffer &sourceBuffer, MediaBuffer **buffer) const {
+ // Find the YUV format.
+ int32_t srcFormat;
+ CHECK(mMeta->findInt32(kKeyColorFormat, &srcFormat));
+ YUVImage::YUVFormat yuvFormat;
+ if (srcFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ yuvFormat = YUVImage::YUV420SemiPlanar;
+ } else if (srcFormat == OMX_COLOR_FormatYUV420Planar) {
+ yuvFormat = YUVImage::YUV420Planar;
+ } else {
+ // Guard against reading yuvFormat uninitialized below.
+ CHECK(!"Unsupported source color format");
+ }
+
+ // Allocate a MediaBuffer for the downsampled image and set up a canvas.
+ *buffer = new MediaBuffer(YUVImage::bufferSize(yuvFormat, mWidth, mHeight));
+ YUVImage yuvDownSampledImage(yuvFormat,
+ mWidth, mHeight,
+ (uint8_t *)(*buffer)->data());
+ YUVCanvas yuvCanvasDownSample(yuvDownSampledImage);
+
+ YUVImage yuvImageSource(yuvFormat,
+ mRealSourceWidth, mRealSourceHeight,
+ (uint8_t *)sourceBuffer.data());
+ yuvCanvasDownSample.downsample(mDownSampleOffsetX, mDownSampleOffsetY,
+ mDownSampleSkipX, mDownSampleSkipY,
+ yuvImageSource);
+}
+
+status_t VideoSourceDownSampler::start(MetaData *params) {
+ LOGV("start");
+ return mRealVideoSource->start();
+}
+
+status_t VideoSourceDownSampler::stop() {
+ LOGV("stop");
+ return mRealVideoSource->stop();
+}
+
+sp<MetaData> VideoSourceDownSampler::getFormat() {
+ LOGV("getFormat");
+ return mMeta;
+}
+
+status_t VideoSourceDownSampler::read(
+ MediaBuffer **buffer, const ReadOptions *options) {
+ LOGV("read");
+ MediaBuffer *realBuffer;
+ status_t err = mRealVideoSource->read(&realBuffer, options);
+
+ if (mNeedDownSampling) {
+ downSampleYUVImage(*realBuffer, buffer);
+
+ int64_t frameTime;
+ realBuffer->meta_data()->findInt64(kKeyTime, &frameTime);
+ (*buffer)->meta_data()->setInt64(kKeyTime, frameTime);
+
+ // We just want this buffer to be deleted when the encoder releases it,
+ // so don't add a reference to it; instead set the observer to NULL.
+ (*buffer)->setObserver(NULL);
+
+ // The original buffer is no longer required. Release it.
+ realBuffer->release();
+ } else {
+ *buffer = realBuffer;
+ }
+
+ return err;
+}
+
+status_t VideoSourceDownSampler::pause() {
+ LOGV("pause");
+ return mRealVideoSource->pause();
+}
+
+} // namespace android
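The arithmetic in computeDownSamplingParameters() is easiest to verify with concrete numbers (the values below are assumed for illustration): the integer quotients become the sampling strides and the remainders become starting offsets, which keeps the last sample inside the source image.

    // Worked example for computeDownSamplingParameters():
    int32_t srcW = 176, srcH = 144;  // source geometry
    int32_t dstW = 80,  dstH = 60;   // requested geometry

    int32_t skipX = srcW / dstW;            // 2: sample every 2nd column
    int32_t skipY = srcH / dstH;            // 2: sample every 2nd row
    int32_t offsetX = srcW - skipX * dstW;  // 16: leftover columns -> start offset
    int32_t offsetY = srcH - skipY * dstH;  // 24: leftover rows -> start offset
    // Last sampled column: offsetX + (dstW - 1) * skipX = 174 < 176. In bounds.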
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 0dcbd73..ef2dba0 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -6,7 +6,8 @@ LOCAL_SRC_FILES:= \
SoftwareRenderer.cpp
LOCAL_C_INCLUDES := \
- $(TOP)/frameworks/base/include/media/stagefright/openmax
+ $(TOP)/frameworks/base/include/media/stagefright/openmax \
+ $(TOP)/hardware/msm7k
LOCAL_SHARED_LIBRARIES := \
libbinder \
@@ -17,6 +18,11 @@ LOCAL_SHARED_LIBRARIES := \
libsurfaceflinger_client\
libcamera_client
+# ifeq ($(TARGET_BOARD_PLATFORM),msm7k)
+ifeq ($(TARGET_PRODUCT),passion)
+ LOCAL_CFLAGS += -DHAS_YCBCR420_SP_ADRENO
+endif
+
LOCAL_MODULE:= libstagefright_color_conversion
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index a6dbf69..662a84a 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -22,65 +22,182 @@
#include <binder/MemoryHeapBase.h>
#include <binder/MemoryHeapPmem.h>
#include <media/stagefright/MediaDebug.h>
-#include <surfaceflinger/ISurface.h>
+#include <surfaceflinger/Surface.h>
+#include <ui/android_native_buffer.h>
+#include <ui/GraphicBufferMapper.h>
+
+// XXX: Temporary hack to allow referencing the _ADRENO pixel format here.
+#include <libgralloc-qsd8k/gralloc_priv.h>
namespace android {
SoftwareRenderer::SoftwareRenderer(
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight)
: mColorFormat(colorFormat),
- mConverter(colorFormat, OMX_COLOR_Format16bitRGB565),
- mISurface(surface),
+ mConverter(NULL),
+ mYUVMode(None),
+ mSurface(surface),
mDisplayWidth(displayWidth),
mDisplayHeight(displayHeight),
mDecodedWidth(decodedWidth),
- mDecodedHeight(decodedHeight),
- mFrameSize(mDecodedWidth * mDecodedHeight * 2), // RGB565
- mIndex(0) {
- mMemoryHeap = new MemoryHeapBase("/dev/pmem_adsp", 2 * mFrameSize);
- if (mMemoryHeap->heapID() < 0) {
- LOGI("Creating physical memory heap failed, reverting to regular heap.");
- mMemoryHeap = new MemoryHeapBase(2 * mFrameSize);
- } else {
- sp<MemoryHeapPmem> pmemHeap = new MemoryHeapPmem(mMemoryHeap);
- pmemHeap->slap();
- mMemoryHeap = pmemHeap;
+ mDecodedHeight(decodedHeight) {
+ LOGI("input format = %d", mColorFormat);
+ LOGI("display = %d x %d, decoded = %d x %d",
+ mDisplayWidth, mDisplayHeight, mDecodedWidth, mDecodedHeight);
+
+ mDecodedWidth = mDisplayWidth;
+ mDecodedHeight = mDisplayHeight;
+
+ int halFormat;
+ switch (mColorFormat) {
+#if HAS_YCBCR420_SP_ADRENO
+ case OMX_COLOR_FormatYUV420Planar:
+ {
+ halFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO;
+ mYUVMode = YUV420ToYUV420sp;
+ break;
+ }
+
+ case 0x7fa30c00:
+ {
+ halFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO;
+ mYUVMode = YUV420spToYUV420sp;
+ break;
+ }
+#endif
+
+ default:
+ halFormat = HAL_PIXEL_FORMAT_RGB_565;
+
+ mConverter = new ColorConverter(
+ mColorFormat, OMX_COLOR_Format16bitRGB565);
+ CHECK(mConverter->isValid());
+ break;
}
- CHECK(mISurface.get() != NULL);
+ CHECK(mSurface.get() != NULL);
CHECK(mDecodedWidth > 0);
CHECK(mDecodedHeight > 0);
- CHECK(mMemoryHeap->heapID() >= 0);
- CHECK(mConverter.isValid());
+ CHECK(mConverter == NULL || mConverter->isValid());
- ISurface::BufferHeap bufferHeap(
- mDisplayWidth, mDisplayHeight,
- mDecodedWidth, mDecodedHeight,
- PIXEL_FORMAT_RGB_565,
- mMemoryHeap);
+ CHECK_EQ(0,
+ native_window_set_usage(
+ mSurface.get(),
+ GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
+ | GRALLOC_USAGE_HW_TEXTURE));
- status_t err = mISurface->registerBuffers(bufferHeap);
- CHECK_EQ(err, OK);
+ CHECK_EQ(0, native_window_set_buffer_count(mSurface.get(), 2));
+
+ // Width must be multiple of 32???
+ CHECK_EQ(0, native_window_set_buffers_geometry(
+ mSurface.get(), mDecodedWidth, mDecodedHeight,
+ halFormat));
}
SoftwareRenderer::~SoftwareRenderer() {
- mISurface->unregisterBuffers();
+ delete mConverter;
+ mConverter = NULL;
+}
+
+static inline size_t ALIGN(size_t x, size_t alignment) {
+ return (x + alignment - 1) & ~(alignment - 1);
}
void SoftwareRenderer::render(
const void *data, size_t size, void *platformPrivate) {
- size_t offset = mIndex * mFrameSize;
- void *dst = (uint8_t *)mMemoryHeap->getBase() + offset;
+ android_native_buffer_t *buf;
+ int err;
+ if ((err = mSurface->dequeueBuffer(mSurface.get(), &buf)) != 0) {
+ LOGW("Surface::dequeueBuffer returned error %d", err);
+ return;
+ }
+
+ CHECK_EQ(0, mSurface->lockBuffer(mSurface.get(), buf));
+
+ GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+
+ Rect bounds(mDecodedWidth, mDecodedHeight);
- mConverter.convert(
- mDecodedWidth, mDecodedHeight,
- data, 0, dst, 2 * mDecodedWidth);
+ void *dst;
+ CHECK_EQ(0, mapper.lock(
+ buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));
- mISurface->postBuffer(offset);
- mIndex = 1 - mIndex;
+ if (mConverter) {
+ mConverter->convert(
+ mDecodedWidth, mDecodedHeight,
+ data, 0, dst, buf->stride * 2);
+ } else if (mYUVMode == YUV420spToYUV420sp) {
+ // Input and output are both YUV420sp, but the alignment requirements
+ // are different.
+ size_t srcYStride = mDecodedWidth;
+ const uint8_t *srcY = (const uint8_t *)data;
+ uint8_t *dstY = (uint8_t *)dst;
+ for (size_t i = 0; i < mDecodedHeight; ++i) {
+ memcpy(dstY, srcY, mDecodedWidth);
+ srcY += srcYStride;
+ dstY += buf->stride;
+ }
+
+ size_t srcUVStride = (mDecodedWidth + 1) & ~1;
+ size_t dstUVStride = ALIGN(mDecodedWidth / 2, 32) * 2;
+
+ const uint8_t *srcUV = (const uint8_t *)data
+ + mDecodedHeight * mDecodedWidth;
+
+ size_t dstUVOffset = ALIGN(ALIGN(mDecodedHeight, 32) * buf->stride, 4096);
+ uint8_t *dstUV = (uint8_t *)dst + dstUVOffset;
+
+ for (size_t i = 0; i < (mDecodedHeight + 1) / 2; ++i) {
+ memcpy(dstUV, srcUV, (mDecodedWidth + 1) & ~1);
+ srcUV += srcUVStride;
+ dstUV += dstUVStride;
+ }
+ } else if (mYUVMode == YUV420ToYUV420sp) {
+ // Input is YUV420 planar and output is YUV420sp; write the output
+ // with the proper alignment requirements.
+ size_t srcYStride = mDecodedWidth;
+ const uint8_t *srcY = (const uint8_t *)data;
+ uint8_t *dstY = (uint8_t *)dst;
+ for (size_t i = 0; i < mDecodedHeight; ++i) {
+ memcpy(dstY, srcY, mDecodedWidth);
+ srcY += srcYStride;
+ dstY += buf->stride;
+ }
+
+ size_t srcUVStride = (mDecodedWidth + 1) / 2;
+ size_t dstUVStride = ALIGN(mDecodedWidth / 2, 32) * 2;
+
+ const uint8_t *srcU = (const uint8_t *)data
+ + mDecodedHeight * mDecodedWidth;
+
+ const uint8_t *srcV =
+ srcU + ((mDecodedWidth + 1) / 2) * ((mDecodedHeight + 1) / 2);
+
+ size_t dstUVOffset = ALIGN(ALIGN(mDecodedHeight, 32) * buf->stride, 4096);
+ uint8_t *dstUV = (uint8_t *)dst + dstUVOffset;
+
+ for (size_t i = 0; i < (mDecodedHeight + 1) / 2; ++i) {
+ for (size_t j = 0; j < (mDecodedWidth + 1) / 2; ++j) {
+ dstUV[2 * j + 1] = srcU[j];
+ dstUV[2 * j] = srcV[j];
+ }
+ srcU += srcUVStride;
+ srcV += srcUVStride;
+ dstUV += dstUVStride;
+ }
+ } else {
+ memcpy(dst, data, size);
+ }
+
+ CHECK_EQ(0, mapper.unlock(buf->handle));
+
+ if ((err = mSurface->queueBuffer(mSurface.get(), buf)) != 0) {
+ LOGW("Surface::queueBuffer returned error %d", err);
+ }
+ buf = NULL;
}
} // namespace android
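The two YUV420sp branches above encode the destination layout implied by the Adreno format: luma rows use the window's stride, the chroma plane begins at a 4096-byte-aligned offset past a 32-row-aligned luma region, and the chroma stride is the half-width aligned to 32, then doubled for VU interleaving. A worked sketch with an assumed geometry:

    static inline size_t ALIGN(size_t x, size_t alignment) {
        return (x + alignment - 1) & ~(alignment - 1);
    }

    // Illustrative numbers for a 320x240 frame with a 320-pixel stride:
    size_t w = 320, h = 240, stride = 320;
    size_t dstUVStride = ALIGN(w / 2, 32) * 2;  // ALIGN(160, 32) = 160; * 2 = 320
    size_t dstUVOffset = ALIGN(ALIGN(h, 32) * stride, 4096);
    // ALIGN(240, 32) = 256 rows; 256 * 320 = 81920, already 4096-aligned.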
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 4526bf1..a0a7436 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -80,6 +80,7 @@ struct AwesomePlayer {
bool isPlaying() const;
void setISurface(const sp<ISurface> &isurface);
+ void setSurface(const sp<Surface> &surface);
void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);
status_t setLooping(bool shouldLoop);
@@ -114,6 +115,11 @@ private:
AUDIO_AT_EOS = 256,
VIDEO_AT_EOS = 512,
AUTO_LOOPING = 1024,
+
+ // We are basically done preparing but are currently buffering
+ // sufficient data to begin playback and finish the preparation phase
+ // for good.
+ PREPARING_CONNECTED = 2048,
};
mutable Mutex mLock;
@@ -125,6 +131,7 @@ private:
wp<MediaPlayerBase> mListener;
sp<ISurface> mISurface;
+ sp<Surface> mSurface;
sp<MediaPlayerBase::AudioSink> mAudioSink;
SystemTimeSource mSystemTimeSource;
@@ -233,6 +240,7 @@ private:
status_t seekTo_l(int64_t timeUs);
status_t pause_l(bool at_eos = false);
void initRenderer_l();
+ void notifyVideoSize_l();
void seekAudioIfNecessary_l();
void cancelPlayerEvents(bool keepBufferingGoing = false);
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index c99da59..5a6c96f 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -59,10 +59,20 @@ public:
node_id node, OMX_INDEXTYPE index,
const void *params, size_t size);
+ virtual status_t enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
+ virtual status_t storeMetaDataInBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer);
+ virtual status_t useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
+
virtual status_t allocateBuffer(
node_id node, OMX_U32 port_index, size_t size,
buffer_id *buffer, void **buffer_data);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index b5b31ac..86c102c 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -49,10 +49,17 @@ struct OMXNodeInstance {
status_t getConfig(OMX_INDEXTYPE index, void *params, size_t size);
status_t setConfig(OMX_INDEXTYPE index, const void *params, size_t size);
+ status_t enableGraphicBuffers(OMX_U32 portIndex, OMX_BOOL enable);
+ status_t storeMetaDataInBuffers(OMX_U32 portIndex, OMX_BOOL enable);
+
status_t useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
OMX::buffer_id *buffer);
+ status_t useGraphicBuffer(
+ OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+ OMX::buffer_id *buffer);
+
status_t allocateBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data);
@@ -125,4 +132,3 @@ private:
} // namespace android
#endif // OMX_NODE_INSTANCE_H_
-
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 9eed089..8d58056 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -24,14 +24,14 @@
namespace android {
-class ISurface;
+class Surface;
class MemoryHeapBase;
class SoftwareRenderer : public VideoRenderer {
public:
SoftwareRenderer(
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight);
@@ -41,14 +41,18 @@ public:
const void *data, size_t size, void *platformPrivate);
private:
+ enum YUVMode {
+ None,
+ YUV420ToYUV420sp,
+ YUV420spToYUV420sp,
+ };
+
OMX_COLOR_FORMATTYPE mColorFormat;
- ColorConverter mConverter;
- sp<ISurface> mISurface;
+ ColorConverter *mConverter;
+ YUVMode mYUVMode;
+ sp<Surface> mSurface;
size_t mDisplayWidth, mDisplayHeight;
size_t mDecodedWidth, mDecodedHeight;
- size_t mFrameSize;
- sp<MemoryHeapBase> mMemoryHeap;
- int mIndex;
SoftwareRenderer(const SoftwareRenderer &);
SoftwareRenderer &operator=(const SoftwareRenderer &);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index c927da1..f9f638f 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -289,6 +289,16 @@ status_t OMX::setConfig(
index, params, size);
}
+status_t OMX::enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+ return findInstance(node)->enableGraphicBuffers(port_index, enable);
+}
+
+status_t OMX::storeMetaDataInBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+ return findInstance(node)->storeMetaDataInBuffers(port_index, enable);
+}
+
status_t OMX::useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer) {
@@ -296,6 +306,13 @@ status_t OMX::useBuffer(
port_index, params, buffer);
}
+status_t OMX::useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
+ return findInstance(node)->useGraphicBuffer(
+ port_index, graphicBuffer, buffer);
+}
+
status_t OMX::allocateBuffer(
node_id node, OMX_U32 port_index, size_t size,
buffer_id *buffer, void **buffer_data) {
@@ -497,12 +514,17 @@ sp<IOMXRenderer> OMX::createRenderer(
}
if (!impl) {
+#if 0
LOGW("Using software renderer.");
impl = new SoftwareRenderer(
colorFormat,
surface,
displayWidth, displayHeight,
encodedWidth, encodedHeight);
+#else
+ CHECK(!"Should not be here.");
+ return NULL;
+#endif
}
return new OMXRenderer(impl);
@@ -527,4 +549,3 @@ void OMXRenderer::render(IOMX::buffer_id buffer) {
}
} // namespace android
-
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 5db516e..9b6d441 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -24,6 +24,7 @@
#include <OMX_Component.h>
#include <binder/IMemory.h>
+#include <media/stagefright/HardwareAPI.h>
#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MediaErrors.h>
@@ -40,6 +41,11 @@ struct BufferMeta {
mIsBackup(false) {
}
+ BufferMeta(const sp<GraphicBuffer> &graphicBuffer)
+ : mGraphicBuffer(graphicBuffer),
+ mIsBackup(false) {
+ }
+
void CopyFromOMX(const OMX_BUFFERHEADERTYPE *header) {
if (!mIsBackup) {
return;
@@ -61,6 +67,7 @@ struct BufferMeta {
}
private:
+ sp<GraphicBuffer> mGraphicBuffer;
sp<IMemory> mMem;
size_t mSize;
bool mIsBackup;
@@ -240,6 +247,74 @@ status_t OMXNodeInstance::setConfig(
return StatusFromOMXError(err);
}
+status_t OMXNodeInstance::enableGraphicBuffers(
+ OMX_U32 portIndex, OMX_BOOL enable) {
+ Mutex::Autolock autoLock(mLock);
+
+ OMX_INDEXTYPE index;
+ OMX_ERRORTYPE err = OMX_GetExtensionIndex(
+ mHandle,
+ const_cast<OMX_STRING>("OMX.google.android.index.enableAndroidNativeBuffers"),
+ &index);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_GetExtensionIndex failed");
+
+ return StatusFromOMXError(err);
+ }
+
+ OMX_VERSIONTYPE ver;
+ ver.s.nVersionMajor = 1;
+ ver.s.nVersionMinor = 0;
+ ver.s.nRevision = 0;
+ ver.s.nStep = 0;
+ EnableAndroidNativeBuffersParams params = {
+ sizeof(EnableAndroidNativeBuffersParams), ver, portIndex, enable,
+ };
+
+ err = OMX_SetParameter(mHandle, index, &params);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_EnableAndroidNativeBuffers failed with error %d (0x%08x)",
+ err, err);
+
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
+status_t OMXNodeInstance::storeMetaDataInBuffers(
+ OMX_U32 portIndex,
+ OMX_BOOL enable) {
+ Mutex::Autolock autolock(mLock);
+
+ OMX_INDEXTYPE index;
+ OMX_STRING name = const_cast<OMX_STRING>(
+ "OMX.google.android.index.storeMetaDataInBuffers");
+
+ OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_GetExtensionIndex %s failed", name);
+ return StatusFromOMXError(err);
+ }
+
+ StoreMetaDataInBuffersParams params;
+ memset(&params, 0, sizeof(params));
+ params.nSize = sizeof(params);
+
+ // Version: 1.0.0.0
+ params.nVersion.s.nVersionMajor = 1;
+
+ params.nPortIndex = portIndex;
+ params.bStoreMetaData = enable;
+ if ((err = OMX_SetParameter(mHandle, index, &params)) != OMX_ErrorNone) {
+ LOGE("OMX_SetParameter() failed for StoreMetaDataInBuffers: 0x%08x", err);
+ return UNKNOWN_ERROR;
+ }
+ return err;
+}
+
status_t OMXNodeInstance::useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
OMX::buffer_id *buffer) {
@@ -273,6 +348,60 @@ status_t OMXNodeInstance::useBuffer(
return OK;
}
+status_t OMXNodeInstance::useGraphicBuffer(
+ OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
+ OMX::buffer_id *buffer) {
+ Mutex::Autolock autoLock(mLock);
+
+ OMX_INDEXTYPE index;
+ OMX_ERRORTYPE err = OMX_GetExtensionIndex(
+ mHandle,
+ const_cast<OMX_STRING>("OMX.google.android.index.useAndroidNativeBuffer"),
+ &index);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_GetExtensionIndex failed");
+
+ return StatusFromOMXError(err);
+ }
+
+ BufferMeta *bufferMeta = new BufferMeta(graphicBuffer);
+
+ OMX_BUFFERHEADERTYPE *header;
+
+ OMX_VERSIONTYPE ver;
+ ver.s.nVersionMajor = 1;
+ ver.s.nVersionMinor = 0;
+ ver.s.nRevision = 0;
+ ver.s.nStep = 0;
+ UseAndroidNativeBufferParams params = {
+ sizeof(UseAndroidNativeBufferParams), ver, portIndex, bufferMeta,
+ &header, graphicBuffer,
+ };
+
+ err = OMX_SetParameter(mHandle, index, &params);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_UseAndroidNativeBuffer failed with error %d (0x%08x)", err,
+ err);
+
+ delete bufferMeta;
+ bufferMeta = NULL;
+
+ *buffer = 0;
+
+ return UNKNOWN_ERROR;
+ }
+
+ CHECK_EQ(header->pAppPrivate, bufferMeta);
+
+ *buffer = header;
+
+ addActiveBuffer(portIndex, *buffer);
+
+ return OK;
+}
+
status_t OMXNodeInstance::allocateBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data) {
@@ -498,4 +627,3 @@ void OMXNodeInstance::freeActiveBuffers() {
}
} // namespace android
-
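enableGraphicBuffers() and storeMetaDataInBuffers() share one shape: resolve a named Android extension with OMX_GetExtensionIndex(), then push a versioned parameter struct through OMX_SetParameter(). A hedged sketch of that common pattern; the helper below is illustrative and not part of this change:

    // Illustrative helper capturing the extension-index pattern used above.
    // ParamsT stands in for EnableAndroidNativeBuffersParams et al.
    template <typename ParamsT>
    static OMX_ERRORTYPE setAndroidExtension(
            OMX_HANDLETYPE handle, const char *name, ParamsT *params) {
        OMX_INDEXTYPE index;
        OMX_ERRORTYPE err = OMX_GetExtensionIndex(
                handle, const_cast<OMX_STRING>(name), &index);
        if (err != OMX_ErrorNone) {
            return err;  // component doesn't implement this extension
        }
        return OMX_SetParameter(handle, index, params);
    }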
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index f928c06..e936923 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -23,11 +23,13 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/MediaErrors.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <netdb.h>
+#include <openssl/md5.h>
#include <sys/socket.h>
namespace android {
@@ -37,6 +39,7 @@ const int64_t ARTSPConnection::kSelectTimeoutUs = 1000ll;
ARTSPConnection::ARTSPConnection()
: mState(DISCONNECTED),
+ mAuthType(NONE),
mSocket(-1),
mConnectionID(0),
mNextCSeq(0),
@@ -114,10 +117,13 @@ void ARTSPConnection::onMessageReceived(const sp<AMessage> &msg) {
// static
bool ARTSPConnection::ParseURL(
- const char *url, AString *host, unsigned *port, AString *path) {
+ const char *url, AString *host, unsigned *port, AString *path,
+ AString *user, AString *pass) {
host->clear();
*port = 0;
path->clear();
+ user->clear();
+ pass->clear();
if (strncasecmp("rtsp://", url, 7)) {
return false;
@@ -133,6 +139,24 @@ bool ARTSPConnection::ParseURL(
path->setTo(slashPos);
}
+ ssize_t atPos = host->find("@");
+
+ if (atPos >= 0) {
+ // Split off user:pass@ from the hostname.
+
+ AString userPass(*host, 0, atPos);
+ host->erase(0, atPos + 1);
+
+ ssize_t colonPos = userPass.find(":");
+
+ if (colonPos < 0) {
+ *user = userPass;
+ } else {
+ user->setTo(userPass, 0, colonPos);
+ pass->setTo(userPass, colonPos + 1, userPass.size() - colonPos - 1);
+ }
+ }
+
const char *colonPos = strchr(host->c_str(), ':');
if (colonPos != NULL) {
@@ -187,7 +211,12 @@ void ARTSPConnection::onConnect(const sp<AMessage> &msg) {
AString host, path;
unsigned port;
- if (!ParseURL(url.c_str(), &host, &port, &path)) {
+ if (!ParseURL(url.c_str(), &host, &port, &path, &mUser, &mPass)
+ || (mUser.size() > 0 && mPass.size() == 0)) {
+ // If we have a user name but no password we have to give up
+ // right here, since we currently have no way of asking the user
+ // for this information.
+
LOGE("Malformed rtsp url %s", url.c_str());
reply->setInt32("result", ERROR_MALFORMED);
@@ -197,6 +226,10 @@ void ARTSPConnection::onConnect(const sp<AMessage> &msg) {
return;
}
+ if (mUser.size() > 0) {
+ LOGV("user = '%s', pass = '%s'", mUser.c_str(), mPass.c_str());
+ }
+
struct hostent *ent = gethostbyname(host.c_str());
if (ent == NULL) {
LOGE("Unknown host %s", host.c_str());
@@ -262,6 +295,11 @@ void ARTSPConnection::onDisconnect(const sp<AMessage> &msg) {
reply->setInt32("result", OK);
mState = DISCONNECTED;
+ mUser.clear();
+ mPass.clear();
+ mAuthType = NONE;
+ mNonce.clear();
+
reply->post();
}
@@ -335,6 +373,12 @@ void ARTSPConnection::onSendRequest(const sp<AMessage> &msg) {
AString request;
CHECK(msg->findString("request", &request));
+ // Just in case we need to re-issue the request with proper authentication
+ // later, stash it away.
+ reply->setString("original-request", request.c_str(), request.size());
+
+ addAuthentication(&request);
+
// Find the boundary between headers and the body.
ssize_t i = request.find("\r\n\r\n");
CHECK_GE(i, 0);
@@ -347,7 +391,7 @@ void ARTSPConnection::onSendRequest(const sp<AMessage> &msg) {
request.insert(cseqHeader, i + 2);
- LOGV("%s", request.c_str());
+ LOGV("request: '%s'", request.c_str());
size_t numBytesSent = 0;
while (numBytesSent < request.size()) {
@@ -612,6 +656,30 @@ bool ARTSPConnection::receiveRTSPReponse() {
}
}
+ if (response->mStatusCode == 401) {
+ if (mAuthType == NONE && mUser.size() > 0
+ && parseAuthMethod(response)) {
+ ssize_t i;
+ CHECK_EQ((status_t)OK, findPendingRequest(response, &i));
+ CHECK_GE(i, 0);
+
+ sp<AMessage> reply = mPendingRequests.valueAt(i);
+ mPendingRequests.removeItemsAt(i);
+
+ AString request;
+ CHECK(reply->findString("original-request", &request));
+
+ sp<AMessage> msg = new AMessage(kWhatSendRequest, id());
+ msg->setMessage("reply", reply);
+ msg->setString("request", request.c_str(), request.size());
+
+ LOGI("re-sending request with authentication headers...");
+ onSendRequest(msg);
+
+ return true;
+ }
+ }
+
return notifyResponseListener(response);
}
@@ -628,26 +696,47 @@ bool ARTSPConnection::ParseSingleUnsignedLong(
return true;
}
-bool ARTSPConnection::notifyResponseListener(
- const sp<ARTSPResponse> &response) {
+status_t ARTSPConnection::findPendingRequest(
+ const sp<ARTSPResponse> &response, ssize_t *index) const {
+ *index = 0;
+
ssize_t i = response->mHeaders.indexOfKey("cseq");
if (i < 0) {
- return true;
+ // This is an unsolicited server->client message.
+ return OK;
}
AString value = response->mHeaders.valueAt(i);
unsigned long cseq;
if (!ParseSingleUnsignedLong(value.c_str(), &cseq)) {
- return false;
+ return ERROR_MALFORMED;
}
i = mPendingRequests.indexOfKey(cseq);
if (i < 0) {
- // Unsolicited response?
- TRESPASS();
+ return -ENOENT;
+ }
+
+ *index = i;
+
+ return OK;
+}
+
+bool ARTSPConnection::notifyResponseListener(
+ const sp<ARTSPResponse> &response) {
+ ssize_t i;
+ status_t err = findPendingRequest(response, &i);
+
+ if (err == OK && i < 0) {
+ // An unsolicited server response is not a problem.
+ return true;
+ }
+
+ if (err != OK) {
+ return false;
}
sp<AMessage> reply = mPendingRequests.valueAt(i);
@@ -660,4 +749,160 @@ bool ARTSPConnection::notifyResponseListener(
return true;
}
+bool ARTSPConnection::parseAuthMethod(const sp<ARTSPResponse> &response) {
+ ssize_t i = response->mHeaders.indexOfKey("www-authenticate");
+
+ if (i < 0) {
+ return false;
+ }
+
+ AString value = response->mHeaders.valueAt(i);
+
+ if (!strncmp(value.c_str(), "Basic", 5)) {
+ mAuthType = BASIC;
+ } else {
+#if !defined(HAVE_ANDROID_OS)
+ // We don't have access to the MD5 implementation on the simulator,
+ // so we won't support digest authentication.
+ return false;
+#endif
+
+ CHECK(!strncmp(value.c_str(), "Digest", 6));
+ mAuthType = DIGEST;
+
+ i = value.find("nonce=");
+ CHECK_GE(i, 0);
+ CHECK_EQ(value.c_str()[i + 6], '\"');
+ ssize_t j = value.find("\"", i + 7);
+ CHECK_GE(j, 0);
+
+ mNonce.setTo(value, i + 7, j - i - 7);
+ }
+
+ return true;
+}
+
+#if defined(HAVE_ANDROID_OS)
+static void H(const AString &s, AString *out) {
+ out->clear();
+
+ MD5_CTX m;
+ MD5_Init(&m);
+ MD5_Update(&m, s.c_str(), s.size());
+
+ uint8_t key[16];
+ MD5_Final(key, &m);
+
+ for (size_t i = 0; i < 16; ++i) {
+ char nibble = key[i] >> 4;
+ if (nibble <= 9) {
+ nibble += '0';
+ } else {
+ nibble += 'a' - 10;
+ }
+ out->append(&nibble, 1);
+
+ nibble = key[i] & 0x0f;
+ if (nibble <= 9) {
+ nibble += '0';
+ } else {
+ nibble += 'a' - 10;
+ }
+ out->append(&nibble, 1);
+ }
+}
+#endif
+
+static void GetMethodAndURL(
+ const AString &request, AString *method, AString *url) {
+ ssize_t space1 = request.find(" ");
+ CHECK_GE(space1, 0);
+
+ ssize_t space2 = request.find(" ", space1 + 1);
+ CHECK_GE(space2, 0);
+
+ method->setTo(request, 0, space1);
+ url->setTo(request, space1 + 1, space2 - space1 - 1);  // exclude the space
+}
+
+void ARTSPConnection::addAuthentication(AString *request) {
+ if (mAuthType == NONE) {
+ return;
+ }
+
+ // Find the boundary between headers and the body.
+ ssize_t i = request->find("\r\n\r\n");
+ CHECK_GE(i, 0);
+
+ if (mAuthType == BASIC) {
+ AString tmp;
+ tmp.append(mUser);
+ tmp.append(":");
+ tmp.append(mPass);
+
+ AString out;
+ encodeBase64(tmp.c_str(), tmp.size(), &out);
+
+ AString fragment;
+ fragment.append("Authorization: Basic ");
+ fragment.append(out);
+ fragment.append("\r\n");
+
+ request->insert(fragment, i + 2);
+
+ return;
+ }
+
+#if defined(HAVE_ANDROID_OS)
+ CHECK_EQ((int)mAuthType, (int)DIGEST);
+
+ AString method, url;
+ GetMethodAndURL(*request, &method, &url);
+
+ AString A1;
+ A1.append(mUser);
+ A1.append(":");
+ A1.append("Streaming Server");
+ A1.append(":");
+ A1.append(mPass);
+
+ AString A2;
+ A2.append(method);
+ A2.append(":");
+ A2.append(url);
+
+ AString HA1, HA2;
+ H(A1, &HA1);
+ H(A2, &HA2);
+
+ AString tmp;
+ tmp.append(HA1);
+ tmp.append(":");
+ tmp.append(mNonce);
+ tmp.append(":");
+ tmp.append(HA2);
+
+ AString digest;
+ H(tmp, &digest);
+
+ AString fragment;
+ fragment.append("Authorization: Digest ");
+ fragment.append("nonce=\"");
+ fragment.append(mNonce);
+ fragment.append("\", ");
+ fragment.append("username=\"");
+ fragment.append(mUser);
+ fragment.append("\", ");
+ fragment.append("uri=\"");
+ fragment.append(url);
+ fragment.append("\", ");
+ fragment.append("response=\"");
+ fragment.append(digest);
+ fragment.append("\"");
+ fragment.append("\r\n");
+
+ request->insert(fragment, i + 2);
+#endif
+}
+
} // namespace android
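For reference, the digest branch of addAuthentication() computes the RFC 2069 form of digest authentication (no qop), with H being the lowercase-hex MD5 implemented above. Note that the realm is hard-coded to "Streaming Server" rather than parsed from the WWW-Authenticate header.

    // RFC 2069 digest response as assembled above:
    //   A1       = user ":" realm ":" pass    (realm fixed to "Streaming Server")
    //   A2       = method ":" uri
    //   response = H( H(A1) ":" nonce ":" H(A2) )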
diff --git a/media/libstagefright/rtsp/ARTSPConnection.h b/media/libstagefright/rtsp/ARTSPConnection.h
index 96e0d5b..19be2a6 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.h
+++ b/media/libstagefright/rtsp/ARTSPConnection.h
@@ -42,6 +42,10 @@ struct ARTSPConnection : public AHandler {
void observeBinaryData(const sp<AMessage> &reply);
+ static bool ParseURL(
+ const char *url, AString *host, unsigned *port, AString *path,
+ AString *user, AString *pass);
+
protected:
virtual ~ARTSPConnection();
virtual void onMessageReceived(const sp<AMessage> &msg);
@@ -62,9 +66,18 @@ private:
kWhatObserveBinaryData = 'obin',
};
+ enum AuthType {
+ NONE,
+ BASIC,
+ DIGEST
+ };
+
static const int64_t kSelectTimeoutUs;
State mState;
+ AString mUser, mPass;
+ AuthType mAuthType;
+ AString mNonce;
int mSocket;
int32_t mConnectionID;
int32_t mNextCSeq;
@@ -90,8 +103,11 @@ private:
sp<ABuffer> receiveBinaryData();
bool notifyResponseListener(const sp<ARTSPResponse> &response);
- static bool ParseURL(
- const char *url, AString *host, unsigned *port, AString *path);
+ bool parseAuthMethod(const sp<ARTSPResponse> &response);
+ void addAuthentication(AString *request);
+
+ status_t findPendingRequest(
+ const sp<ARTSPResponse> &response, ssize_t *index) const;
static bool ParseSingleUnsignedLong(
const char *from, unsigned long *x);
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 612caff..880aa85 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -57,12 +57,20 @@ bool ASessionDescription::parse(const void *data, size_t size) {
size_t i = 0;
for (;;) {
- ssize_t eolPos = desc.find("\r\n", i);
+ ssize_t eolPos = desc.find("\n", i);
+
if (eolPos < 0) {
break;
}
- AString line(desc, i, eolPos - i);
+ AString line;
+ if ((size_t)eolPos > i && desc.c_str()[eolPos - 1] == '\r') {
+ // We accept both '\n' and '\r\n' line endings, if it's
+ // the latter, strip the '\r' as well.
+ line.setTo(desc, i, eolPos - i - 1);
+ } else {
+ line.setTo(desc, i, eolPos - i);
+ }
if (line.size() < 2 || line.c_str()[1] != '=') {
return false;
@@ -141,7 +149,7 @@ bool ASessionDescription::parse(const void *data, size_t size) {
}
}
- i = eolPos + 2;
+ i = eolPos + 1;
}
return true;
@@ -245,7 +253,7 @@ bool ASessionDescription::getDurationUs(int64_t *durationUs) const {
return false;
}
- if (value == "npt=now-") {
+ if (value == "npt=now-" || value == "npt=0-") {
return false;
}
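The parse() change above amounts to tolerant line splitting: scan for '\n', trim one optional preceding '\r', and advance a single byte past the newline. A standalone sketch of the same loop shape (std::string is used here purely for illustration):

    #include <string>
    #include <vector>

    // Split an SDP blob into lines, accepting both "\n" and "\r\n" endings.
    std::vector<std::string> splitLines(const std::string &desc) {
        std::vector<std::string> lines;
        size_t i = 0;
        for (;;) {
            size_t eol = desc.find('\n', i);
            if (eol == std::string::npos) break;
            size_t end = (eol > i && desc[eol - 1] == '\r') ? eol - 1 : eol;
            lines.push_back(desc.substr(i, end - i));
            i = eol + 1;  // advance past the '\n' only
        }
        return lines;
    }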
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index 081ae32..0bbadc1 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -23,6 +23,7 @@ LOCAL_C_INCLUDES:= \
$(JNI_H_INCLUDE) \
$(TOP)/frameworks/base/include/media/stagefright/openmax \
$(TOP)/frameworks/base/media/libstagefright/include \
+ $(TOP)/external/openssl/include
LOCAL_MODULE:= libstagefright_rtsp
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 6943608..9bb8c46 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -96,6 +96,7 @@ struct MyHandler : public AHandler {
mNetLooper(new ALooper),
mConn(new ARTSPConnection),
mRTPConn(new ARTPConnection),
+ mOriginalSessionURL(url),
mSessionURL(url),
mSetupTracksSuccessful(false),
mSeekPending(false),
@@ -113,6 +114,23 @@ struct MyHandler : public AHandler {
mNetLooper->start(false /* runOnCallingThread */,
false /* canCallJava */,
PRIORITY_HIGHEST);
+
+ // Strip any authentication info from the session URL; we don't
+ // want to transmit the user/pass in cleartext.
+ AString host, path, user, pass;
+ unsigned port;
+ if (ARTSPConnection::ParseURL(
+ mSessionURL.c_str(), &host, &port, &path, &user, &pass)
+ && user.size() > 0) {
+ mSessionURL.clear();
+ mSessionURL.append("rtsp://");
+ mSessionURL.append(host);
+ mSessionURL.append(":");
+ mSessionURL.append(StringPrintf("%u", port));
+ mSessionURL.append(path);
+
+ LOGI("rewritten session url: '%s'", mSessionURL.c_str());
+ }
}
void connect(const sp<AMessage> &doneMsg) {
@@ -126,7 +144,7 @@ struct MyHandler : public AHandler {
mConn->observeBinaryData(notify);
sp<AMessage> reply = new AMessage('conn', id());
- mConn->connect(mSessionURL.c_str(), reply);
+ mConn->connect(mOriginalSessionURL.c_str(), reply);
}
void disconnect(const sp<AMessage> &doneMsg) {
@@ -312,7 +330,7 @@ struct MyHandler : public AHandler {
int32_t reconnect;
if (msg->findInt32("reconnect", &reconnect) && reconnect) {
sp<AMessage> reply = new AMessage('conn', id());
- mConn->connect(mSessionURL.c_str(), reply);
+ mConn->connect(mOriginalSessionURL.c_str(), reply);
} else {
(new AMessage('quit', id()))->post();
}
@@ -922,7 +940,7 @@ struct MyHandler : public AHandler {
CHECK(GetAttribute(range.c_str(), "npt", &val));
float npt1, npt2;
- if (val == "now-") {
+ if (val == "now-" || val == "0-") {
// This is a live stream and therefore not seekable.
return;
} else {
@@ -992,6 +1010,7 @@ private:
sp<ARTSPConnection> mConn;
sp<ARTPConnection> mRTPConn;
sp<ASessionDescription> mSessionDesc;
+ AString mOriginalSessionURL; // This one still has user:pass@
AString mSessionURL;
AString mBaseURL;
AString mSessionID;
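// Example of the rewrite performed in the constructor above (URL assumed
// for illustration):
//   in : rtsp://user:secret@example.com:554/stream.sdp   (mOriginalSessionURL)
//   out: rtsp://example.com:554/stream.sdp               (mSessionURL)
// Only the connect path, which handles authentication, ever sees the
// credentialed form; all request lines carry the stripped URL.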
diff --git a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk
new file mode 100644
index 0000000..0794ad1
--- /dev/null
+++ b/media/libstagefright/yuv/Android.mk
@@ -0,0 +1,13 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ YUVImage.cpp \
+ YUVCanvas.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils
+
+LOCAL_MODULE:= libstagefright_yuv
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/yuv/YUVCanvas.cpp b/media/libstagefright/yuv/YUVCanvas.cpp
new file mode 100644
index 0000000..38aa779
--- /dev/null
+++ b/media/libstagefright/yuv/YUVCanvas.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "YUVCanvas"
+
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/YUVCanvas.h>
+#include <media/stagefright/YUVImage.h>
+#include <ui/Rect.h>
+
+namespace android {
+
+YUVCanvas::YUVCanvas(YUVImage &yuvImage)
+ : mYUVImage(yuvImage) {
+}
+
+YUVCanvas::~YUVCanvas() {
+}
+
+void YUVCanvas::FillYUV(uint8_t yValue, uint8_t uValue, uint8_t vValue) {
+ for (int32_t y = 0; y < mYUVImage.height(); ++y) {
+ for (int32_t x = 0; x < mYUVImage.width(); ++x) {
+ mYUVImage.setPixelValue(x, y, yValue, uValue, vValue);
+ }
+ }
+}
+
+void YUVCanvas::FillYUVRectangle(const Rect& rect,
+ uint8_t yValue, uint8_t uValue, uint8_t vValue) {
+ for (int32_t y = rect.top; y < rect.bottom; ++y) {
+ for (int32_t x = rect.left; x < rect.right; ++x) {
+ mYUVImage.setPixelValue(x, y, yValue, uValue, vValue);
+ }
+ }
+}
+
+void YUVCanvas::CopyImageRect(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage) {
+
+ // Try fast copy first
+ if (YUVImage::fastCopyRectangle(
+ srcRect,
+ destStartX, destStartY,
+ srcImage, mYUVImage)) {
+ return;
+ }
+
+ int32_t srcStartX = srcRect.left;
+ int32_t srcStartY = srcRect.top;
+ for (int32_t offsetY = 0; offsetY < srcRect.height(); ++offsetY) {
+ for (int32_t offsetX = 0; offsetX < srcRect.width(); ++offsetX) {
+ int32_t srcX = srcStartX + offsetX;
+ int32_t srcY = srcStartY + offsetY;
+
+ int32_t destX = destStartX + offsetX;
+ int32_t destY = destStartY + offsetY;
+
+ uint8_t yValue;
+ uint8_t uValue;
+ uint8_t vValue;
+
+ srcImage.getPixelValue(srcX, srcY, &yValue, &uValue, &vValue);
+ mYUVImage.setPixelValue(destX, destY, yValue, uValue, vValue);
+ }
+ }
+}
+
+void YUVCanvas::downsample(
+ int32_t srcOffsetX, int32_t srcOffsetY,
+ int32_t skipX, int32_t skipY,
+ const YUVImage &srcImage) {
+ // TODO: Add a low pass filter for downsampling.
+
+ // Check that srcImage is big enough to fill mYUVImage.
+ CHECK((srcOffsetX + (mYUVImage.width() - 1) * skipX) < srcImage.width());
+ CHECK((srcOffsetY + (mYUVImage.height() - 1) * skipY) < srcImage.height());
+
+ uint8_t yValue;
+ uint8_t uValue;
+ uint8_t vValue;
+
+ int32_t srcY = srcOffsetY;
+ for (int32_t y = 0; y < mYUVImage.height(); ++y) {
+ int32_t srcX = srcOffsetX;
+ for (int32_t x = 0; x < mYUVImage.width(); ++x) {
+ srcImage.getPixelValue(srcX, srcY, &yValue, &uValue, &vValue);
+ mYUVImage.setPixelValue(x, y, yValue, uValue, vValue);
+
+ srcX += skipX;
+ }
+ srcY += skipY;
+ }
+}
+
+} // namespace android
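For context, a minimal usage sketch of the canvas API above — hypothetical client code, with the half-resolution output size chosen purely for illustration:

    #include <media/stagefright/YUVCanvas.h>
    #include <media/stagefright/YUVImage.h>

    using namespace android;

    // Keep every second pixel in each direction; no low-pass filtering
    // is applied yet, per the TODO in downsample().
    void makeHalfSizePreview(const YUVImage &frame) {
        YUVImage preview(YUVImage::YUV420SemiPlanar,
                         frame.width() / 2, frame.height() / 2);
        YUVCanvas canvas(preview);
        canvas.downsample(0, 0, 2, 2, frame);
    }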
diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp
new file mode 100644
index 0000000..b712062
--- /dev/null
+++ b/media/libstagefright/yuv/YUVImage.cpp
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "YUVImage"
+
+#include <media/stagefright/YUVImage.h>
+#include <ui/Rect.h>
+#include <media/stagefright/MediaDebug.h>
+
+namespace android {
+
+YUVImage::YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height) {
+ mYUVFormat = yuvFormat;
+ mWidth = width;
+ mHeight = height;
+
+ size_t numberOfBytes = bufferSize(yuvFormat, width, height);
+ uint8_t *buffer = new uint8_t[numberOfBytes];
+ mBuffer = buffer;
+ mOwnBuffer = true;
+
+ initializeYUVPointers();
+}
+
+YUVImage::YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height, uint8_t *buffer) {
+ mYUVFormat = yuvFormat;
+ mWidth = width;
+ mHeight = height;
+ mBuffer = buffer;
+ mOwnBuffer = false;
+
+ initializeYUVPointers();
+}
+
+// static
+size_t YUVImage::bufferSize(YUVFormat yuvFormat, int32_t width, int32_t height) {
+ int32_t numberOfPixels = width*height;
+ size_t numberOfBytes = 0;
+ if (yuvFormat == YUV420Planar || yuvFormat == YUV420SemiPlanar) {
+ // Y takes numberOfPixels bytes and U/V take numberOfPixels/4 bytes each.
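+        // For example, a 320x240 frame needs 320*240 = 76800 Y bytes plus
+        // 2 * 76800/4 = 38400 chroma bytes, 115200 bytes in total.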
+ numberOfBytes = (size_t)(numberOfPixels + (numberOfPixels >> 1));
+ } else {
+ LOGE("Format not supported");
+ }
+ return numberOfBytes;
+}
+
+bool YUVImage::initializeYUVPointers() {
+ int32_t numberOfPixels = mWidth * mHeight;
+
+ if (mYUVFormat == YUV420Planar) {
+ mYdata = (uint8_t *)mBuffer;
+ mUdata = mYdata + numberOfPixels;
+ mVdata = mUdata + (numberOfPixels >> 2);
+ } else if (mYUVFormat == YUV420SemiPlanar) {
+        // U and V samples are interleaved as VUVUVU... (NV21-style), so the
+        // V data starts right after the Y plane and the U data starts one
+        // byte after the start of V.
+ mYdata = (uint8_t *)mBuffer;
+ mVdata = mYdata + numberOfPixels;
+ mUdata = mVdata + 1;
+ } else {
+ LOGE("Format not supported");
+ return false;
+ }
+ return true;
+}
+
+YUVImage::~YUVImage() {
+ if (mOwnBuffer) delete[] mBuffer;
+}
+
+bool YUVImage::getOffsets(int32_t x, int32_t y,
+ int32_t *yOffset, int32_t *uOffset, int32_t *vOffset) const {
+ *yOffset = y*mWidth + x;
+
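+    // Chroma is subsampled 2x2: each 2x2 block of luma samples shares one
+    // U and one V sample, hence the halved coordinates below.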
+ int32_t uvOffset = (y >> 1) * (mWidth >> 1) + (x >> 1);
+ if (mYUVFormat == YUV420Planar) {
+ *uOffset = uvOffset;
+ *vOffset = uvOffset;
+ } else if (mYUVFormat == YUV420SemiPlanar) {
+ // Since U and V channels are interleaved, offsets need
+ // to be doubled.
+ *uOffset = 2*uvOffset;
+ *vOffset = 2*uvOffset;
+ } else {
+ LOGE("Format not supported");
+ return false;
+ }
+
+ return true;
+}
+
+bool YUVImage::getOffsetIncrementsPerDataRow(
+ int32_t *yDataOffsetIncrement,
+ int32_t *uDataOffsetIncrement,
+ int32_t *vDataOffsetIncrement) const {
+ *yDataOffsetIncrement = mWidth;
+
+ int32_t uvDataOffsetIncrement = mWidth >> 1;
+
+ if (mYUVFormat == YUV420Planar) {
+ *uDataOffsetIncrement = uvDataOffsetIncrement;
+ *vDataOffsetIncrement = uvDataOffsetIncrement;
+ } else if (mYUVFormat == YUV420SemiPlanar) {
+ // Since U and V channels are interleaved, offsets need
+ // to be doubled.
+ *uDataOffsetIncrement = 2*uvDataOffsetIncrement;
+ *vDataOffsetIncrement = 2*uvDataOffsetIncrement;
+ } else {
+ LOGE("Format not supported");
+ return false;
+ }
+
+ return true;
+}
+
+uint8_t* YUVImage::getYAddress(int32_t offset) const {
+ return mYdata + offset;
+}
+
+uint8_t* YUVImage::getUAddress(int32_t offset) const {
+ return mUdata + offset;
+}
+
+uint8_t* YUVImage::getVAddress(int32_t offset) const {
+ return mVdata + offset;
+}
+
+bool YUVImage::getYUVAddresses(int32_t x, int32_t y,
+ uint8_t **yAddr, uint8_t **uAddr, uint8_t **vAddr) const {
+ int32_t yOffset;
+ int32_t uOffset;
+ int32_t vOffset;
+ if (!getOffsets(x, y, &yOffset, &uOffset, &vOffset)) return false;
+
+ *yAddr = getYAddress(yOffset);
+ *uAddr = getUAddress(uOffset);
+ *vAddr = getVAddress(vOffset);
+
+ return true;
+}
+
+bool YUVImage::validPixel(int32_t x, int32_t y) const {
+ return (x >= 0 && x < mWidth &&
+ y >= 0 && y < mHeight);
+}
+
+bool YUVImage::getPixelValue(int32_t x, int32_t y,
+ uint8_t *yPtr, uint8_t *uPtr, uint8_t *vPtr) const {
+ CHECK(validPixel(x, y));
+
+ uint8_t *yAddr;
+ uint8_t *uAddr;
+ uint8_t *vAddr;
+ if (!getYUVAddresses(x, y, &yAddr, &uAddr, &vAddr)) return false;
+
+ *yPtr = *yAddr;
+ *uPtr = *uAddr;
+ *vPtr = *vAddr;
+
+ return true;
+}
+
+bool YUVImage::setPixelValue(int32_t x, int32_t y,
+ uint8_t yValue, uint8_t uValue, uint8_t vValue) {
+ CHECK(validPixel(x, y));
+
+ uint8_t *yAddr;
+ uint8_t *uAddr;
+ uint8_t *vAddr;
+ if (!getYUVAddresses(x, y, &yAddr, &uAddr, &vAddr)) return false;
+
+ *yAddr = yValue;
+ *uAddr = uValue;
+ *vAddr = vValue;
+
+ return true;
+}
+
+void YUVImage::fastCopyRectangle420Planar(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage) {
+ CHECK(srcImage.mYUVFormat == YUV420Planar);
+ CHECK(destImage.mYUVFormat == YUV420Planar);
+
+ int32_t srcStartX = srcRect.left;
+ int32_t srcStartY = srcRect.top;
+ int32_t width = srcRect.width();
+ int32_t height = srcRect.height();
+
+ // Get source and destination start addresses
+ uint8_t *ySrcAddrBase;
+ uint8_t *uSrcAddrBase;
+ uint8_t *vSrcAddrBase;
+ srcImage.getYUVAddresses(srcStartX, srcStartY,
+ &ySrcAddrBase, &uSrcAddrBase, &vSrcAddrBase);
+
+ uint8_t *yDestAddrBase;
+ uint8_t *uDestAddrBase;
+ uint8_t *vDestAddrBase;
+ destImage.getYUVAddresses(destStartX, destStartY,
+ &yDestAddrBase, &uDestAddrBase, &vDestAddrBase);
+
+    // Get the source and destination offset increments for going from one
+    // data row to the next.
+ int32_t ySrcOffsetIncrement;
+ int32_t uSrcOffsetIncrement;
+ int32_t vSrcOffsetIncrement;
+ srcImage.getOffsetIncrementsPerDataRow(
+ &ySrcOffsetIncrement, &uSrcOffsetIncrement, &vSrcOffsetIncrement);
+
+ int32_t yDestOffsetIncrement;
+ int32_t uDestOffsetIncrement;
+ int32_t vDestOffsetIncrement;
+ destImage.getOffsetIncrementsPerDataRow(
+ &yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
+
+ // Copy Y
+ {
+ size_t numberOfYBytesPerRow = (size_t) width;
+ uint8_t *ySrcAddr = ySrcAddrBase;
+ uint8_t *yDestAddr = yDestAddrBase;
+ for (int32_t offsetY = 0; offsetY < height; ++offsetY) {
+ memcpy(yDestAddr, ySrcAddr, numberOfYBytesPerRow);
+
+ ySrcAddr += ySrcOffsetIncrement;
+ yDestAddr += yDestOffsetIncrement;
+ }
+ }
+
+ // Copy U
+ {
+ size_t numberOfUBytesPerRow = (size_t) (width >> 1);
+ uint8_t *uSrcAddr = uSrcAddrBase;
+ uint8_t *uDestAddr = uDestAddrBase;
+        // Every other pixel row has a U/V data row, so only iterate over
+        // half the height.
+ for (int32_t offsetY = 0; offsetY < (height >> 1); ++offsetY) {
+ memcpy(uDestAddr, uSrcAddr, numberOfUBytesPerRow);
+
+ uSrcAddr += uSrcOffsetIncrement;
+ uDestAddr += uDestOffsetIncrement;
+ }
+ }
+
+ // Copy V
+ {
+ size_t numberOfVBytesPerRow = (size_t) (width >> 1);
+ uint8_t *vSrcAddr = vSrcAddrBase;
+ uint8_t *vDestAddr = vDestAddrBase;
+ // Every other pixel row has a U/V data row. Hence only go half the height.
+ for (int32_t offsetY = 0; offsetY < (height >> 1); ++offsetY) {
+ memcpy(vDestAddr, vSrcAddr, numberOfVBytesPerRow);
+
+ vSrcAddr += vSrcOffsetIncrement;
+ vDestAddr += vDestOffsetIncrement;
+ }
+ }
+}
+
+void YUVImage::fastCopyRectangle420SemiPlanar(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage) {
+ CHECK(srcImage.mYUVFormat == YUV420SemiPlanar);
+ CHECK(destImage.mYUVFormat == YUV420SemiPlanar);
+
+ int32_t srcStartX = srcRect.left;
+ int32_t srcStartY = srcRect.top;
+ int32_t width = srcRect.width();
+ int32_t height = srcRect.height();
+
+ // Get source and destination start addresses
+ uint8_t *ySrcAddrBase;
+ uint8_t *uSrcAddrBase;
+ uint8_t *vSrcAddrBase;
+ srcImage.getYUVAddresses(srcStartX, srcStartY,
+ &ySrcAddrBase, &uSrcAddrBase, &vSrcAddrBase);
+
+ uint8_t *yDestAddrBase;
+ uint8_t *uDestAddrBase;
+ uint8_t *vDestAddrBase;
+ destImage.getYUVAddresses(destStartX, destStartY,
+ &yDestAddrBase, &uDestAddrBase, &vDestAddrBase);
+
+    // Get the source and destination offset increments for going from one
+    // data row to the next.
+ int32_t ySrcOffsetIncrement;
+ int32_t uSrcOffsetIncrement;
+ int32_t vSrcOffsetIncrement;
+ srcImage.getOffsetIncrementsPerDataRow(
+ &ySrcOffsetIncrement, &uSrcOffsetIncrement, &vSrcOffsetIncrement);
+
+ int32_t yDestOffsetIncrement;
+ int32_t uDestOffsetIncrement;
+ int32_t vDestOffsetIncrement;
+ destImage.getOffsetIncrementsPerDataRow(
+ &yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
+
+ // Copy Y
+ {
+ size_t numberOfYBytesPerRow = (size_t) width;
+ uint8_t *ySrcAddr = ySrcAddrBase;
+ uint8_t *yDestAddr = yDestAddrBase;
+ for (int32_t offsetY = 0; offsetY < height; ++offsetY) {
+ memcpy(yDestAddr, ySrcAddr, numberOfYBytesPerRow);
+
+            ySrcAddr += ySrcOffsetIncrement;
+            yDestAddr += yDestOffsetIncrement;
+ }
+ }
+
+ // Copy UV
+ {
+        // U and V are interleaved, so one chroma data row holds
+        // 2*(width/2) = width bytes (for even widths). V comes first in
+        // this layout with U at V+1, so a single memcpy starting at the V
+        // address moves both channels.
+        size_t numberOfUVBytesPerRow = (size_t) width;
+ uint8_t *vSrcAddr = vSrcAddrBase;
+ uint8_t *vDestAddr = vDestAddrBase;
+ // Every other pixel row has a U/V data row. Hence only go half the height.
+ for (int32_t offsetY = 0; offsetY < (height >> 1); ++offsetY) {
+ memcpy(vDestAddr, vSrcAddr, numberOfUVBytesPerRow);
+
+ vSrcAddr += vSrcOffsetIncrement;
+ vDestAddr += vDestOffsetIncrement;
+ }
+ }
+}
+
+// static
+bool YUVImage::fastCopyRectangle(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage) {
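+    // Row-wise memcpy is only possible when both images use the same
+    // format; callers such as YUVCanvas::CopyImageRect() fall back to a
+    // per-pixel copy when this returns false.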
+ if (srcImage.mYUVFormat == destImage.mYUVFormat) {
+ if (srcImage.mYUVFormat == YUV420Planar) {
+ fastCopyRectangle420Planar(
+ srcRect,
+ destStartX, destStartY,
+ srcImage, destImage);
+ } else if (srcImage.mYUVFormat == YUV420SemiPlanar) {
+ fastCopyRectangle420SemiPlanar(
+ srcRect,
+ destStartX, destStartY,
+ srcImage, destImage);
+ }
+ return true;
+ }
+ return false;
+}
+
+static uint8_t clamp(int32_t v, int32_t minValue, int32_t maxValue) {
+    CHECK(maxValue >= minValue);
+
+    if (v < minValue) return minValue;
+    else if (v > maxValue) return maxValue;
+    else return v;
+}
+
+void YUVImage::yuv2rgb(uint8_t yValue, uint8_t uValue, uint8_t vValue,
+        uint8_t *r, uint8_t *g, uint8_t *b) const {
+    // Do the arithmetic in a wider type: storing the intermediate results
+    // in uint8_t would wrap on overflow and make the clamp below a no-op.
+    int32_t rTmp = yValue + (1.370705 * (vValue - 128));
+    int32_t gTmp = yValue - (0.698001 * (vValue - 128)) - (0.337633 * (uValue - 128));
+    int32_t bTmp = yValue + (1.732446 * (uValue - 128));
+
+    *r = clamp(rTmp, 0, 255);
+    *g = clamp(gTmp, 0, 255);
+    *b = clamp(bTmp, 0, 255);
+}
+
+bool YUVImage::writeToPPM(const char *filename) const {
+ FILE *fp = fopen(filename, "w");
+ if (fp == NULL) {
+ return false;
+ }
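+    // "P3" is the plain-text PPM variant: width, height and the maximum
+    // channel value in the header, then one ASCII "R G B" triple per pixel.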
+ fprintf(fp, "P3\n");
+ fprintf(fp, "%d %d\n", mWidth, mHeight);
+ fprintf(fp, "255\n");
+ for (int32_t y = 0; y < mHeight; ++y) {
+ for (int32_t x = 0; x < mWidth; ++x) {
+ uint8_t yValue;
+ uint8_t uValue;
+ uint8_t vValue;
+            getPixelValue(x, y, &yValue, &uValue, &vValue);
+
+ uint8_t rValue;
+ uint8_t gValue;
+ uint8_t bValue;
+ yuv2rgb(yValue, uValue, vValue, &rValue, &gValue, &bValue);
+
+ fprintf(fp, "%d %d %d\n", (int32_t)rValue, (int32_t)gValue, (int32_t)bValue);
+ }
+ }
+ fclose(fp);
+ return true;
+}
+
+} // namespace android
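Finally, a hypothetical smoke test tying the pieces together: allocate an image, paint it through the canvas, and dump it with writeToPPM() for visual inspection. The output path is illustrative only:

    #include <media/stagefright/YUVCanvas.h>
    #include <media/stagefright/YUVImage.h>

    using namespace android;

    int main() {
        // Allocate a QVGA planar image, paint it mid-gray, and write it
        // out as ASCII PPM so the result can be opened in an image viewer.
        YUVImage image(YUVImage::YUV420Planar, 320, 240);
        YUVCanvas canvas(image);
        canvas.FillYUV(128, 128, 128);
        return image.writeToPPM("/sdcard/yuv_test.ppm") ? 0 : 1;
    }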