Diffstat (limited to 'media/libstagefright')
-rw-r--r--  media/libstagefright/Android.mk                             |    4
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp                      |   65
-rw-r--r--  media/libstagefright/CameraSource.cpp                       |  451
-rw-r--r--  media/libstagefright/CameraSourceTimeLapse.cpp              |  518
-rw-r--r--  media/libstagefright/MediaSourceSplitter.cpp                |  234
-rw-r--r--  media/libstagefright/NuHTTPDataSource.cpp                   |    2
-rw-r--r--  media/libstagefright/OMXCodec.cpp                           |   49
-rw-r--r--  media/libstagefright/VideoSourceDownSampler.cpp             |  142
-rw-r--r--  media/libstagefright/colorconversion/Android.mk             |    6
-rw-r--r--  media/libstagefright/colorconversion/SoftwareRenderer.cpp  |  183
-rw-r--r--  media/libstagefright/httplive/M3UParser.cpp                 |    2
-rw-r--r--  media/libstagefright/include/AwesomePlayer.h                |    3
-rw-r--r--  media/libstagefright/include/OMX.h                          |    7
-rw-r--r--  media/libstagefright/include/OMXNodeInstance.h              |    7
-rw-r--r--  media/libstagefright/include/SoftwareRenderer.h             |   18
-rw-r--r--  media/libstagefright/omx/OMX.cpp                            |   18
-rw-r--r--  media/libstagefright/omx/OMXNodeInstance.cpp                |   99
-rw-r--r--  media/libstagefright/rtsp/ARTSPConnection.cpp               |    2
-rw-r--r--  media/libstagefright/rtsp/ASessionDescription.cpp           |    2
-rw-r--r--  media/libstagefright/rtsp/MyHandler.h                       |    2
-rw-r--r--  media/libstagefright/yuv/Android.mk                         |   13
-rw-r--r--  media/libstagefright/yuv/YUVCanvas.cpp                      |  111
-rw-r--r--  media/libstagefright/yuv/YUVImage.cpp                       |  413
23 files changed, 2233 insertions(+), 118 deletions(-)
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 615f3e8..a6e52a0 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -10,6 +10,8 @@ LOCAL_SRC_FILES:= \
AudioSource.cpp \
AwesomePlayer.cpp \
CameraSource.cpp \
+ CameraSourceTimeLapse.cpp \
+ VideoSourceDownSampler.cpp \
DataSource.cpp \
ESDS.cpp \
FileSource.cpp \
@@ -24,6 +26,7 @@ LOCAL_SRC_FILES:= \
MediaDefs.cpp \
MediaExtractor.cpp \
MediaSource.cpp \
+ MediaSourceSplitter.cpp \
MetaData.cpp \
NuCachedSource2.cpp \
NuHTTPDataSource.cpp \
@@ -59,6 +62,7 @@ LOCAL_SHARED_LIBRARIES := \
libsonivox \
libvorbisidec \
libsurfaceflinger_client \
+ libstagefright_yuv \
libcamera_client
LOCAL_STATIC_LIBRARIES := \
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 57bea8c..a0b7f70 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -45,7 +45,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXCodec.h>
-#include <surfaceflinger/ISurface.h>
+#include <surfaceflinger/Surface.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -101,13 +101,14 @@ struct AwesomeLocalRenderer : public AwesomeRenderer {
bool previewOnly,
const char *componentName,
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<ISurface> &isurface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight)
: mTarget(NULL),
mLibHandle(NULL) {
init(previewOnly, componentName,
- colorFormat, surface, displayWidth,
+ colorFormat, isurface, surface, displayWidth,
displayHeight, decodedWidth, decodedHeight);
}
@@ -139,7 +140,8 @@ private:
bool previewOnly,
const char *componentName,
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<ISurface> &isurface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight);
@@ -151,7 +153,8 @@ void AwesomeLocalRenderer::init(
bool previewOnly,
const char *componentName,
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<ISurface> &isurface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight) {
if (!previewOnly) {
@@ -177,7 +180,7 @@ void AwesomeLocalRenderer::init(
if (func) {
mTarget =
- (*func)(surface, componentName, colorFormat,
+ (*func)(isurface, componentName, colorFormat,
displayWidth, displayHeight,
decodedWidth, decodedHeight);
}
@@ -773,8 +776,18 @@ status_t AwesomePlayer::play_l() {
return OK;
}
+void AwesomePlayer::notifyVideoSize_l() {
+ sp<MetaData> meta = mVideoSource->getFormat();
+
+ int32_t decodedWidth, decodedHeight;
+ CHECK(meta->findInt32(kKeyWidth, &decodedWidth));
+ CHECK(meta->findInt32(kKeyHeight, &decodedHeight));
+
+ notifyListener_l(MEDIA_SET_VIDEO_SIZE, decodedWidth, decodedHeight);
+}
+
void AwesomePlayer::initRenderer_l() {
- if (mISurface != NULL) {
+ if (mSurface != NULL || mISurface != NULL) {
sp<MetaData> meta = mVideoSource->getFormat();
int32_t format;
@@ -791,17 +804,7 @@ void AwesomePlayer::initRenderer_l() {
// before creating a new one.
IPCThreadState::self()->flushCommands();
- if (!strncmp("OMX.", component, 4)) {
- // Our OMX codecs allocate buffers on the media_server side
- // therefore they require a remote IOMXRenderer that knows how
- // to display them.
- mVideoRenderer = new AwesomeRemoteRenderer(
- mClient.interface()->createRenderer(
- mISurface, component,
- (OMX_COLOR_FORMATTYPE)format,
- decodedWidth, decodedHeight,
- mVideoWidth, mVideoHeight));
- } else {
+ if (mSurface != NULL) {
// Other decoders are instantiated locally and as a consequence
// allocate their buffers in local address space.
mVideoRenderer = new AwesomeLocalRenderer(
@@ -809,8 +812,19 @@ void AwesomePlayer::initRenderer_l() {
component,
(OMX_COLOR_FORMATTYPE)format,
mISurface,
+ mSurface,
mVideoWidth, mVideoHeight,
decodedWidth, decodedHeight);
+ } else {
+ // Our OMX codecs allocate buffers on the media_server side
+ // therefore they require a remote IOMXRenderer that knows how
+ // to display them.
+ mVideoRenderer = new AwesomeRemoteRenderer(
+ mClient.interface()->createRenderer(
+ mISurface, component,
+ (OMX_COLOR_FORMATTYPE)format,
+ decodedWidth, decodedHeight,
+ mVideoWidth, mVideoHeight));
}
}
}
@@ -856,6 +870,12 @@ void AwesomePlayer::setISurface(const sp<ISurface> &isurface) {
mISurface = isurface;
}
+void AwesomePlayer::setSurface(const sp<Surface> &surface) {
+ Mutex::Autolock autoLock(mLock);
+
+ mSurface = surface;
+}
+
void AwesomePlayer::setAudioSink(
const sp<MediaPlayerBase::AudioSink> &audioSink) {
Mutex::Autolock autoLock(mLock);
@@ -1113,6 +1133,8 @@ void AwesomePlayer::onVideoEvent() {
if (err == INFO_FORMAT_CHANGED) {
LOGV("VideoSource signalled format change.");
+ notifyVideoSize_l();
+
if (mVideoRenderer != NULL) {
mVideoRendererIsPreview = false;
initRenderer_l();
@@ -1605,10 +1627,10 @@ void AwesomePlayer::onPrepareAsyncEvent() {
void AwesomePlayer::finishAsyncPrepare_l() {
if (mIsAsyncPrepare) {
- if (mVideoWidth < 0 || mVideoHeight < 0) {
+ if (mVideoSource == NULL) {
notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
} else {
- notifyListener_l(MEDIA_SET_VIDEO_SIZE, mVideoWidth, mVideoHeight);
+ notifyVideoSize_l();
}
notifyListener_l(MEDIA_PREPARED);
@@ -1730,13 +1752,14 @@ status_t AwesomePlayer::resume() {
mFlags = state->mFlags & (AUTO_LOOPING | LOOPING | AT_EOS);
- if (state->mLastVideoFrame && mISurface != NULL) {
+ if (state->mLastVideoFrame && (mSurface != NULL || mISurface != NULL)) {
mVideoRenderer =
new AwesomeLocalRenderer(
true, // previewOnly
"",
(OMX_COLOR_FORMATTYPE)state->mColorFormat,
mISurface,
+ mSurface,
state->mVideoWidth,
state->mVideoHeight,
state->mDecodedWidth,
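
Taken together, the AwesomePlayer hunks reduce to one selection rule: if the application supplied a Surface, decoded buffers can be drawn from this process through AwesomeLocalRenderer; otherwise the OMX buffers live in media_server and must be drawn through a remote IOMXRenderer driven by ISurface. A minimal standalone sketch of that policy, using hypothetical stand-in types rather than the framework classes:

    #include <cstdio>

    // Hypothetical stand-ins for android::Surface / android::ISurface;
    // illustration only, not the framework types.
    struct Surface {};
    struct ISurface {};

    enum RendererKind { kLocalRenderer, kRemoteRenderer };

    // Mirrors the policy initRenderer_l() implements after this change.
    static RendererKind chooseRenderer(const Surface *surface,
                                       const ISurface *isurface) {
        if (surface != NULL) {
            return kLocalRenderer;   // draw from this process
        }
        (void)isurface;              // the remote renderer is created on it
        return kRemoteRenderer;      // media_server-side IOMXRenderer
    }

    int main() {
        Surface s;
        printf("with Surface: %s\n",
               chooseRenderer(&s, NULL) == kLocalRenderer ? "local" : "remote");
        printf("without:      %s\n",
               chooseRenderer(NULL, NULL) == kLocalRenderer ? "local" : "remote");
        return 0;
    }
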
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 89cb135..1eb394a 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -27,6 +27,7 @@
#include <media/stagefright/MetaData.h>
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
+#include <surfaceflinger/Surface.h>
#include <utils/String8.h>
#include <cutils/properties.h>
@@ -65,6 +66,11 @@ void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) {
void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr) {
LOGV("postData(%d, ptr:%p, size:%d)",
msgType, dataPtr->pointer(), dataPtr->size());
+
+ sp<CameraSource> source = mSource.promote();
+ if (source.get() != NULL) {
+ source->dataCallback(msgType, dataPtr);
+ }
}
void CameraSourceListener::postDataTimestamp(
@@ -77,6 +83,10 @@ void CameraSourceListener::postDataTimestamp(
}
static int32_t getColorFormat(const char* colorFormat) {
+ if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
+ return OMX_COLOR_FormatYUV420Planar;
+ }
+
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422SP)) {
return OMX_COLOR_FormatYUV422SemiPlanar;
}
@@ -99,72 +109,386 @@ static int32_t getColorFormat(const char* colorFormat) {
CHECK_EQ(0, "Unknown color format");
}
-// static
CameraSource *CameraSource::Create() {
- sp<Camera> camera = Camera::connect(0);
-
- if (camera.get() == NULL) {
- return NULL;
- }
+ Size size;
+ size.width = -1;
+ size.height = -1;
- return new CameraSource(camera);
+ sp<ICamera> camera;
+ return new CameraSource(camera, 0, size, -1, NULL);
}
// static
-CameraSource *CameraSource::CreateFromCamera(const sp<Camera> &camera) {
- if (camera.get() == NULL) {
- return NULL;
+CameraSource *CameraSource::CreateFromCamera(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<Surface>& surface) {
+
+ CameraSource *source = new CameraSource(camera, cameraId,
+ videoSize, frameRate, surface);
+
+ if (source != NULL) {
+ if (source->initCheck() != OK) {
+ delete source;
+ return NULL;
+ }
}
-
- return new CameraSource(camera);
+ return source;
}
-CameraSource::CameraSource(const sp<Camera> &camera)
- : mCamera(camera),
- mFirstFrameTimeUs(0),
- mLastFrameTimestampUs(0),
+CameraSource::CameraSource(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<Surface>& surface)
+ : mCameraFlags(0),
+ mVideoFrameRate(-1),
+ mCamera(0),
+ mSurface(surface),
mNumFramesReceived(0),
+ mLastFrameTimestampUs(0),
+ mStarted(false),
+ mFirstFrameTimeUs(0),
mNumFramesEncoded(0),
mNumFramesDropped(0),
mNumGlitches(0),
mGlitchDurationThresholdUs(200000),
- mCollectStats(false),
- mStarted(false) {
+ mCollectStats(false) {
+
+ mVideoSize.width = -1;
+ mVideoSize.height = -1;
+
+ mInitCheck = init(camera, cameraId, videoSize, frameRate);
+}
+
+status_t CameraSource::initCheck() const {
+ return mInitCheck;
+}
+
+status_t CameraSource::isCameraAvailable(
+ const sp<ICamera>& camera, int32_t cameraId) {
+
+ if (camera == 0) {
+ mCamera = Camera::connect(cameraId);
+ mCameraFlags &= ~FLAGS_HOT_CAMERA;
+ } else {
+ mCamera = Camera::create(camera);
+ mCameraFlags |= FLAGS_HOT_CAMERA;
+ }
+
+ // Is camera available?
+ if (mCamera == 0) {
+ LOGE("Camera connection could not be established.");
+ return -EBUSY;
+ }
+ if (!(mCameraFlags & FLAGS_HOT_CAMERA)) {
+ mCamera->lock();
+ }
+ return OK;
+}
+
+
+/*
+ * Check to see whether the requested video width and height is one
+ * of the supported sizes.
+ * @param width the video frame width in pixels
+ * @param height the video frame height in pixels
+ * @param supportedSizes the vector of sizes that we check against
+ * @return true if the dimension (width and height) is supported.
+ */
+static bool isVideoSizeSupported(
+ int32_t width, int32_t height,
+ const Vector<Size>& supportedSizes) {
+
+ LOGV("isVideoSizeSupported");
+ for (size_t i = 0; i < supportedSizes.size(); ++i) {
+ if (width == supportedSizes[i].width &&
+ height == supportedSizes[i].height) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * If the preview and video output are separate, we only set the
+ * video size; applications should set the preview size to some
+ * proper value, and the recording framework will not change the
+ * preview size. Otherwise, if the video and preview output are
+ * the same, we need to set the preview size to be the same as
+ * the requested video size.
+ */
+/*
+ * Query the camera to retrieve the supported video frame sizes
+ * and also to see whether CameraParameters::setVideoSize()
+ * is supported or not.
+ * @param params CameraParameters to retrieve the information
+ * @param isSetVideoSizeSupported returns whether method
+ * CameraParameters::setVideoSize() is supported or not.
+ * @param sizes returns the vector of Size objects for the
+ * supported video frame sizes advertised by the camera.
+ */
+static void getSupportedVideoSizes(
+ const CameraParameters& params,
+ bool *isSetVideoSizeSupported,
+ Vector<Size>& sizes) {
+
+ *isSetVideoSizeSupported = true;
+ params.getSupportedVideoSizes(sizes);
+ if (sizes.size() == 0) {
+ LOGD("Camera does not support setVideoSize()");
+ params.getSupportedPreviewSizes(sizes);
+ *isSetVideoSizeSupported = false;
+ }
+}
+/*
+ * Check whether the camera has the supported color format
+ * @param params CameraParameters to retrieve the information
+ * @return OK if no error.
+ */
+status_t CameraSource::isCameraColorFormatSupported(
+ const CameraParameters& params) {
+ mColorFormat = getColorFormat(params.get(
+ CameraParameters::KEY_VIDEO_FRAME_FORMAT));
+ if (mColorFormat == -1) {
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+/*
+ * Configure the camera to use the requested video size
+ * (width and height) and/or frame rate. If both width and
+ * height are -1, configuration on the video size is skipped.
+ * If frameRate is -1, configuration on the frame rate
+ * is skipped. Skipping the configuration allows one to
+ * use the current camera setting without the need to
+ * actually know the specific values (see Create() method).
+ *
+ * @param params the CameraParameters to be configured
+ * @param width the target video frame width in pixels
+ * @param height the target video frame height in pixels
+ * @param frameRate the target frame rate in frames per second.
+ * @return OK if no error.
+ */
+status_t CameraSource::configureCamera(
+ CameraParameters* params,
+ int32_t width, int32_t height,
+ int32_t frameRate) {
+
+ Vector<Size> sizes;
+ bool isSetVideoSizeSupportedByCamera = true;
+ getSupportedVideoSizes(*params, &isSetVideoSizeSupportedByCamera, sizes);
+ bool isCameraParamChanged = false;
+ if (width != -1 && height != -1) {
+ if (!isVideoSizeSupported(width, height, sizes)) {
+ LOGE("Video dimension (%dx%d) is unsupported", width, height);
+ return BAD_VALUE;
+ }
+ if (isSetVideoSizeSupportedByCamera) {
+ params->setVideoSize(width, height);
+ } else {
+ params->setPreviewSize(width, height);
+ }
+ isCameraParamChanged = true;
+ } else if ((width == -1 && height != -1) ||
+ (width != -1 && height == -1)) {
+ // If one and only one of the width and height is -1
+ // we reject such a request.
+ LOGE("Requested video size (%dx%d) is not supported", width, height);
+ return BAD_VALUE;
+ } else { // width == -1 && height == -1
+ // Do not configure the camera.
+ // Use the current width and height value setting from the camera.
+ }
+
+ if (frameRate != -1) {
+ params->setPreviewFrameRate(frameRate);
+ isCameraParamChanged = true;
+ } else { // frameRate == -1
+ // Do not configure the camera.
+ // Use the current frame rate value setting from the camera
+ }
+
+ if (isCameraParamChanged) {
+ // Either frame rate or frame size needs to be changed.
+ String8 s = params->flatten();
+ if (OK != mCamera->setParameters(s)) {
+ LOGE("Could not change settings."
+ " Someone else is using camera %p?", mCamera.get());
+ return -EBUSY;
+ }
+ }
+ return OK;
+}
+
+/*
+ * Check whether the requested video frame size
+ * has been successfully configured or not. If both width and height
+ * are -1, check on the current width and height value setting
+ * is performed.
+ *
+ * @param params CameraParameters to retrieve the information
+ * @param width the target video frame width in pixels to check against
+ * @param height the target video frame height in pixels to check against
+ * @return OK if no error
+ */
+status_t CameraSource::checkVideoSize(
+ const CameraParameters& params,
+ int32_t width, int32_t height) {
+
+ int32_t frameWidthActual = -1;
+ int32_t frameHeightActual = -1;
+ params.getPreviewSize(&frameWidthActual, &frameHeightActual);
+ if (frameWidthActual < 0 || frameHeightActual < 0) {
+ LOGE("Failed to retrieve video frame size (%dx%d)",
+ frameWidthActual, frameHeightActual);
+ return UNKNOWN_ERROR;
+ }
+
+ // Check the actual video frame size against the target/requested
+ // video frame size.
+ if (width != -1 && height != -1) {
+ if (frameWidthActual != width || frameHeightActual != height) {
+ LOGE("Failed to set video frame size to %dx%d. "
+ "The actual video size is %dx%d ", width, height,
+ frameWidthActual, frameHeightActual);
+ return UNKNOWN_ERROR;
+ }
+ }
+
+ // Good now.
+ mVideoSize.width = frameWidthActual;
+ mVideoSize.height = frameHeightActual;
+ return OK;
+}
+
+/*
+ * Check the requested frame rate has been successfully configured or not.
+ * If the target frameRate is -1, check on the current frame rate value
+ * setting is performed.
+ *
+ * @param params CameraParameters to retrieve the information
+ * @param frameRate the target video frame rate to check against
+ * @return OK if no error.
+ */
+status_t CameraSource::checkFrameRate(
+ const CameraParameters& params,
+ int32_t frameRate) {
+
+ int32_t frameRateActual = params.getPreviewFrameRate();
+ if (frameRateActual < 0) {
+ LOGE("Failed to retrieve preview frame rate (%d)", frameRateActual);
+ return UNKNOWN_ERROR;
+ }
+
+ // Check the actual video frame rate against the target/requested
+ // video frame rate.
+ if (frameRate != -1 && (frameRateActual - frameRate) != 0) {
+ LOGE("Failed to set preview frame rate to %d fps. The actual "
+ "frame rate is %d", frameRate, frameRateActual);
+ return UNKNOWN_ERROR;
+ }
+
+ // Good now.
+ mVideoFrameRate = frameRateActual;
+ return OK;
+}
+
+/*
+ * Initialize the CameraSource so that it becomes
+ * ready to provide the requested video input streams.
+ * @param camera the camera object used for the video source
+ * @param cameraId if camera == 0, use camera with this id
+ * as the video source
+ * @param videoSize the target video frame size. If both
+ * width and height in videoSize is -1, use the current
+ * width and height settings by the camera
+ * @param frameRate the target frame rate in frames per second.
+ * if it is -1, use the current camera frame rate setting.
+ * @return OK if no error.
+ */
+status_t CameraSource::init(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate) {
+
+ status_t err = OK;
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- String8 s = mCamera->getParameters();
- IPCThreadState::self()->restoreCallingIdentity(token);
- printf("params: \"%s\"\n", s.string());
+ if ((err = isCameraAvailable(camera, cameraId)) != OK) {
+ return err;
+ }
+ CameraParameters params(mCamera->getParameters());
+ if ((err = isCameraColorFormatSupported(params)) != OK) {
+ return err;
+ }
- int32_t width, height, stride, sliceHeight;
- CameraParameters params(s);
- params.getPreviewSize(&width, &height);
+ // Set the camera to use the requested video frame size
+ // and/or frame rate.
+ if ((err = configureCamera(&params,
+ videoSize.width, videoSize.height,
+ frameRate))) {
+ return err;
+ }
- // Calculate glitch duraton threshold based on frame rate
- int32_t frameRate = params.getPreviewFrameRate();
- int64_t glitchDurationUs = (1000000LL / frameRate);
+ // Check on video frame size and frame rate.
+ CameraParameters newCameraParams(mCamera->getParameters());
+ if ((err = checkVideoSize(newCameraParams,
+ videoSize.width, videoSize.height)) != OK) {
+ return err;
+ }
+ if ((err = checkFrameRate(newCameraParams, frameRate)) != OK) {
+ return err;
+ }
+
+ // This CHECK is good, since we just passed the lock/unlock
+ // check earlier by calling mCamera->setParameters().
+ CHECK_EQ(OK, mCamera->setPreviewDisplay(mSurface));
+
+ /*
+ * mCamera->startRecording() signals the camera HAL to make
+ * the video buffers available (for instance, allocation
+ * of the video buffers may be triggered when the camera HAL's
+ * startRecording() method is called). Making these video
+ * buffers available early (before calling start()) is critical
+ * if one wants to configure OMX video encoders to use these
+ * buffers for passing video frame data during video recording
+ * without the need to memcpy the video frame data stored
+ * in these buffers. Eliminating that memcpy is crucial to the
+ * performance of HD-quality video recording applications.
+ *
+ * Based on the OMX IL spec, configuring the OMX video encoders
+ * must occur in the loaded state. When start() is called, the
+ * OMX video encoders are already in the idle state, which is
+ * too late. Thus, we must call mCamera->startRecording() earlier.
+ */
+ startCameraRecording();
+
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ int64_t glitchDurationUs = (1000000LL / mVideoFrameRate);
if (glitchDurationUs > mGlitchDurationThresholdUs) {
mGlitchDurationThresholdUs = glitchDurationUs;
}
- const char *colorFormatStr = params.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
- CHECK(colorFormatStr != NULL);
- int32_t colorFormat = getColorFormat(colorFormatStr);
-
// XXX: query camera for the stride and slice height
// when the capability becomes available.
- stride = width;
- sliceHeight = height;
-
mMeta = new MetaData;
- mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
- mMeta->setInt32(kKeyColorFormat, colorFormat);
- mMeta->setInt32(kKeyWidth, width);
- mMeta->setInt32(kKeyHeight, height);
- mMeta->setInt32(kKeyStride, stride);
- mMeta->setInt32(kKeySliceHeight, sliceHeight);
-
+ mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
+ mMeta->setInt32(kKeyColorFormat, mColorFormat);
+ mMeta->setInt32(kKeyWidth, mVideoSize.width);
+ mMeta->setInt32(kKeyHeight, mVideoSize.height);
+ mMeta->setInt32(kKeyStride, mVideoSize.width);
+ mMeta->setInt32(kKeySliceHeight, mVideoSize.height);
+ return OK;
}
CameraSource::~CameraSource() {
@@ -173,8 +497,17 @@ CameraSource::~CameraSource() {
}
}
+void CameraSource::startCameraRecording() {
+ CHECK_EQ(OK, mCamera->startRecording());
+ CHECK(mCamera->recordingEnabled());
+}
+
status_t CameraSource::start(MetaData *meta) {
CHECK(!mStarted);
+ if (mInitCheck != OK) {
+ LOGE("CameraSource is not initialized yet");
+ return mInitCheck;
+ }
char value[PROPERTY_VALUE_MAX];
if (property_get("media.stagefright.record-stats", value, NULL)
@@ -190,13 +523,17 @@ status_t CameraSource::start(MetaData *meta) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
mCamera->setListener(new CameraSourceListener(this));
- CHECK_EQ(OK, mCamera->startRecording());
IPCThreadState::self()->restoreCallingIdentity(token);
mStarted = true;
return OK;
}
+void CameraSource::stopCameraRecording() {
+ mCamera->setListener(NULL);
+ mCamera->stopRecording();
+}
+
status_t CameraSource::stop() {
LOGV("stop");
Mutex::Autolock autoLock(mLock);
@@ -204,15 +541,23 @@ status_t CameraSource::stop() {
mFrameAvailableCondition.signal();
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->setListener(NULL);
- mCamera->stopRecording();
+ stopCameraRecording();
releaseQueuedFrames();
while (!mFramesBeingEncoded.empty()) {
LOGI("Waiting for outstanding frames being encoded: %d",
mFramesBeingEncoded.size());
mFrameCompleteCondition.wait(mLock);
}
- mCamera = NULL;
+
+ LOGV("Disconnect camera");
+ if ((mCameraFlags & FLAGS_HOT_CAMERA) == 0) {
+ LOGV("Camera was cold when we started, stopping preview");
+ mCamera->stopPreview();
+ }
+ mCamera->unlock();
+ mCamera.clear();
+ mCamera = 0;
+ mCameraFlags = 0;
IPCThreadState::self()->restoreCallingIdentity(token);
if (mCollectStats) {
@@ -225,11 +570,15 @@ status_t CameraSource::stop() {
return OK;
}
+void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
+ mCamera->releaseRecordingFrame(frame);
+}
+
void CameraSource::releaseQueuedFrames() {
List<sp<IMemory> >::iterator it;
while (!mFramesReceived.empty()) {
it = mFramesReceived.begin();
- mCamera->releaseRecordingFrame(*it);
+ releaseRecordingFrame(*it);
mFramesReceived.erase(it);
++mNumFramesDropped;
}
@@ -241,7 +590,7 @@ sp<MetaData> CameraSource::getFormat() {
void CameraSource::releaseOneRecordingFrame(const sp<IMemory>& frame) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->releaseRecordingFrame(frame);
+ releaseRecordingFrame(frame);
IPCThreadState::self()->restoreCallingIdentity(token);
}
@@ -251,7 +600,6 @@ void CameraSource::signalBufferReturned(MediaBuffer *buffer) {
for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
it != mFramesBeingEncoded.end(); ++it) {
if ((*it)->pointer() == buffer->data()) {
-
releaseOneRecordingFrame((*it));
mFramesBeingEncoded.erase(it);
++mNumFramesEncoded;
@@ -343,6 +691,13 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
++mNumGlitches;
}
+ // May need to skip frame or modify timestamp. Currently implemented
+ // by the subclass CameraSourceTimeLapse.
+ if (skipCurrentFrame(timestampUs)) {
+ releaseOneRecordingFrame(data);
+ return;
+ }
+
mLastFrameTimestampUs = timestampUs;
if (mNumFramesReceived == 0) {
mFirstFrameTimeUs = timestampUs;
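
The rewritten init() path is a configure-then-verify sequence: push the requested size and frame rate into CameraParameters via setParameters(), then read the parameters back and confirm in checkVideoSize()/checkFrameRate() that the camera actually kept them. A self-contained sketch of that shape, with a plain key/value map standing in for CameraParameters (names are illustrative, not the framework API):

    #include <cstdio>
    #include <cstdlib>
    #include <map>
    #include <string>

    // Hypothetical stand-in for CameraParameters: a flat key/value store.
    typedef std::map<std::string, std::string> FakeParams;

    // A camera HAL may silently clamp unsupported settings, which is
    // exactly why init() re-reads the parameters after setting them.
    static void halApply(FakeParams &params) {
        if (atoi(params["preview-frame-rate"].c_str()) > 30) {
            params["preview-frame-rate"] = "30";
        }
    }

    // Request a frame rate, let the HAL apply it, then verify the value
    // that actually stuck (cf. configureCamera() + checkFrameRate()).
    static bool setFrameRateChecked(FakeParams &params, int requested) {
        char buf[16];
        snprintf(buf, sizeof(buf), "%d", requested);
        params["preview-frame-rate"] = buf;
        halApply(params);
        int actual = atoi(params["preview-frame-rate"].c_str());
        if (actual != requested) {
            fprintf(stderr, "requested %d fps, camera kept %d fps\n",
                    requested, actual);
            return false;
        }
        return true;
    }

    int main() {
        FakeParams params;
        printf("30 fps:  %s\n", setFrameRateChecked(params, 30) ? "ok" : "rejected");
        printf("120 fps: %s\n", setFrameRateChecked(params, 120) ? "ok" : "rejected");
        return 0;
    }
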
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
new file mode 100644
index 0000000..2f3f20b
--- /dev/null
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CameraSourceTimeLapse"
+
+#include <binder/IPCThreadState.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/stagefright/CameraSource.h>
+#include <media/stagefright/CameraSourceTimeLapse.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/YUVImage.h>
+#include <media/stagefright/YUVCanvas.h>
+#include <camera/Camera.h>
+#include <camera/CameraParameters.h>
+#include <ui/Rect.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include "OMX_Video.h"
+#include <limits.h>
+
+namespace android {
+
+// static
+CameraSourceTimeLapse *CameraSourceTimeLapse::CreateFromCamera(
+ const sp<ICamera> &camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<Surface>& surface,
+ int64_t timeBetweenTimeLapseFrameCaptureUs) {
+
+ CameraSourceTimeLapse *source = new
+ CameraSourceTimeLapse(camera, cameraId,
+ videoSize, videoFrameRate, surface,
+ timeBetweenTimeLapseFrameCaptureUs);
+
+ if (source != NULL) {
+ if (source->initCheck() != OK) {
+ delete source;
+ return NULL;
+ }
+ }
+ return source;
+}
+
+CameraSourceTimeLapse::CameraSourceTimeLapse(
+ const sp<ICamera>& camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<Surface>& surface,
+ int64_t timeBetweenTimeLapseFrameCaptureUs)
+ : CameraSource(camera, cameraId, videoSize, videoFrameRate, surface),
+ mTimeBetweenTimeLapseFrameCaptureUs(timeBetweenTimeLapseFrameCaptureUs),
+ mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
+ mLastTimeLapseFrameRealTimestampUs(0),
+ mSkipCurrentFrame(false) {
+
+ LOGV("starting time lapse mode");
+ mVideoWidth = videoSize.width;
+ mVideoHeight = videoSize.height;
+
+ if (trySettingPreviewSize(videoSize.width, videoSize.height)) {
+ mUseStillCameraForTimeLapse = false;
+ } else {
+ // TODO: Add a check to see that mTimeBetweenTimeLapseFrameCaptureUs is
+ // greater than the shortest interval at which the still camera can take pictures.
+ mUseStillCameraForTimeLapse = true;
+ CHECK(setPictureSizeToClosestSupported(videoSize.width, videoSize.height));
+ mNeedCropping = computeCropRectangleOffset();
+ mMeta->setInt32(kKeyWidth, videoSize.width);
+ mMeta->setInt32(kKeyHeight, videoSize.height);
+ }
+
+ // Initialize quick stop variables.
+ mQuickStop = false;
+ mForceRead = false;
+ mLastReadBufferCopy = NULL;
+ mStopWaitingForIdleCamera = false;
+}
+
+CameraSourceTimeLapse::~CameraSourceTimeLapse() {
+}
+
+void CameraSourceTimeLapse::startQuickReadReturns() {
+ Mutex::Autolock autoLock(mQuickStopLock);
+ LOGV("Enabling quick read returns");
+
+ // Enable quick stop mode.
+ mQuickStop = true;
+
+ if (mUseStillCameraForTimeLapse) {
+ // wake up the thread right away.
+ mTakePictureCondition.signal();
+ } else {
+ // Force dataCallbackTimestamp() coming from the video camera to not skip
+ // the next frame, as we want read() to get a frame right away.
+ mForceRead = true;
+ }
+}
+
+bool CameraSourceTimeLapse::trySettingPreviewSize(int32_t width, int32_t height) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = mCamera->getParameters();
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ CameraParameters params(s);
+ Vector<Size> supportedSizes;
+ params.getSupportedPreviewSizes(supportedSizes);
+
+ bool previewSizeSupported = false;
+ for (uint32_t i = 0; i < supportedSizes.size(); ++i) {
+ int32_t pictureWidth = supportedSizes[i].width;
+ int32_t pictureHeight = supportedSizes[i].height;
+
+ if ((pictureWidth == width) && (pictureHeight == height)) {
+ previewSizeSupported = true;
+ }
+ }
+
+ if (previewSizeSupported) {
+ LOGV("Video size (%d, %d) is a supported preview size", width, height);
+ params.setPreviewSize(width, height);
+ CHECK_EQ(OK, mCamera->setParameters(params.flatten()));
+ return true;
+ }
+
+ return false;
+}
+
+bool CameraSourceTimeLapse::setPictureSizeToClosestSupported(int32_t width, int32_t height) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = mCamera->getParameters();
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ CameraParameters params(s);
+ Vector<Size> supportedSizes;
+ params.getSupportedPictureSizes(supportedSizes);
+
+ int32_t minPictureSize = INT_MAX;
+ for (uint32_t i = 0; i < supportedSizes.size(); ++i) {
+ int32_t pictureWidth = supportedSizes[i].width;
+ int32_t pictureHeight = supportedSizes[i].height;
+
+ if ((pictureWidth >= width) && (pictureHeight >= height)) {
+ int32_t pictureSize = pictureWidth*pictureHeight;
+ if (pictureSize < minPictureSize) {
+ minPictureSize = pictureSize;
+ mPictureWidth = pictureWidth;
+ mPictureHeight = pictureHeight;
+ }
+ }
+ }
+ LOGV("Picture size = (%d, %d)", mPictureWidth, mPictureHeight);
+ return (minPictureSize != INT_MAX);
+}
+
+bool CameraSourceTimeLapse::computeCropRectangleOffset() {
+ if ((mPictureWidth == mVideoWidth) && (mPictureHeight == mVideoHeight)) {
+ return false;
+ }
+
+ CHECK((mPictureWidth > mVideoWidth) && (mPictureHeight > mVideoHeight));
+
+ int32_t widthDifference = mPictureWidth - mVideoWidth;
+ int32_t heightDifference = mPictureHeight - mVideoHeight;
+
+ mCropRectStartX = widthDifference/2;
+ mCropRectStartY = heightDifference/2;
+
+ LOGV("setting crop rectangle offset to (%d, %d)", mCropRectStartX, mCropRectStartY);
+
+ return true;
+}
+
+void CameraSourceTimeLapse::signalBufferReturned(MediaBuffer* buffer) {
+ Mutex::Autolock autoLock(mQuickStopLock);
+ if (mQuickStop && (buffer == mLastReadBufferCopy)) {
+ buffer->setObserver(NULL);
+ buffer->release();
+ } else {
+ return CameraSource::signalBufferReturned(buffer);
+ }
+}
+
+void createMediaBufferCopy(const MediaBuffer& sourceBuffer, int64_t frameTime, MediaBuffer **newBuffer) {
+ size_t sourceSize = sourceBuffer.size();
+ void* sourcePointer = sourceBuffer.data();
+
+ (*newBuffer) = new MediaBuffer(sourceSize);
+ memcpy((*newBuffer)->data(), sourcePointer, sourceSize);
+
+ (*newBuffer)->meta_data()->setInt64(kKeyTime, frameTime);
+}
+
+void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBuffer& sourceBuffer) {
+ int64_t frameTime;
+ CHECK(sourceBuffer.meta_data()->findInt64(kKeyTime, &frameTime));
+ createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy);
+ mLastReadBufferCopy->add_ref();
+ mLastReadBufferCopy->setObserver(this);
+}
+
+status_t CameraSourceTimeLapse::read(
+ MediaBuffer **buffer, const ReadOptions *options) {
+ if (mLastReadBufferCopy == NULL) {
+ mLastReadStatus = CameraSource::read(buffer, options);
+
+ // mQuickStop may have turned to true while read was blocked. Make a copy of
+ // the buffer in that case.
+ Mutex::Autolock autoLock(mQuickStopLock);
+ if (mQuickStop && *buffer) {
+ fillLastReadBufferCopy(**buffer);
+ }
+ return mLastReadStatus;
+ } else {
+ (*buffer) = mLastReadBufferCopy;
+ (*buffer)->add_ref();
+ return mLastReadStatus;
+ }
+}
+
+// static
+void *CameraSourceTimeLapse::ThreadTimeLapseWrapper(void *me) {
+ CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
+ source->threadTimeLapseEntry();
+ return NULL;
+}
+
+void CameraSourceTimeLapse::threadTimeLapseEntry() {
+ while (mStarted) {
+ {
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ if (!mCameraIdle) {
+ mCameraIdleCondition.wait(mCameraIdleLock);
+ }
+ CHECK(mCameraIdle);
+ mCameraIdle = false;
+ }
+
+ // Even if mQuickStop == true we need to take one more picture
+ // as a read() may be blocked, waiting for a frame to become available.
+ // After this takePicture, if mQuickStop == true, we can safely exit
+ // this thread as read() will make a copy of this last frame and keep
+ // returning it in the quick stop mode.
+ Mutex::Autolock autoLock(mQuickStopLock);
+ CHECK_EQ(OK, mCamera->takePicture());
+ if (mQuickStop) {
+ LOGV("threadTimeLapseEntry: Exiting due to mQuickStop = true");
+ return;
+ }
+ mTakePictureCondition.waitRelative(mQuickStopLock,
+ mTimeBetweenTimeLapseFrameCaptureUs * 1000);
+ }
+ LOGV("threadTimeLapseEntry: Exiting due to mStarted = false");
+}
+
+void CameraSourceTimeLapse::startCameraRecording() {
+ if (mUseStillCameraForTimeLapse) {
+ LOGV("start time lapse recording using still camera");
+
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = mCamera->getParameters();
+ IPCThreadState::self()->restoreCallingIdentity(token);
+
+ CameraParameters params(s);
+ params.setPictureSize(mPictureWidth, mPictureHeight);
+ mCamera->setParameters(params.flatten());
+ mCameraIdle = true;
+ mStopWaitingForIdleCamera = false;
+
+ // disable shutter sound and play the recording sound.
+ mCamera->sendCommand(CAMERA_CMD_ENABLE_SHUTTER_SOUND, 0, 0);
+ mCamera->sendCommand(CAMERA_CMD_PLAY_RECORDING_SOUND, 0, 0);
+
+ // create a thread which takes pictures in a loop
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ pthread_create(&mThreadTimeLapse, &attr, ThreadTimeLapseWrapper, this);
+ pthread_attr_destroy(&attr);
+ } else {
+ LOGV("start time lapse recording using video camera");
+ CHECK_EQ(OK, mCamera->startRecording());
+ }
+}
+
+void CameraSourceTimeLapse::stopCameraRecording() {
+ if (mUseStillCameraForTimeLapse) {
+ void *dummy;
+ pthread_join(mThreadTimeLapse, &dummy);
+
+ // Last takePicture may still be underway. Wait for the camera to get
+ // idle.
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ mStopWaitingForIdleCamera = true;
+ if (!mCameraIdle) {
+ mCameraIdleCondition.wait(mCameraIdleLock);
+ }
+ CHECK(mCameraIdle);
+ mCamera->setListener(NULL);
+
+ // play the recording sound.
+ mCamera->sendCommand(CAMERA_CMD_PLAY_RECORDING_SOUND, 0, 0);
+ } else {
+ mCamera->setListener(NULL);
+ mCamera->stopRecording();
+ }
+ if (mLastReadBufferCopy) {
+ mLastReadBufferCopy->release();
+ mLastReadBufferCopy = NULL;
+ }
+}
+
+void CameraSourceTimeLapse::releaseRecordingFrame(const sp<IMemory>& frame) {
+ if (!mUseStillCameraForTimeLapse) {
+ mCamera->releaseRecordingFrame(frame);
+ }
+}
+
+sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(const sp<IMemory> &source_data) {
+ size_t source_size = source_data->size();
+ void* source_pointer = source_data->pointer();
+
+ sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(source_size);
+ sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, source_size);
+ memcpy(newMemory->pointer(), source_pointer, source_size);
+ return newMemory;
+}
+
+// Allocates IMemory of final type MemoryBase with the given size.
+sp<IMemory> allocateIMemory(size_t size) {
+ sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(size);
+ sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, size);
+ return newMemory;
+}
+
+// static
+void *CameraSourceTimeLapse::ThreadStartPreviewWrapper(void *me) {
+ CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
+ source->threadStartPreview();
+ return NULL;
+}
+
+void CameraSourceTimeLapse::threadStartPreview() {
+ CHECK_EQ(OK, mCamera->startPreview());
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ mCameraIdle = true;
+ mCameraIdleCondition.signal();
+}
+
+void CameraSourceTimeLapse::restartPreview() {
+ // Start this in a different thread, so that the dataCallback can return
+ LOGV("restartPreview");
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+
+ pthread_t threadPreview;
+ pthread_create(&threadPreview, &attr, ThreadStartPreviewWrapper, this);
+ pthread_attr_destroy(&attr);
+}
+
+sp<IMemory> CameraSourceTimeLapse::cropYUVImage(const sp<IMemory> &source_data) {
+ // find the YUV format
+ int32_t srcFormat;
+ CHECK(mMeta->findInt32(kKeyColorFormat, &srcFormat));
+ YUVImage::YUVFormat yuvFormat;
+ if (srcFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ yuvFormat = YUVImage::YUV420SemiPlanar;
+ } else {
+ CHECK_EQ(srcFormat, OMX_COLOR_FormatYUV420Planar);
+ yuvFormat = YUVImage::YUV420Planar;
+ }
+
+ // allocate memory for cropped image and setup a canvas using it.
+ sp<IMemory> croppedImageMemory = allocateIMemory(
+ YUVImage::bufferSize(yuvFormat, mVideoWidth, mVideoHeight));
+ YUVImage yuvImageCropped(yuvFormat,
+ mVideoWidth, mVideoHeight,
+ (uint8_t *)croppedImageMemory->pointer());
+ YUVCanvas yuvCanvasCrop(yuvImageCropped);
+
+ YUVImage yuvImageSource(yuvFormat,
+ mPictureWidth, mPictureHeight,
+ (uint8_t *)source_data->pointer());
+ yuvCanvasCrop.CopyImageRect(
+ Rect(mCropRectStartX, mCropRectStartY,
+ mCropRectStartX + mVideoWidth,
+ mCropRectStartY + mVideoHeight),
+ 0, 0,
+ yuvImageSource);
+
+ return croppedImageMemory;
+}
+
+void CameraSourceTimeLapse::dataCallback(int32_t msgType, const sp<IMemory> &data) {
+ if (msgType == CAMERA_MSG_COMPRESSED_IMAGE) {
+ // takePicture will complete after this callback, so restart preview.
+ restartPreview();
+ return;
+ }
+ if (msgType != CAMERA_MSG_RAW_IMAGE) {
+ return;
+ }
+
+ LOGV("dataCallback for timelapse still frame");
+ CHECK_EQ(true, mUseStillCameraForTimeLapse);
+
+ int64_t timestampUs;
+ if (mNumFramesReceived == 0) {
+ timestampUs = mStartTimeUs;
+ } else {
+ timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ }
+
+ if (mNeedCropping) {
+ sp<IMemory> croppedImageData = cropYUVImage(data);
+ dataCallbackTimestamp(timestampUs, msgType, croppedImageData);
+ } else {
+ sp<IMemory> dataCopy = createIMemoryCopy(data);
+ dataCallbackTimestamp(timestampUs, msgType, dataCopy);
+ }
+}
+
+bool CameraSourceTimeLapse::skipCurrentFrame(int64_t timestampUs) {
+ if (mSkipCurrentFrame) {
+ mSkipCurrentFrame = false;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
+ if (!mUseStillCameraForTimeLapse) {
+ if (mLastTimeLapseFrameRealTimestampUs == 0) {
+ // First time lapse frame. Initialize mLastTimeLapseFrameRealTimestampUs
+ // to current time (timestampUs) and save frame data.
+ LOGV("dataCallbackTimestamp timelapse: initial frame");
+
+ mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+ return false;
+ }
+
+ {
+ Mutex::Autolock autoLock(mQuickStopLock);
+
+ // mForceRead may be set to true by startQuickReadReturns(). In that
+ // case don't skip this frame.
+ if (mForceRead) {
+ LOGV("dataCallbackTimestamp timelapse: forced read");
+ mForceRead = false;
+ *timestampUs = mLastFrameTimestampUs;
+ return false;
+ }
+ }
+
+ if (*timestampUs <
+ (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) {
+ // Skip all frames from last encoded frame until
+ // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed.
+ // Tell the camera to release its recording frame and return.
+ LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame");
+ return true;
+ } else {
+ // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time:
+ // - Reset mLastTimeLapseFrameRealTimestampUs to current time.
+ // - Artificially modify timestampUs to be one frame time (1/framerate) ahead
+ // of the last encoded frame's time stamp.
+ LOGV("dataCallbackTimestamp timelapse: got timelapse frame");
+
+ mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+ *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ return false;
+ }
+ }
+ return false;
+}
+
+void CameraSourceTimeLapse::dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
+ const sp<IMemory> &data) {
+ if (!mUseStillCameraForTimeLapse) {
+ mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
+ } else {
+ Mutex::Autolock autoLock(mCameraIdleLock);
+ // If we are using the still camera and stop() has been called, it may
+ // be waiting for the camera to get idle. In that case return
+ // immediately. Calling CameraSource::dataCallbackTimestamp() would lead
+ // to a deadlock, since it tries to acquire CameraSource::mLock, which in
+ // this case is held by CameraSource::stop() while it waits for the
+ // camera to become idle. And the camera will not become idle until this
+ // call returns.
+ if (mStopWaitingForIdleCamera) {
+ return;
+ }
+ }
+ CameraSource::dataCallbackTimestamp(timestampUs, msgType, data);
+}
+
+} // namespace android
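
skipFrameAndModifyTimeStamp() boils down to two pieces of arithmetic: drop every frame that arrives sooner than mTimeBetweenTimeLapseFrameCaptureUs after the last kept frame, and stamp each kept frame exactly one video-frame interval (1e6/fps) after the previous output so playback runs at normal speed. A standalone sketch of that logic, simplified so the first kept frame is pinned to time zero:

    #include <cstdio>
    #include <stdint.h>

    // Time-lapse timestamp policy: keep one frame per capture interval,
    // then remap its timestamp so the encoded stream plays back at the
    // normal video frame rate.
    struct TimeLapseFilter {
        int64_t captureIntervalUs;    // keep one frame per this much real time
        int64_t videoFrameIntervalUs; // 1000000 / playback fps
        int64_t lastRealUs;           // real timestamp of last kept frame
        int64_t lastOutUs;            // synthetic timestamp of last output
        bool started;

        // Returns true if the frame is kept; *tsUs is rewritten in place.
        bool process(int64_t *tsUs) {
            if (!started) {           // first frame is always kept
                started = true;
                lastRealUs = *tsUs;
                lastOutUs = 0;
                *tsUs = 0;
                return true;
            }
            if (*tsUs < lastRealUs + captureIntervalUs) {
                return false;         // too soon: skip this frame
            }
            lastRealUs = *tsUs;       // compress real time into video time
            lastOutUs += videoFrameIntervalUs;
            *tsUs = lastOutUs;
            return true;
        }
    };

    int main() {
        // Capture one frame every 5 s, play back at 30 fps.
        TimeLapseFilter f = { 5000000, 1000000 / 30, 0, 0, false };
        int kept = 0;
        for (int64_t t = 0; t <= 20000000; t += 1000000 / 30) {
            int64_t ts = t;
            if (f.process(&ts)) {
                printf("real %8lld us -> video %6lld us\n",
                       (long long)t, (long long)ts);
                ++kept;
            }
        }
        printf("kept %d frames\n", kept);
        return 0;
    }
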
diff --git a/media/libstagefright/MediaSourceSplitter.cpp b/media/libstagefright/MediaSourceSplitter.cpp
new file mode 100644
index 0000000..abc7012
--- /dev/null
+++ b/media/libstagefright/MediaSourceSplitter.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaSourceSplitter"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaSourceSplitter.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+MediaSourceSplitter::MediaSourceSplitter(sp<MediaSource> mediaSource) {
+ mNumberOfClients = 0;
+ mSource = mediaSource;
+ mSourceStarted = false;
+
+ mNumberOfClientsStarted = 0;
+ mNumberOfCurrentReads = 0;
+ mCurrentReadBit = 0;
+ mLastReadCompleted = true;
+}
+
+MediaSourceSplitter::~MediaSourceSplitter() {
+}
+
+sp<MediaSource> MediaSourceSplitter::createClient() {
+ Mutex::Autolock autoLock(mLock);
+
+ sp<MediaSource> client = new Client(this, mNumberOfClients++);
+ mClientsStarted.push(false);
+ mClientsDesiredReadBit.push(0);
+ return client;
+}
+
+status_t MediaSourceSplitter::start(int clientId, MetaData *params) {
+ Mutex::Autolock autoLock(mLock);
+
+ LOGV("start client (%d)", clientId);
+ if (mClientsStarted[clientId]) {
+ return OK;
+ }
+
+ mNumberOfClientsStarted++;
+
+ if (!mSourceStarted) {
+ LOGV("Starting real source from client (%d)", clientId);
+ status_t err = mSource->start(params);
+
+ if (err == OK) {
+ mSourceStarted = true;
+ mClientsStarted.editItemAt(clientId) = true;
+ mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit;
+ }
+
+ return err;
+ } else {
+ mClientsStarted.editItemAt(clientId) = true;
+ if (mLastReadCompleted) {
+ // Last read was completed. So join in the threads for the next read.
+ mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit;
+ } else {
+ // Last read is ongoing. So join in the threads for the current read.
+ mClientsDesiredReadBit.editItemAt(clientId) = mCurrentReadBit;
+ }
+ return OK;
+ }
+}
+
+status_t MediaSourceSplitter::stop(int clientId) {
+ Mutex::Autolock autoLock(mLock);
+
+ LOGV("stop client (%d)", clientId);
+ CHECK(clientId >= 0 && clientId < mNumberOfClients);
+ CHECK(mClientsStarted[clientId]);
+
+ if (--mNumberOfClientsStarted == 0) {
+ LOGV("Stopping real source from client (%d)", clientId);
+ status_t err = mSource->stop();
+ mSourceStarted = false;
+ mClientsStarted.editItemAt(clientId) = false;
+ return err;
+ } else {
+ mClientsStarted.editItemAt(clientId) = false;
+ if (!mLastReadCompleted && (mClientsDesiredReadBit[clientId] == mCurrentReadBit)) {
+ // !mLastReadCompleted implies that buffer has been read from source, but all
+ // clients haven't read it.
+ // mClientsDesiredReadBit[clientId] == mCurrentReadBit implies that this
+ // client would have wanted to read from this buffer. (i.e. it has not yet
+ // called read() for the current read buffer.)
+ // Since other threads may be waiting for all the clients' reads to complete,
+ // signal that this read has been aborted.
+ signalReadComplete_lock(true);
+ }
+ return OK;
+ }
+}
+
+sp<MetaData> MediaSourceSplitter::getFormat(int clientId) {
+ Mutex::Autolock autoLock(mLock);
+
+ LOGV("getFormat client (%d)", clientId);
+ return mSource->getFormat();
+}
+
+status_t MediaSourceSplitter::read(int clientId,
+ MediaBuffer **buffer, const MediaSource::ReadOptions *options) {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(clientId >= 0 && clientId < mNumberOfClients);
+
+ LOGV("read client (%d)", clientId);
+ *buffer = NULL;
+
+ if (!mClientsStarted[clientId]) {
+ return OK;
+ }
+
+ if (mCurrentReadBit != mClientsDesiredReadBit[clientId]) {
+ // Desired buffer has not been read from source yet.
+
+ // If the current client is the special client with clientId = 0,
+ // read from the source; otherwise wait until client 0 has finished
+ // reading from the source.
+ if (clientId == 0) {
+ // Wait for all client's last read to complete first so as to not
+ // corrupt the buffer at mLastReadMediaBuffer.
+ waitForAllClientsLastRead_lock(clientId);
+
+ readFromSource_lock(options);
+ *buffer = mLastReadMediaBuffer;
+ } else {
+ waitForReadFromSource_lock(clientId);
+
+ *buffer = mLastReadMediaBuffer;
+ (*buffer)->add_ref();
+ }
+ CHECK(mCurrentReadBit == mClientsDesiredReadBit[clientId]);
+ } else {
+ // Desired buffer has already been read from source. Use the cached data.
+ CHECK(clientId != 0);
+
+ *buffer = mLastReadMediaBuffer;
+ (*buffer)->add_ref();
+ }
+
+ mClientsDesiredReadBit.editItemAt(clientId) = !mClientsDesiredReadBit[clientId];
+ signalReadComplete_lock(false);
+
+ return mLastReadStatus;
+}
+
+void MediaSourceSplitter::readFromSource_lock(const MediaSource::ReadOptions *options) {
+ mLastReadStatus = mSource->read(&mLastReadMediaBuffer, options);
+
+ mCurrentReadBit = !mCurrentReadBit;
+ mLastReadCompleted = false;
+ mReadFromSourceCondition.broadcast();
+}
+
+void MediaSourceSplitter::waitForReadFromSource_lock(int32_t clientId) {
+ mReadFromSourceCondition.wait(mLock);
+}
+
+void MediaSourceSplitter::waitForAllClientsLastRead_lock(int32_t clientId) {
+ if (mLastReadCompleted) {
+ return;
+ }
+ mAllReadsCompleteCondition.wait(mLock);
+ CHECK(mLastReadCompleted);
+}
+
+void MediaSourceSplitter::signalReadComplete_lock(bool readAborted) {
+ if (!readAborted) {
+ mNumberOfCurrentReads++;
+ }
+
+ if (mNumberOfCurrentReads == mNumberOfClientsStarted) {
+ mLastReadCompleted = true;
+ mNumberOfCurrentReads = 0;
+ mAllReadsCompleteCondition.broadcast();
+ }
+}
+
+status_t MediaSourceSplitter::pause(int clientId) {
+ return ERROR_UNSUPPORTED;
+}
+
+// Client
+
+MediaSourceSplitter::Client::Client(
+ sp<MediaSourceSplitter> splitter,
+ int32_t clientId) {
+ mSplitter = splitter;
+ mClientId = clientId;
+}
+
+status_t MediaSourceSplitter::Client::start(MetaData *params) {
+ return mSplitter->start(mClientId, params);
+}
+
+status_t MediaSourceSplitter::Client::stop() {
+ return mSplitter->stop(mClientId);
+}
+
+sp<MetaData> MediaSourceSplitter::Client::getFormat() {
+ return mSplitter->getFormat(mClientId);
+}
+
+status_t MediaSourceSplitter::Client::read(
+ MediaBuffer **buffer, const ReadOptions *options) {
+ return mSplitter->read(mClientId, buffer, options);
+}
+
+status_t MediaSourceSplitter::Client::pause() {
+ return mSplitter->pause(mClientId);
+}
+
+} // namespace android
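
The synchronization in read()/start()/stop() rests on one bit per client plus the global mCurrentReadBit: a client whose desired bit differs from the current bit still needs a fresh buffer for this round; client 0 performs the real source read and flips the global bit, and every client flips its own bit once it has consumed the buffer. A single-threaded sketch of just that bookkeeping (the condition variables in the real class only block clients until these bits line up):

    #include <cstdio>
    #include <vector>

    // Bit bookkeeping from MediaSourceSplitter, without the blocking parts.
    struct SplitterBits {
        bool currentReadBit;
        std::vector<bool> desiredBit;  // one per client
        int buffersRead;

        explicit SplitterBits(int clients)
            : currentReadBit(false), desiredBit(clients, true), buffersRead(0) {}

        // Client reads: if its desired bit differs from the current bit, a
        // fresh buffer must be pulled from the source (done by client 0 in
        // the real code); either way the client then flips its own bit.
        void read(int clientId) {
            if (desiredBit[clientId] != currentReadBit) {
                ++buffersRead;                    // pull one buffer from source
                currentReadBit = !currentReadBit; // publish it to all clients
            }
            desiredBit[clientId] = !desiredBit[clientId];
        }
    };

    int main() {
        SplitterBits s(3);
        // Two rounds of all three clients reading: the source is hit once
        // per round, not once per client read.
        for (int round = 0; round < 2; ++round) {
            for (int c = 0; c < 3; ++c) {
                s.read(c);
            }
        }
        printf("source reads: %d (for 6 client reads)\n", s.buffersRead); // 2
        return 0;
    }
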
diff --git a/media/libstagefright/NuHTTPDataSource.cpp b/media/libstagefright/NuHTTPDataSource.cpp
index fcbfdac..2743b2f 100644
--- a/media/libstagefright/NuHTTPDataSource.cpp
+++ b/media/libstagefright/NuHTTPDataSource.cpp
@@ -42,7 +42,7 @@ static bool ParseURL(
path->setTo(slashPos);
}
- char *colonPos = strchr(host->string(), ':');
+ const char *colonPos = strchr(host->string(), ':');
if (colonPos != NULL) {
unsigned long x;
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 4648ad3..5e70888 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -152,31 +152,38 @@ static sp<MediaSource> InstantiateSoftwareCodec(
static const CodecInfo kDecoderInfo[] = {
{ MEDIA_MIMETYPE_IMAGE_JPEG, "OMX.TI.JPEG.decode" },
+// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.Nvidia.mp3.decoder" },
// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.TI.MP3.decode" },
{ MEDIA_MIMETYPE_AUDIO_MPEG, "MP3Decoder" },
// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.PV.mp3dec" },
// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.decode" },
+// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amr.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AMR_NB, "AMRNBDecoder" },
// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.PV.amrdec" },
+// { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.Nvidia.amrwb.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.decode" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "AMRWBDecoder" },
// { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.PV.amrdec" },
+// { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.Nvidia.aac.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.decode" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "AACDecoder" },
// { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.PV.aacdec" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW, "G711Decoder" },
{ MEDIA_MIMETYPE_AUDIO_G711_MLAW, "G711Decoder" },
+// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.decode" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.decoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.decoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Decoder" },
// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.PV.mpeg4dec" },
+// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.decode" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.decoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.decoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Decoder" },
// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.PV.h263dec" },
+ { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.decode" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.decoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.decoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.Decoder" },
@@ -198,18 +205,21 @@ static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Encoder" },
// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.PV.mpeg4enc" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Encoder" },
// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.PV.h263enc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.encoder" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.SEC.AVC.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "AVCEncoder" },
// { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.PV.avcenc" },
@@ -350,6 +360,13 @@ uint32_t OMXCodec::getComponentQuirks(
const char *componentName, bool isEncoder) {
uint32_t quirks = 0;
+ if (!strcmp(componentName, "OMX.Nvidia.amr.decoder") ||
+ !strcmp(componentName, "OMX.Nvidia.amrwb.decoder") ||
+ !strcmp(componentName, "OMX.Nvidia.aac.decoder") ||
+ !strcmp(componentName, "OMX.Nvidia.mp3.decoder")) {
+ quirks |= kDecoderLiesAboutNumberOfChannels;
+ }
+
if (!strcmp(componentName, "OMX.PV.avcdec")) {
quirks |= kWantsNALFragments;
}
@@ -1239,6 +1256,10 @@ status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
h264type.bMBAFF = OMX_FALSE;
h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable;
+ if (!strcasecmp("OMX.Nvidia.h264.encoder", mComponentName)) {
+ h264type.eLevel = OMX_VIDEO_AVCLevelMax;
+ }
+
err = mOMX->setParameter(
mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
CHECK_EQ(err, OK);
@@ -1827,7 +1848,32 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
case OMX_EventPortSettingsChanged:
{
- onPortSettingsChanged(data1);
+ CODEC_LOGV("OMX_EventPortSettingsChanged(port=%ld, data2=0x%08lx)",
+ data1, data2);
+
+ if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
+ onPortSettingsChanged(data1);
+ } else if (data1 == kPortIndexOutput
+ && data2 == OMX_IndexConfigCommonOutputCrop) {
+
+ OMX_CONFIG_RECTTYPE rect;
+ rect.nPortIndex = kPortIndexOutput;
+ InitOMXParams(&rect);
+
+ status_t err =
+ mOMX->getConfig(
+ mNode, OMX_IndexConfigCommonOutputCrop,
+ &rect, sizeof(rect));
+
+ if (err == OK) {
+ CODEC_LOGV(
+ "output crop (%ld, %ld, %ld, %ld)",
+ rect.nLeft, rect.nTop, rect.nWidth, rect.nHeight);
+ } else {
+ CODEC_LOGE("getConfig(OMX_IndexConfigCommonOutputCrop) "
+ "returned error 0x%08x", err);
+ }
+ }
break;
}
@@ -2217,6 +2263,7 @@ void OMXCodec::disablePortAsync(OMX_U32 portIndex) {
CHECK_EQ(mPortStatus[portIndex], ENABLED);
mPortStatus[portIndex] = DISABLING;
+ CODEC_LOGV("sending OMX_CommandPortDisable(%ld)", portIndex);
status_t err =
mOMX->sendCommand(mNode, OMX_CommandPortDisable, portIndex);
CHECK_EQ(err, OK);
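
The new OMX_EventPortSettingsChanged branch only logs the crop rectangle here, but the crop's meaning is worth spelling out: decoders often emit macroblock-aligned buffers that are larger than the visible picture, and the crop selects the sub-rectangle a renderer should actually display. A sketch with a plain struct standing in for OMX_CONFIG_RECTTYPE:

    #include <cstdio>

    // Plain stand-in for the crop fields of OMX_CONFIG_RECTTYPE.
    struct CropRect {
        long left, top, width, height;
    };

    // A decoder may output 1920x1088 buffers (16-aligned height) while the
    // crop marks only 1920x1080 of that as real picture: the renderer sizes
    // its window from the crop and samples the buffer from (left, top).
    static void describeDisplay(long bufW, long bufH, const CropRect &crop) {
        printf("buffer %ldx%ld -> display %ldx%ld at (%ld,%ld), %ld padding rows\n",
               bufW, bufH, crop.width, crop.height, crop.left, crop.top,
               bufH - crop.height);
    }

    int main() {
        CropRect crop = { 0, 0, 1920, 1080 };
        describeDisplay(1920, 1088, crop);
        return 0;
    }
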
diff --git a/media/libstagefright/VideoSourceDownSampler.cpp b/media/libstagefright/VideoSourceDownSampler.cpp
new file mode 100644
index 0000000..ea7b09a
--- /dev/null
+++ b/media/libstagefright/VideoSourceDownSampler.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoSourceDownSampler"
+
+#include <media/stagefright/VideoSourceDownSampler.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/YUVImage.h>
+#include <media/stagefright/YUVCanvas.h>
+#include "OMX_Video.h"
+
+namespace android {
+
+VideoSourceDownSampler::VideoSourceDownSampler(const sp<MediaSource> &videoSource,
+ int32_t width, int32_t height) {
+ LOGV("Construct VideoSourceDownSampler");
+ CHECK(width > 0);
+ CHECK(height > 0);
+
+ mRealVideoSource = videoSource;
+ mWidth = width;
+ mHeight = height;
+
+ mMeta = new MetaData(*(mRealVideoSource->getFormat()));
+ CHECK(mMeta->findInt32(kKeyWidth, &mRealSourceWidth));
+ CHECK(mMeta->findInt32(kKeyHeight, &mRealSourceHeight));
+
+ if ((mWidth != mRealSourceWidth) || (mHeight != mRealSourceHeight)) {
+ // Change meta data for width and height.
+ CHECK(mWidth <= mRealSourceWidth);
+ CHECK(mHeight <= mRealSourceHeight);
+
+ mNeedDownSampling = true;
+ computeDownSamplingParameters();
+ mMeta->setInt32(kKeyWidth, mWidth);
+ mMeta->setInt32(kKeyHeight, mHeight);
+ } else {
+ mNeedDownSampling = false;
+ }
+}
+
+VideoSourceDownSampler::~VideoSourceDownSampler() {
+}
+
+void VideoSourceDownSampler::computeDownSamplingParameters() {
+ mDownSampleSkipX = mRealSourceWidth / mWidth;
+ mDownSampleSkipY = mRealSourceHeight / mHeight;
+
+ mDownSampleOffsetX = mRealSourceWidth - mDownSampleSkipX * mWidth;
+ mDownSampleOffsetY = mRealSourceHeight - mDownSampleSkipY * mHeight;
+}
+
+void VideoSourceDownSampler::downSampleYUVImage(
+ const MediaBuffer &sourceBuffer, MediaBuffer **buffer) const {
+ // find the YUV format
+ int32_t srcFormat;
+ CHECK(mMeta->findInt32(kKeyColorFormat, &srcFormat));
+    YUVImage::YUVFormat yuvFormat;
+    if (srcFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+        yuvFormat = YUVImage::YUV420SemiPlanar;
+    } else if (srcFormat == OMX_COLOR_FormatYUV420Planar) {
+        yuvFormat = YUVImage::YUV420Planar;
+    } else {
+        // Guard against leaving yuvFormat uninitialized when the source
+        // reports an unsupported color format.
+        CHECK(!"Unsupported source color format");
+    }
+
+ // allocate mediaBuffer for down sampled image and setup a canvas.
+ *buffer = new MediaBuffer(YUVImage::bufferSize(yuvFormat, mWidth, mHeight));
+ YUVImage yuvDownSampledImage(yuvFormat,
+ mWidth, mHeight,
+ (uint8_t *)(*buffer)->data());
+ YUVCanvas yuvCanvasDownSample(yuvDownSampledImage);
+
+ YUVImage yuvImageSource(yuvFormat,
+ mRealSourceWidth, mRealSourceHeight,
+ (uint8_t *)sourceBuffer.data());
+ yuvCanvasDownSample.downsample(mDownSampleOffsetX, mDownSampleOffsetY,
+ mDownSampleSkipX, mDownSampleSkipY,
+ yuvImageSource);
+}
+
+status_t VideoSourceDownSampler::start(MetaData *params) {
+ LOGV("start");
+ return mRealVideoSource->start();
+}
+
+status_t VideoSourceDownSampler::stop() {
+ LOGV("stop");
+ return mRealVideoSource->stop();
+}
+
+sp<MetaData> VideoSourceDownSampler::getFormat() {
+ LOGV("getFormat");
+ return mMeta;
+}
+
+status_t VideoSourceDownSampler::read(
+ MediaBuffer **buffer, const ReadOptions *options) {
+ LOGV("read");
+ MediaBuffer *realBuffer;
+    status_t err = mRealVideoSource->read(&realBuffer, options);
+    if (err != OK) {
+        // Propagate the error without touching an invalid buffer.
+        return err;
+    }
+
+ if (mNeedDownSampling) {
+ downSampleYUVImage(*realBuffer, buffer);
+
+ int64_t frameTime;
+ realBuffer->meta_data()->findInt64(kKeyTime, &frameTime);
+ (*buffer)->meta_data()->setInt64(kKeyTime, frameTime);
+
+        // We want this buffer to be deleted as soon as the encoder
+        // releases it, so don't add a reference and set the observer
+        // to NULL.
+ (*buffer)->setObserver(NULL);
+
+ // The original buffer is no longer required. Release it.
+ realBuffer->release();
+ } else {
+ *buffer = realBuffer;
+ }
+
+ return err;
+}
+
+status_t VideoSourceDownSampler::pause() {
+ LOGV("pause");
+ return mRealVideoSource->pause();
+}
+
+} // namespace android
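
A standalone worked example of computeDownSamplingParameters() above (a sketch; the dimensions are illustrative): downsampling 640x480 to 300x200 samples every 2nd pixel in each direction and shifts the sampling window by the leftover pixels, which also satisfies the bounds CHECKs in YUVCanvas::downsample().

    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t realW = 640, realH = 480;   // source dimensions
        int32_t w = 300, h = 200;           // requested output dimensions

        int32_t skipX = realW / w;          // 2: sample every 2nd column
        int32_t skipY = realH / h;          // 2: sample every 2nd row
        int32_t offX  = realW - skipX * w;  // 40 leftover columns
        int32_t offY  = realH - skipY * h;  // 80 leftover rows

        // Bounds mirrored from YUVCanvas::downsample():
        // offX + (w - 1) * skipX = 638 < 640,
        // offY + (h - 1) * skipY = 478 < 480.
        printf("skip=(%d,%d) offset=(%d,%d)\n", skipX, skipY, offX, offY);
        return 0;
    }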
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 0dcbd73..40be0f9 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -7,6 +7,8 @@ LOCAL_SRC_FILES:= \
 LOCAL_C_INCLUDES := \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax
+        $(TOP)/frameworks/base/include/media/stagefright/openmax \
+        $(TOP)/hardware/msm7k
LOCAL_SHARED_LIBRARIES := \
libbinder \
@@ -17,6 +18,11 @@ LOCAL_SHARED_LIBRARIES := \
libsurfaceflinger_client\
libcamera_client
+# ifeq ($(TARGET_BOARD_PLATFORM),msm7k)
+ifeq ($(TARGET_PRODUCT),passion)
+ LOCAL_CFLAGS += -DHAS_YCBCR420_SP_ADRENO
+endif
+
LOCAL_MODULE:= libstagefright_color_conversion
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index a6dbf69..662a84a 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -22,65 +22,182 @@
#include <binder/MemoryHeapBase.h>
#include <binder/MemoryHeapPmem.h>
#include <media/stagefright/MediaDebug.h>
-#include <surfaceflinger/ISurface.h>
+#include <surfaceflinger/Surface.h>
+#include <ui/android_native_buffer.h>
+#include <ui/GraphicBufferMapper.h>
+
+// XXX: Temporary hack to allow referencing the _ADRENO pixel format here.
+#include <libgralloc-qsd8k/gralloc_priv.h>
namespace android {
SoftwareRenderer::SoftwareRenderer(
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight)
: mColorFormat(colorFormat),
- mConverter(colorFormat, OMX_COLOR_Format16bitRGB565),
- mISurface(surface),
+ mConverter(NULL),
+ mYUVMode(None),
+ mSurface(surface),
mDisplayWidth(displayWidth),
mDisplayHeight(displayHeight),
mDecodedWidth(decodedWidth),
- mDecodedHeight(decodedHeight),
- mFrameSize(mDecodedWidth * mDecodedHeight * 2), // RGB565
- mIndex(0) {
- mMemoryHeap = new MemoryHeapBase("/dev/pmem_adsp", 2 * mFrameSize);
- if (mMemoryHeap->heapID() < 0) {
- LOGI("Creating physical memory heap failed, reverting to regular heap.");
- mMemoryHeap = new MemoryHeapBase(2 * mFrameSize);
- } else {
- sp<MemoryHeapPmem> pmemHeap = new MemoryHeapPmem(mMemoryHeap);
- pmemHeap->slap();
- mMemoryHeap = pmemHeap;
+ mDecodedHeight(decodedHeight) {
+ LOGI("input format = %d", mColorFormat);
+ LOGI("display = %d x %d, decoded = %d x %d",
+ mDisplayWidth, mDisplayHeight, mDecodedWidth, mDecodedHeight);
+
+ mDecodedWidth = mDisplayWidth;
+ mDecodedHeight = mDisplayHeight;
+
+ int halFormat;
+ switch (mColorFormat) {
+#ifdef HAS_YCBCR420_SP_ADRENO
+ case OMX_COLOR_FormatYUV420Planar:
+ {
+ halFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO;
+ mYUVMode = YUV420ToYUV420sp;
+ break;
+ }
+
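+        // 0x7fa30c00 is the Qualcomm-specific semiplanar YUV format
+        // (OMX_QCOM_COLOR_FormatYVU420SemiPlanar).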
+ case 0x7fa30c00:
+ {
+ halFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO;
+ mYUVMode = YUV420spToYUV420sp;
+ break;
+ }
+#endif
+
+ default:
+ halFormat = HAL_PIXEL_FORMAT_RGB_565;
+
+ mConverter = new ColorConverter(
+ mColorFormat, OMX_COLOR_Format16bitRGB565);
+ CHECK(mConverter->isValid());
+ break;
}
- CHECK(mISurface.get() != NULL);
+ CHECK(mSurface.get() != NULL);
CHECK(mDecodedWidth > 0);
CHECK(mDecodedHeight > 0);
- CHECK(mMemoryHeap->heapID() >= 0);
- CHECK(mConverter.isValid());
+ CHECK(mConverter == NULL || mConverter->isValid());
- ISurface::BufferHeap bufferHeap(
- mDisplayWidth, mDisplayHeight,
- mDecodedWidth, mDecodedHeight,
- PIXEL_FORMAT_RGB_565,
- mMemoryHeap);
+ CHECK_EQ(0,
+ native_window_set_usage(
+ mSurface.get(),
+ GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
+ | GRALLOC_USAGE_HW_TEXTURE));
- status_t err = mISurface->registerBuffers(bufferHeap);
- CHECK_EQ(err, OK);
+ CHECK_EQ(0, native_window_set_buffer_count(mSurface.get(), 2));
+
+    // XXX: It is unclear whether the width must be a multiple of 32 here.
+ CHECK_EQ(0, native_window_set_buffers_geometry(
+ mSurface.get(), mDecodedWidth, mDecodedHeight,
+ halFormat));
}
SoftwareRenderer::~SoftwareRenderer() {
- mISurface->unregisterBuffers();
+ delete mConverter;
+ mConverter = NULL;
+}
+
+static inline size_t ALIGN(size_t x, size_t alignment) {
+ return (x + alignment - 1) & ~(alignment - 1);
}
void SoftwareRenderer::render(
const void *data, size_t size, void *platformPrivate) {
- size_t offset = mIndex * mFrameSize;
- void *dst = (uint8_t *)mMemoryHeap->getBase() + offset;
+ android_native_buffer_t *buf;
+ int err;
+ if ((err = mSurface->dequeueBuffer(mSurface.get(), &buf)) != 0) {
+ LOGW("Surface::dequeueBuffer returned error %d", err);
+ return;
+ }
+
+ CHECK_EQ(0, mSurface->lockBuffer(mSurface.get(), buf));
+
+ GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+
+ Rect bounds(mDecodedWidth, mDecodedHeight);
- mConverter.convert(
- mDecodedWidth, mDecodedHeight,
- data, 0, dst, 2 * mDecodedWidth);
+ void *dst;
+ CHECK_EQ(0, mapper.lock(
+ buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));
- mISurface->postBuffer(offset);
- mIndex = 1 - mIndex;
+ if (mConverter) {
+ mConverter->convert(
+ mDecodedWidth, mDecodedHeight,
+ data, 0, dst, buf->stride * 2);
+ } else if (mYUVMode == YUV420spToYUV420sp) {
+ // Input and output are both YUV420sp, but the alignment requirements
+ // are different.
+ size_t srcYStride = mDecodedWidth;
+ const uint8_t *srcY = (const uint8_t *)data;
+ uint8_t *dstY = (uint8_t *)dst;
+ for (size_t i = 0; i < mDecodedHeight; ++i) {
+ memcpy(dstY, srcY, mDecodedWidth);
+ srcY += srcYStride;
+ dstY += buf->stride;
+ }
+
+ size_t srcUVStride = (mDecodedWidth + 1) & ~1;
+ size_t dstUVStride = ALIGN(mDecodedWidth / 2, 32) * 2;
+
+ const uint8_t *srcUV = (const uint8_t *)data
+ + mDecodedHeight * mDecodedWidth;
+
+ size_t dstUVOffset = ALIGN(ALIGN(mDecodedHeight, 32) * buf->stride, 4096);
+ uint8_t *dstUV = (uint8_t *)dst + dstUVOffset;
+
+ for (size_t i = 0; i < (mDecodedHeight + 1) / 2; ++i) {
+ memcpy(dstUV, srcUV, (mDecodedWidth + 1) & ~1);
+ srcUV += srcUVStride;
+ dstUV += dstUVStride;
+ }
+ } else if (mYUVMode == YUV420ToYUV420sp) {
+        // Input is planar YUV420; interleave the chroma planes into
+        // YUV420sp while honoring the output's alignment requirements.
+ size_t srcYStride = mDecodedWidth;
+ const uint8_t *srcY = (const uint8_t *)data;
+ uint8_t *dstY = (uint8_t *)dst;
+ for (size_t i = 0; i < mDecodedHeight; ++i) {
+ memcpy(dstY, srcY, mDecodedWidth);
+ srcY += srcYStride;
+ dstY += buf->stride;
+ }
+
+ size_t srcUVStride = (mDecodedWidth + 1) / 2;
+ size_t dstUVStride = ALIGN(mDecodedWidth / 2, 32) * 2;
+
+ const uint8_t *srcU = (const uint8_t *)data
+ + mDecodedHeight * mDecodedWidth;
+
+ const uint8_t *srcV =
+ srcU + ((mDecodedWidth + 1) / 2) * ((mDecodedHeight + 1) / 2);
+
+ size_t dstUVOffset = ALIGN(ALIGN(mDecodedHeight, 32) * buf->stride, 4096);
+ uint8_t *dstUV = (uint8_t *)dst + dstUVOffset;
+
+ for (size_t i = 0; i < (mDecodedHeight + 1) / 2; ++i) {
+ for (size_t j = 0; j < (mDecodedWidth + 1) / 2; ++j) {
+ dstUV[2 * j + 1] = srcU[j];
+ dstUV[2 * j] = srcV[j];
+ }
+ srcU += srcUVStride;
+ srcV += srcUVStride;
+ dstUV += dstUVStride;
+ }
+ } else {
+ memcpy(dst, data, size);
+ }
+
+ CHECK_EQ(0, mapper.unlock(buf->handle));
+
+ if ((err = mSurface->queueBuffer(mSurface.get(), buf)) != 0) {
+ LOGW("Surface::queueBuffer returned error %d", err);
+ }
+ buf = NULL;
}
} // namespace android
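
The Adreno chroma placement above is easiest to sanity-check numerically. A standalone sketch of the ALIGN() arithmetic, assuming an illustrative 320x240 buffer whose stride equals its width:

    #include <cstddef>
    #include <cstdio>

    static inline size_t ALIGN(size_t x, size_t alignment) {
        return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        size_t w = 320, h = 240, stride = 320;

        // Each interleaved UV row is padded so that the width/2 chroma
        // pairs start on a 32-pixel boundary: ALIGN(160, 32) * 2 = 320.
        size_t dstUVStride = ALIGN(w / 2, 32) * 2;

        // The UV plane starts after the Y plane, rounded up to a 32-row
        // boundary and then to a 4096-byte page:
        // ALIGN(ALIGN(240, 32) * 320, 4096) = ALIGN(81920, 4096) = 81920.
        size_t dstUVOffset = ALIGN(ALIGN(h, 32) * stride, 4096);

        printf("uvStride=%zu uvOffset=%zu\n", dstUVStride, dstUVOffset);
        return 0;
    }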
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index a68c641..90f3d6d 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -96,7 +96,7 @@ static bool MakeURL(const char *baseURL, const char *url, AString *out) {
out->setTo(baseURL);
out->append(url);
} else {
- char *slashPos = strrchr(baseURL, '/');
+ const char *slashPos = strrchr(baseURL, '/');
if (slashPos > &baseURL[6]) {
out->setTo(baseURL, slashPos - baseURL);
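
The const fix above (repeated in the rtsp files further below) is needed because C++ exposes const-preserving overloads of the <cstring> search functions; a minimal sketch:

    #include <cstring>

    void example(const char *baseURL) {
        // The overload const char *strrchr(const char *, int) is
        // selected here; storing the result in a plain char * discards
        // the qualifier and no longer compiles.
        const char *slashPos = strrchr(baseURL, '/');
        (void)slashPos;
    }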
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 46a0c65..5a1d7e7 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -76,6 +76,7 @@ struct AwesomePlayer {
bool isPlaying() const;
void setISurface(const sp<ISurface> &isurface);
+ void setSurface(const sp<Surface> &surface);
void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);
status_t setLooping(bool shouldLoop);
@@ -121,6 +122,7 @@ private:
wp<MediaPlayerBase> mListener;
sp<ISurface> mISurface;
+ sp<Surface> mSurface;
sp<MediaPlayerBase::AudioSink> mAudioSink;
SystemTimeSource mSystemTimeSource;
@@ -226,6 +228,7 @@ private:
status_t seekTo_l(int64_t timeUs);
status_t pause_l(bool at_eos = false);
void initRenderer_l();
+ void notifyVideoSize_l();
void seekAudioIfNecessary_l();
void cancelPlayerEvents(bool keepBufferingGoing = false);
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index c99da59..83b75ad 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -59,10 +59,17 @@ public:
node_id node, OMX_INDEXTYPE index,
const void *params, size_t size);
+ virtual status_t enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer);
+ virtual status_t useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
+
virtual status_t allocateBuffer(
node_id node, OMX_U32 port_index, size_t size,
buffer_id *buffer, void **buffer_data);
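
A hedged sketch of how a client might drive the two new entry points (node setup omitted; the port index, buffer dimensions, and format are illustrative):

    sp<GraphicBuffer> gb = new GraphicBuffer(
            1280, 720, HAL_PIXEL_FORMAT_RGB_565,
            GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_SW_WRITE_OFTEN);

    // Opt the output port in to gralloc-backed buffers first...
    CHECK_EQ(omx->enableGraphicBuffers(node, kPortIndexOutput, OMX_TRUE),
             (status_t)OK);

    // ...then register each GraphicBuffer in place of an IMemory one.
    IOMX::buffer_id bufferId;
    CHECK_EQ(omx->useGraphicBuffer(node, kPortIndexOutput, gb, &bufferId),
             (status_t)OK);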
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index b5b31ac..8c7c562 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -49,10 +49,16 @@ struct OMXNodeInstance {
status_t getConfig(OMX_INDEXTYPE index, void *params, size_t size);
status_t setConfig(OMX_INDEXTYPE index, const void *params, size_t size);
+ status_t enableGraphicBuffers(OMX_U32 portIndex, OMX_BOOL enable);
+
status_t useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
OMX::buffer_id *buffer);
+ status_t useGraphicBuffer(
+ OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+ OMX::buffer_id *buffer);
+
status_t allocateBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data);
@@ -125,4 +131,3 @@ private:
} // namespace android
#endif // OMX_NODE_INSTANCE_H_
-
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 9eed089..8d58056 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -24,14 +24,14 @@
namespace android {
-class ISurface;
+class Surface;
class MemoryHeapBase;
class SoftwareRenderer : public VideoRenderer {
public:
SoftwareRenderer(
OMX_COLOR_FORMATTYPE colorFormat,
- const sp<ISurface> &surface,
+ const sp<Surface> &surface,
size_t displayWidth, size_t displayHeight,
size_t decodedWidth, size_t decodedHeight);
@@ -41,14 +41,18 @@ public:
const void *data, size_t size, void *platformPrivate);
private:
+ enum YUVMode {
+ None,
+ YUV420ToYUV420sp,
+ YUV420spToYUV420sp,
+ };
+
OMX_COLOR_FORMATTYPE mColorFormat;
- ColorConverter mConverter;
- sp<ISurface> mISurface;
+ ColorConverter *mConverter;
+ YUVMode mYUVMode;
+ sp<Surface> mSurface;
size_t mDisplayWidth, mDisplayHeight;
size_t mDecodedWidth, mDecodedHeight;
- size_t mFrameSize;
- sp<MemoryHeapBase> mMemoryHeap;
- int mIndex;
SoftwareRenderer(const SoftwareRenderer &);
SoftwareRenderer &operator=(const SoftwareRenderer &);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index c927da1..d89f54b 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -289,6 +289,11 @@ status_t OMX::setConfig(
index, params, size);
}
+status_t OMX::enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+ return findInstance(node)->enableGraphicBuffers(port_index, enable);
+}
+
status_t OMX::useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer) {
@@ -296,6 +301,13 @@ status_t OMX::useBuffer(
port_index, params, buffer);
}
+status_t OMX::useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
+ return findInstance(node)->useGraphicBuffer(
+ port_index, graphicBuffer, buffer);
+}
+
status_t OMX::allocateBuffer(
node_id node, OMX_U32 port_index, size_t size,
buffer_id *buffer, void **buffer_data) {
@@ -497,12 +509,17 @@ sp<IOMXRenderer> OMX::createRenderer(
}
if (!impl) {
+#if 0
LOGW("Using software renderer.");
impl = new SoftwareRenderer(
colorFormat,
surface,
displayWidth, displayHeight,
encodedWidth, encodedHeight);
+#else
+ CHECK(!"Should not be here.");
+ return NULL;
+#endif
}
return new OMXRenderer(impl);
@@ -527,4 +544,3 @@ void OMXRenderer::render(IOMX::buffer_id buffer) {
}
} // namespace android
-
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 5db516e..ba4d765 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -24,6 +24,7 @@
#include <OMX_Component.h>
#include <binder/IMemory.h>
+#include <media/stagefright/HardwareAPI.h>
#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MediaErrors.h>
@@ -40,6 +41,11 @@ struct BufferMeta {
mIsBackup(false) {
}
+ BufferMeta(const sp<GraphicBuffer> &graphicBuffer)
+ : mGraphicBuffer(graphicBuffer),
+ mIsBackup(false) {
+ }
+
void CopyFromOMX(const OMX_BUFFERHEADERTYPE *header) {
if (!mIsBackup) {
return;
@@ -61,6 +67,7 @@ struct BufferMeta {
}
private:
+ sp<GraphicBuffer> mGraphicBuffer;
sp<IMemory> mMem;
size_t mSize;
bool mIsBackup;
@@ -240,6 +247,43 @@ status_t OMXNodeInstance::setConfig(
return StatusFromOMXError(err);
}
+status_t OMXNodeInstance::enableGraphicBuffers(
+ OMX_U32 portIndex, OMX_BOOL enable) {
+ Mutex::Autolock autoLock(mLock);
+
+ OMX_INDEXTYPE index;
+ OMX_ERRORTYPE err = OMX_GetExtensionIndex(
+ mHandle,
+ const_cast<OMX_STRING>("OMX.google.android.index.enableAndroidNativeBuffers"),
+ &index);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_GetExtensionIndex failed");
+
+ return StatusFromOMXError(err);
+ }
+
+ OMX_VERSIONTYPE ver;
+ ver.s.nVersionMajor = 1;
+ ver.s.nVersionMinor = 0;
+ ver.s.nRevision = 0;
+ ver.s.nStep = 0;
+ EnableAndroidNativeBuffersParams params = {
+ sizeof(EnableAndroidNativeBuffersParams), ver, portIndex, enable,
+ };
+
+ err = OMX_SetParameter(mHandle, index, &params);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_EnableAndroidNativeBuffers failed with error %d (0x%08x)",
+ err, err);
+
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
status_t OMXNodeInstance::useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
OMX::buffer_id *buffer) {
@@ -273,6 +317,60 @@ status_t OMXNodeInstance::useBuffer(
return OK;
}
+status_t OMXNodeInstance::useGraphicBuffer(
+ OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
+ OMX::buffer_id *buffer) {
+ Mutex::Autolock autoLock(mLock);
+
+ OMX_INDEXTYPE index;
+ OMX_ERRORTYPE err = OMX_GetExtensionIndex(
+ mHandle,
+ const_cast<OMX_STRING>("OMX.google.android.index.useAndroidNativeBuffer"),
+ &index);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_GetExtensionIndex failed");
+
+ return StatusFromOMXError(err);
+ }
+
+ BufferMeta *bufferMeta = new BufferMeta(graphicBuffer);
+
+ OMX_BUFFERHEADERTYPE *header;
+
+ OMX_VERSIONTYPE ver;
+ ver.s.nVersionMajor = 1;
+ ver.s.nVersionMinor = 0;
+ ver.s.nRevision = 0;
+ ver.s.nStep = 0;
+ UseAndroidNativeBufferParams params = {
+ sizeof(UseAndroidNativeBufferParams), ver, portIndex, bufferMeta,
+ &header, graphicBuffer,
+ };
+
+ err = OMX_SetParameter(mHandle, index, &params);
+
+ if (err != OMX_ErrorNone) {
+ LOGE("OMX_UseAndroidNativeBuffer failed with error %d (0x%08x)", err,
+ err);
+
+ delete bufferMeta;
+ bufferMeta = NULL;
+
+ *buffer = 0;
+
+ return UNKNOWN_ERROR;
+ }
+
+ CHECK_EQ(header->pAppPrivate, bufferMeta);
+
+ *buffer = header;
+
+ addActiveBuffer(portIndex, *buffer);
+
+ return OK;
+}
+
status_t OMXNodeInstance::allocateBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data) {
@@ -498,4 +596,3 @@ void OMXNodeInstance::freeActiveBuffers() {
}
} // namespace android
-
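
For reference, the aggregate initializers above imply the following field layout for the two vendor-extension structs (a sketch inferred from this patch; the authoritative declarations live in the media/stagefright/HardwareAPI.h header included above):

    struct EnableAndroidNativeBuffersParams {
        OMX_U32 nSize;
        OMX_VERSIONTYPE nVersion;
        OMX_U32 nPortIndex;
        OMX_BOOL enable;
    };

    struct UseAndroidNativeBufferParams {
        OMX_U32 nSize;
        OMX_VERSIONTYPE nVersion;
        OMX_U32 nPortIndex;
        OMX_PTR pAppPrivate;                  // carries the BufferMeta
        OMX_BUFFERHEADERTYPE **bufferHeader;  // out: the allocated header
        const sp<GraphicBuffer>& nativeBuffer;
    };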
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 5ec03b2..f928c06 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -133,7 +133,7 @@ bool ARTSPConnection::ParseURL(
path->setTo(slashPos);
}
- char *colonPos = strchr(host->c_str(), ':');
+ const char *colonPos = strchr(host->c_str(), ':');
if (colonPos != NULL) {
unsigned long x;
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 0db3595..612caff 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -187,7 +187,7 @@ void ASessionDescription::getFormatType(
AString format;
getFormat(index, &format);
- char *lastSpacePos = strrchr(format.c_str(), ' ');
+ const char *lastSpacePos = strrchr(format.c_str(), ' ');
CHECK(lastSpacePos != NULL);
char *end;
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 1bc9925..6943608 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1112,7 +1112,7 @@ private:
out->setTo(baseURL);
out->append(url);
} else {
- char *slashPos = strrchr(baseURL, '/');
+ const char *slashPos = strrchr(baseURL, '/');
if (slashPos > &baseURL[6]) {
out->setTo(baseURL, slashPos - baseURL);
diff --git a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk
new file mode 100644
index 0000000..0794ad1
--- /dev/null
+++ b/media/libstagefright/yuv/Android.mk
@@ -0,0 +1,13 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ YUVImage.cpp \
+ YUVCanvas.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils
+
+LOCAL_MODULE:= libstagefright_yuv
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/yuv/YUVCanvas.cpp b/media/libstagefright/yuv/YUVCanvas.cpp
new file mode 100644
index 0000000..38aa779
--- /dev/null
+++ b/media/libstagefright/yuv/YUVCanvas.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "YUVCanvas"
+
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/YUVCanvas.h>
+#include <media/stagefright/YUVImage.h>
+#include <ui/Rect.h>
+
+namespace android {
+
+YUVCanvas::YUVCanvas(YUVImage &yuvImage)
+ : mYUVImage(yuvImage) {
+}
+
+YUVCanvas::~YUVCanvas() {
+}
+
+void YUVCanvas::FillYUV(uint8_t yValue, uint8_t uValue, uint8_t vValue) {
+ for (int32_t y = 0; y < mYUVImage.height(); ++y) {
+ for (int32_t x = 0; x < mYUVImage.width(); ++x) {
+ mYUVImage.setPixelValue(x, y, yValue, uValue, vValue);
+ }
+ }
+}
+
+void YUVCanvas::FillYUVRectangle(const Rect& rect,
+ uint8_t yValue, uint8_t uValue, uint8_t vValue) {
+ for (int32_t y = rect.top; y < rect.bottom; ++y) {
+ for (int32_t x = rect.left; x < rect.right; ++x) {
+ mYUVImage.setPixelValue(x, y, yValue, uValue, vValue);
+ }
+ }
+}
+
+void YUVCanvas::CopyImageRect(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage) {
+
+ // Try fast copy first
+ if (YUVImage::fastCopyRectangle(
+ srcRect,
+ destStartX, destStartY,
+ srcImage, mYUVImage)) {
+ return;
+ }
+
+ int32_t srcStartX = srcRect.left;
+ int32_t srcStartY = srcRect.top;
+ for (int32_t offsetY = 0; offsetY < srcRect.height(); ++offsetY) {
+ for (int32_t offsetX = 0; offsetX < srcRect.width(); ++offsetX) {
+ int32_t srcX = srcStartX + offsetX;
+ int32_t srcY = srcStartY + offsetY;
+
+ int32_t destX = destStartX + offsetX;
+ int32_t destY = destStartY + offsetY;
+
+ uint8_t yValue;
+ uint8_t uValue;
+ uint8_t vValue;
+
+ srcImage.getPixelValue(srcX, srcY, &yValue, &uValue, &vValue);
+ mYUVImage.setPixelValue(destX, destY, yValue, uValue, vValue);
+ }
+ }
+}
+
+void YUVCanvas::downsample(
+ int32_t srcOffsetX, int32_t srcOffsetY,
+ int32_t skipX, int32_t skipY,
+ const YUVImage &srcImage) {
+ // TODO: Add a low pass filter for downsampling.
+
+ // Check that srcImage is big enough to fill mYUVImage.
+ CHECK((srcOffsetX + (mYUVImage.width() - 1) * skipX) < srcImage.width());
+ CHECK((srcOffsetY + (mYUVImage.height() - 1) * skipY) < srcImage.height());
+
+ uint8_t yValue;
+ uint8_t uValue;
+ uint8_t vValue;
+
+ int32_t srcY = srcOffsetY;
+ for (int32_t y = 0; y < mYUVImage.height(); ++y) {
+ int32_t srcX = srcOffsetX;
+ for (int32_t x = 0; x < mYUVImage.width(); ++x) {
+ srcImage.getPixelValue(srcX, srcY, &yValue, &uValue, &vValue);
+ mYUVImage.setPixelValue(x, y, yValue, uValue, vValue);
+
+ srcX += skipX;
+ }
+ srcY += skipY;
+ }
+}
+
+} // namespace android
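
A minimal usage sketch tying the two new classes together (dimensions are illustrative):

    #include <media/stagefright/YUVCanvas.h>
    #include <media/stagefright/YUVImage.h>

    using namespace android;

    void demo() {
        YUVImage src(YUVImage::YUV420Planar, 640, 480);
        YUVImage dst(YUVImage::YUV420Planar, 320, 240);

        YUVCanvas srcCanvas(src);
        srcCanvas.FillYUV(16, 128, 128);   // fill the source with black

        // 2x decimation with no offset; satisfies the bounds CHECKs since
        // 0 + (320 - 1) * 2 = 638 < 640 and 0 + (240 - 1) * 2 = 478 < 480.
        YUVCanvas dstCanvas(dst);
        dstCanvas.downsample(0, 0, 2, 2, src);
    }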
diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp
new file mode 100644
index 0000000..b712062
--- /dev/null
+++ b/media/libstagefright/yuv/YUVImage.cpp
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "YUVImage"
+
+#include <media/stagefright/YUVImage.h>
+#include <ui/Rect.h>
+#include <media/stagefright/MediaDebug.h>
+
+namespace android {
+
+YUVImage::YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height) {
+ mYUVFormat = yuvFormat;
+ mWidth = width;
+ mHeight = height;
+
+ size_t numberOfBytes = bufferSize(yuvFormat, width, height);
+ uint8_t *buffer = new uint8_t[numberOfBytes];
+ mBuffer = buffer;
+ mOwnBuffer = true;
+
+ initializeYUVPointers();
+}
+
+YUVImage::YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height, uint8_t *buffer) {
+ mYUVFormat = yuvFormat;
+ mWidth = width;
+ mHeight = height;
+ mBuffer = buffer;
+ mOwnBuffer = false;
+
+ initializeYUVPointers();
+}
+
+//static
+size_t YUVImage::bufferSize(YUVFormat yuvFormat, int32_t width, int32_t height) {
+ int32_t numberOfPixels = width*height;
+ size_t numberOfBytes = 0;
+ if (yuvFormat == YUV420Planar || yuvFormat == YUV420SemiPlanar) {
+ // Y takes numberOfPixels bytes and U/V take numberOfPixels/4 bytes each.
+ numberOfBytes = (size_t)(numberOfPixels + (numberOfPixels >> 1));
+ } else {
+ LOGE("Format not supported");
+ }
+ return numberOfBytes;
+}
+
+bool YUVImage::initializeYUVPointers() {
+ int32_t numberOfPixels = mWidth * mHeight;
+
+ if (mYUVFormat == YUV420Planar) {
+ mYdata = (uint8_t *)mBuffer;
+ mUdata = mYdata + numberOfPixels;
+ mVdata = mUdata + (numberOfPixels >> 2);
+ } else if (mYUVFormat == YUV420SemiPlanar) {
+        // U and V channels are interleaved as VUVUVU, so the V data
+        // starts at the end of the Y plane and the U data starts one
+        // byte after the V data.
+ mYdata = (uint8_t *)mBuffer;
+ mVdata = mYdata + numberOfPixels;
+ mUdata = mVdata + 1;
+ } else {
+ LOGE("Format not supported");
+ return false;
+ }
+ return true;
+}
+
+YUVImage::~YUVImage() {
+ if (mOwnBuffer) delete[] mBuffer;
+}
+
+bool YUVImage::getOffsets(int32_t x, int32_t y,
+ int32_t *yOffset, int32_t *uOffset, int32_t *vOffset) const {
+ *yOffset = y*mWidth + x;
+
+ int32_t uvOffset = (y >> 1) * (mWidth >> 1) + (x >> 1);
+ if (mYUVFormat == YUV420Planar) {
+ *uOffset = uvOffset;
+ *vOffset = uvOffset;
+ } else if (mYUVFormat == YUV420SemiPlanar) {
+ // Since U and V channels are interleaved, offsets need
+ // to be doubled.
+ *uOffset = 2*uvOffset;
+ *vOffset = 2*uvOffset;
+ } else {
+ LOGE("Format not supported");
+ return false;
+ }
+
+ return true;
+}
+
+bool YUVImage::getOffsetIncrementsPerDataRow(
+ int32_t *yDataOffsetIncrement,
+ int32_t *uDataOffsetIncrement,
+ int32_t *vDataOffsetIncrement) const {
+ *yDataOffsetIncrement = mWidth;
+
+ int32_t uvDataOffsetIncrement = mWidth >> 1;
+
+ if (mYUVFormat == YUV420Planar) {
+ *uDataOffsetIncrement = uvDataOffsetIncrement;
+ *vDataOffsetIncrement = uvDataOffsetIncrement;
+ } else if (mYUVFormat == YUV420SemiPlanar) {
+ // Since U and V channels are interleaved, offsets need
+ // to be doubled.
+ *uDataOffsetIncrement = 2*uvDataOffsetIncrement;
+ *vDataOffsetIncrement = 2*uvDataOffsetIncrement;
+ } else {
+ LOGE("Format not supported");
+ return false;
+ }
+
+ return true;
+}
+
+uint8_t* YUVImage::getYAddress(int32_t offset) const {
+ return mYdata + offset;
+}
+
+uint8_t* YUVImage::getUAddress(int32_t offset) const {
+ return mUdata + offset;
+}
+
+uint8_t* YUVImage::getVAddress(int32_t offset) const {
+ return mVdata + offset;
+}
+
+bool YUVImage::getYUVAddresses(int32_t x, int32_t y,
+ uint8_t **yAddr, uint8_t **uAddr, uint8_t **vAddr) const {
+ int32_t yOffset;
+ int32_t uOffset;
+ int32_t vOffset;
+ if (!getOffsets(x, y, &yOffset, &uOffset, &vOffset)) return false;
+
+ *yAddr = getYAddress(yOffset);
+ *uAddr = getUAddress(uOffset);
+ *vAddr = getVAddress(vOffset);
+
+ return true;
+}
+
+bool YUVImage::validPixel(int32_t x, int32_t y) const {
+ return (x >= 0 && x < mWidth &&
+ y >= 0 && y < mHeight);
+}
+
+bool YUVImage::getPixelValue(int32_t x, int32_t y,
+ uint8_t *yPtr, uint8_t *uPtr, uint8_t *vPtr) const {
+ CHECK(validPixel(x, y));
+
+ uint8_t *yAddr;
+ uint8_t *uAddr;
+ uint8_t *vAddr;
+ if (!getYUVAddresses(x, y, &yAddr, &uAddr, &vAddr)) return false;
+
+ *yPtr = *yAddr;
+ *uPtr = *uAddr;
+ *vPtr = *vAddr;
+
+ return true;
+}
+
+bool YUVImage::setPixelValue(int32_t x, int32_t y,
+ uint8_t yValue, uint8_t uValue, uint8_t vValue) {
+ CHECK(validPixel(x, y));
+
+ uint8_t *yAddr;
+ uint8_t *uAddr;
+ uint8_t *vAddr;
+ if (!getYUVAddresses(x, y, &yAddr, &uAddr, &vAddr)) return false;
+
+ *yAddr = yValue;
+ *uAddr = uValue;
+ *vAddr = vValue;
+
+ return true;
+}
+
+void YUVImage::fastCopyRectangle420Planar(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage) {
+ CHECK(srcImage.mYUVFormat == YUV420Planar);
+ CHECK(destImage.mYUVFormat == YUV420Planar);
+
+ int32_t srcStartX = srcRect.left;
+ int32_t srcStartY = srcRect.top;
+ int32_t width = srcRect.width();
+ int32_t height = srcRect.height();
+
+ // Get source and destination start addresses
+ uint8_t *ySrcAddrBase;
+ uint8_t *uSrcAddrBase;
+ uint8_t *vSrcAddrBase;
+ srcImage.getYUVAddresses(srcStartX, srcStartY,
+ &ySrcAddrBase, &uSrcAddrBase, &vSrcAddrBase);
+
+ uint8_t *yDestAddrBase;
+ uint8_t *uDestAddrBase;
+ uint8_t *vDestAddrBase;
+ destImage.getYUVAddresses(destStartX, destStartY,
+ &yDestAddrBase, &uDestAddrBase, &vDestAddrBase);
+
+    // Get the source and destination offset increments incurred in going
+    // from one data row to the next.
+ int32_t ySrcOffsetIncrement;
+ int32_t uSrcOffsetIncrement;
+ int32_t vSrcOffsetIncrement;
+ srcImage.getOffsetIncrementsPerDataRow(
+ &ySrcOffsetIncrement, &uSrcOffsetIncrement, &vSrcOffsetIncrement);
+
+ int32_t yDestOffsetIncrement;
+ int32_t uDestOffsetIncrement;
+ int32_t vDestOffsetIncrement;
+ destImage.getOffsetIncrementsPerDataRow(
+ &yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
+
+ // Copy Y
+ {
+ size_t numberOfYBytesPerRow = (size_t) width;
+ uint8_t *ySrcAddr = ySrcAddrBase;
+ uint8_t *yDestAddr = yDestAddrBase;
+ for (int32_t offsetY = 0; offsetY < height; ++offsetY) {
+ memcpy(yDestAddr, ySrcAddr, numberOfYBytesPerRow);
+
+ ySrcAddr += ySrcOffsetIncrement;
+ yDestAddr += yDestOffsetIncrement;
+ }
+ }
+
+ // Copy U
+ {
+ size_t numberOfUBytesPerRow = (size_t) (width >> 1);
+ uint8_t *uSrcAddr = uSrcAddrBase;
+ uint8_t *uDestAddr = uDestAddrBase;
+        // Every other pixel row has a U/V data row, so only iterate over
+        // half the height.
+ for (int32_t offsetY = 0; offsetY < (height >> 1); ++offsetY) {
+ memcpy(uDestAddr, uSrcAddr, numberOfUBytesPerRow);
+
+ uSrcAddr += uSrcOffsetIncrement;
+ uDestAddr += uDestOffsetIncrement;
+ }
+ }
+
+ // Copy V
+ {
+ size_t numberOfVBytesPerRow = (size_t) (width >> 1);
+ uint8_t *vSrcAddr = vSrcAddrBase;
+ uint8_t *vDestAddr = vDestAddrBase;
+ // Every other pixel row has a U/V data row. Hence only go half the height.
+ for (int32_t offsetY = 0; offsetY < (height >> 1); ++offsetY) {
+ memcpy(vDestAddr, vSrcAddr, numberOfVBytesPerRow);
+
+ vSrcAddr += vSrcOffsetIncrement;
+ vDestAddr += vDestOffsetIncrement;
+ }
+ }
+}
+
+void YUVImage::fastCopyRectangle420SemiPlanar(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage) {
+ CHECK(srcImage.mYUVFormat == YUV420SemiPlanar);
+ CHECK(destImage.mYUVFormat == YUV420SemiPlanar);
+
+ int32_t srcStartX = srcRect.left;
+ int32_t srcStartY = srcRect.top;
+ int32_t width = srcRect.width();
+ int32_t height = srcRect.height();
+
+ // Get source and destination start addresses
+ uint8_t *ySrcAddrBase;
+ uint8_t *uSrcAddrBase;
+ uint8_t *vSrcAddrBase;
+ srcImage.getYUVAddresses(srcStartX, srcStartY,
+ &ySrcAddrBase, &uSrcAddrBase, &vSrcAddrBase);
+
+ uint8_t *yDestAddrBase;
+ uint8_t *uDestAddrBase;
+ uint8_t *vDestAddrBase;
+ destImage.getYUVAddresses(destStartX, destStartY,
+ &yDestAddrBase, &uDestAddrBase, &vDestAddrBase);
+
+    // Get the source and destination offset increments incurred in going
+    // from one data row to the next.
+ int32_t ySrcOffsetIncrement;
+ int32_t uSrcOffsetIncrement;
+ int32_t vSrcOffsetIncrement;
+ srcImage.getOffsetIncrementsPerDataRow(
+ &ySrcOffsetIncrement, &uSrcOffsetIncrement, &vSrcOffsetIncrement);
+
+ int32_t yDestOffsetIncrement;
+ int32_t uDestOffsetIncrement;
+ int32_t vDestOffsetIncrement;
+ destImage.getOffsetIncrementsPerDataRow(
+ &yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
+
+ // Copy Y
+ {
+ size_t numberOfYBytesPerRow = (size_t) width;
+ uint8_t *ySrcAddr = ySrcAddrBase;
+ uint8_t *yDestAddr = yDestAddrBase;
+ for (int32_t offsetY = 0; offsetY < height; ++offsetY) {
+ memcpy(yDestAddr, ySrcAddr, numberOfYBytesPerRow);
+
+ ySrcAddr = ySrcAddr + ySrcOffsetIncrement;
+ yDestAddr = yDestAddr + yDestOffsetIncrement;
+ }
+ }
+
+ // Copy UV
+ {
+        // U and V are interleaved, so each chroma row holds 2*(width/2)
+        // bytes, i.e. 'width' bytes for the even widths handled here.
+ uint8_t *vSrcAddr = vSrcAddrBase;
+ uint8_t *vDestAddr = vDestAddrBase;
+ // Every other pixel row has a U/V data row. Hence only go half the height.
+ for (int32_t offsetY = 0; offsetY < (height >> 1); ++offsetY) {
+ memcpy(vDestAddr, vSrcAddr, numberOfUVBytesPerRow);
+
+ vSrcAddr += vSrcOffsetIncrement;
+ vDestAddr += vDestOffsetIncrement;
+ }
+ }
+}
+
+// static
+bool YUVImage::fastCopyRectangle(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage) {
+ if (srcImage.mYUVFormat == destImage.mYUVFormat) {
+ if (srcImage.mYUVFormat == YUV420Planar) {
+ fastCopyRectangle420Planar(
+ srcRect,
+ destStartX, destStartY,
+ srcImage, destImage);
+ } else if (srcImage.mYUVFormat == YUV420SemiPlanar) {
+ fastCopyRectangle420SemiPlanar(
+ srcRect,
+ destStartX, destStartY,
+ srcImage, destImage);
+ }
+ return true;
+ }
+ return false;
+}
+
+static uint8_t clamp(int32_t v, int32_t minValue, int32_t maxValue) {
+    CHECK(maxValue >= minValue);
+
+    if (v < minValue) return minValue;
+    else if (v > maxValue) return maxValue;
+    else return v;
+}
+
+void YUVImage::yuv2rgb(uint8_t yValue, uint8_t uValue, uint8_t vValue,
+        uint8_t *r, uint8_t *g, uint8_t *b) const {
+    // Clamp to [0, 255] before narrowing to uint8_t; clamping after the
+    // assignment would operate on an already-wrapped value.
+    *r = clamp((int32_t)(yValue + 1.370705 * (vValue - 128)), 0, 255);
+    *g = clamp((int32_t)(yValue - 0.698001 * (vValue - 128)
+            - 0.337633 * (uValue - 128)), 0, 255);
+    *b = clamp((int32_t)(yValue + 1.732446 * (uValue - 128)), 0, 255);
+}
+
+bool YUVImage::writeToPPM(const char *filename) const {
+ FILE *fp = fopen(filename, "w");
+ if (fp == NULL) {
+ return false;
+ }
+ fprintf(fp, "P3\n");
+ fprintf(fp, "%d %d\n", mWidth, mHeight);
+ fprintf(fp, "255\n");
+ for (int32_t y = 0; y < mHeight; ++y) {
+ for (int32_t x = 0; x < mWidth; ++x) {
+ uint8_t yValue;
+ uint8_t uValue;
+ uint8_t vValue;
+            getPixelValue(x, y, &yValue, &uValue, &vValue);
+
+ uint8_t rValue;
+ uint8_t gValue;
+ uint8_t bValue;
+ yuv2rgb(yValue, uValue, vValue, &rValue, &gValue, &bValue);
+
+ fprintf(fp, "%d %d %d\n", (int32_t)rValue, (int32_t)gValue, (int32_t)bValue);
+ }
+ }
+ fclose(fp);
+ return true;
+}
+
+} // namespace android
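
A worked example of the semiplanar offset math above, for a 4x4 YUV420SemiPlanar image (a standalone sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
        int32_t w = 4, h = 4;          // bufferSize = 16 + 16/2 = 24 bytes
        int32_t x = 3, y = 2;

        int32_t yOffset = y * w + x;                              // 11
        int32_t uvOffset = 2 * ((y >> 1) * (w >> 1) + (x >> 1));  // 6

        assert(yOffset == 11 && uvOffset == 6);

        // With mVdata = buffer + w*h and mUdata = mVdata + 1, pixel (3,2)
        // reads its V byte at buffer[16 + 6] and its U byte at buffer[17 + 6].
        return 0;
    }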