Diffstat (limited to 'include')
-rw-r--r--include/camera/Camera.h28
-rw-r--r--include/camera/CameraHardwareInterface.h111
-rw-r--r--include/camera/CameraParameters.h59
-rw-r--r--include/camera/ICamera.h20
-rw-r--r--include/drm/DrmManagerClient.h6
-rw-r--r--include/drm/drm_framework_common.h4
-rw-r--r--include/media/AudioEffect.h3
-rw-r--r--include/media/AudioRecord.h2
-rw-r--r--include/media/AudioSystem.h14
-rw-r--r--include/media/AudioTrack.h1
-rw-r--r--include/media/EffectApi.h6
-rw-r--r--include/media/IMediaMetadataRetriever.h3
-rw-r--r--include/media/IMediaPlayer.h5
-rw-r--r--include/media/IMediaPlayerService.h6
-rw-r--r--include/media/IMediaRecorder.h6
-rw-r--r--include/media/IOMX.h52
-rw-r--r--include/media/IStreamSource.h68
-rw-r--r--include/media/MediaMetadataRetrieverInterface.h5
-rw-r--r--include/media/MediaPlayerInterface.h13
-rw-r--r--include/media/MediaProfiles.h44
-rw-r--r--include/media/MediaRecorderBase.h5
-rw-r--r--include/media/PVMediaRecorder.h69
-rw-r--r--include/media/PVMetadataRetriever.h51
-rw-r--r--include/media/PVPlayer.h90
-rw-r--r--include/media/mediametadataretriever.h3
-rw-r--r--include/media/mediaplayer.h2
-rw-r--r--include/media/mediarecorder.h5
-rw-r--r--include/media/mediascanner.h11
-rw-r--r--include/media/stagefright/ACodec.h157
-rw-r--r--include/media/stagefright/AMRWriter.h2
-rw-r--r--include/media/stagefright/CameraSource.h172
-rw-r--r--include/media/stagefright/CameraSourceTimeLapse.h243
-rw-r--r--include/media/stagefright/ColorConverter.h59
-rw-r--r--include/media/stagefright/DataSource.h9
-rw-r--r--include/media/stagefright/FileSource.h7
-rw-r--r--include/media/stagefright/HardwareAPI.h82
-rw-r--r--include/media/stagefright/JPEGSource.h4
-rw-r--r--include/media/stagefright/MPEG4Writer.h19
-rw-r--r--include/media/stagefright/MediaBuffer.h6
-rw-r--r--include/media/stagefright/MediaDefs.h2
-rw-r--r--include/media/stagefright/MediaSource.h13
-rw-r--r--include/media/stagefright/MediaSourceSplitter.h193
-rw-r--r--include/media/stagefright/MetaData.h26
-rw-r--r--include/media/stagefright/OMXCodec.h59
-rw-r--r--include/media/stagefright/VideoRenderer.h41
-rw-r--r--include/media/stagefright/VideoSourceDownSampler.h97
-rw-r--r--include/media/stagefright/YUVCanvas.h79
-rw-r--r--include/media/stagefright/YUVImage.h178
-rw-r--r--include/media/stagefright/foundation/ADebug.h11
-rw-r--r--include/media/stagefright/foundation/AHierarchicalStateMachine.h49
-rw-r--r--include/media/stagefright/foundation/AMessage.h24
-rw-r--r--include/private/surfaceflinger/SharedBufferStack.h10
-rw-r--r--include/private/ui/sw_gralloc_handle.h83
53 files changed, 1776 insertions, 541 deletions
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index e734c38..e5f7e62 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -19,11 +19,10 @@
#include <utils/Timers.h>
#include <camera/ICameraClient.h>
+#include <gui/ISurfaceTexture.h>
namespace android {
-class ISurface;
-
/*
* A set of bit masks for specifying how the received preview frames are
* handled before the previewCallback() call.
@@ -96,11 +95,19 @@ enum {
// or CAMERA_MSG_COMPRESSED_IMAGE. This is not allowed to be set during
// preview.
CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3,
+
+ // cmdType to disable/enable shutter sound.
+ // In sendCommand, passing arg1 = 0 will disable,
+ // while passing arg1 = 1 will enable the shutter sound.
+ CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4,
+
+ // cmdType to play recording sound.
+ CAMERA_CMD_PLAY_RECORDING_SOUND = 5,
};
// camera fatal errors
enum {
- CAMERA_ERROR_UKNOWN = 1,
+ CAMERA_ERROR_UNKNOWN = 1,
CAMERA_ERROR_SERVER_DIED = 100
};
@@ -166,9 +173,11 @@ public:
status_t getStatus() { return mStatus; }
- // pass the buffered ISurface to the camera service
+ // pass the buffered Surface to the camera service
status_t setPreviewDisplay(const sp<Surface>& surface);
- status_t setPreviewDisplay(const sp<ISurface>& surface);
+
+ // pass the buffered ISurfaceTexture to the camera service
+ status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture);
// start preview mode, must call setPreviewDisplay first
status_t startPreview();
@@ -209,6 +218,15 @@ public:
// send command to camera driver
status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+ // return the total number of available video buffers.
+ int32_t getNumberOfVideoBuffers() const;
+
+ // return the individual video buffer corresponding to the given index.
+ sp<IMemory> getVideoBuffer(int32_t index) const;
+
+ // tell camera hal to store meta data or real YUV in video buffers.
+ status_t storeMetaDataInBuffers(bool enabled);
+
void setListener(const sp<CameraListener>& listener);
void setPreviewCallbackFlags(int preview_callback_flag);
diff --git a/include/camera/CameraHardwareInterface.h b/include/camera/CameraHardwareInterface.h
index 35c5aa1..86bd849 100644
--- a/include/camera/CameraHardwareInterface.h
+++ b/include/camera/CameraHardwareInterface.h
@@ -18,15 +18,16 @@
#define ANDROID_HARDWARE_CAMERA_HARDWARE_INTERFACE_H
#include <binder/IMemory.h>
+#include <ui/egl/android_natives.h>
#include <utils/RefBase.h>
#include <surfaceflinger/ISurface.h>
+#include <ui/android_native_buffer.h>
+#include <ui/GraphicBuffer.h>
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
namespace android {
-class Overlay;
-
/**
* The size of image for display.
*/
@@ -86,8 +87,8 @@ class CameraHardwareInterface : public virtual RefBase {
public:
virtual ~CameraHardwareInterface() { }
- /** Return the IMemoryHeap for the preview image heap */
- virtual sp<IMemoryHeap> getPreviewHeap() const = 0;
+ /** Set the ANativeWindow to which preview frames are sent */
+ virtual status_t setPreviewWindow(const sp<ANativeWindow>& buf) = 0;
/** Return the IMemoryHeap for the raw image heap */
virtual sp<IMemoryHeap> getRawHeap() const = 0;
@@ -111,6 +112,13 @@ public:
/**
* Disable a message, or a set of messages.
+ *
+ * Once it has received a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), the camera hal
+ * should not rely on its client to call releaseRecordingFrame() to release
+ * video recording frames sent out by the camera hal before and after the
+ * disableMsgType(CAMERA_MSG_VIDEO_FRAME) call. Camera hal clients must not
+ * modify/access any video recording frame after calling
+ * disableMsgType(CAMERA_MSG_VIDEO_FRAME).
*/
virtual void disableMsgType(int32_t msgType) = 0;
@@ -127,12 +135,6 @@ public:
virtual status_t startPreview() = 0;
/**
- * Only used if overlays are used for camera preview.
- */
- virtual bool useOverlay() {return false;}
- virtual status_t setOverlay(const sp<Overlay> &overlay) {return BAD_VALUE;}
-
- /**
* Stop a previously started preview.
*/
virtual void stopPreview() = 0;
@@ -143,9 +145,89 @@ public:
virtual bool previewEnabled() = 0;
/**
+ * Retrieve the total number of available buffers from camera hal for passing
+ * video frame data in a recording session. Must be called again if a new
+ * recording session is started.
+ *
+ * This method should be called after startRecording(), since
+ * some camera hals may choose to allocate the video buffers only after
+ * recording is started.
+ *
+ * Some camera hals may not implement this method, and 0 can be returned to
+ * indicate that this feature is not available.
+ *
+ * @return the number of video buffers that camera hal makes available.
+ * Zero (0) is returned to indicate that camera hal does not support
+ * this feature.
+ */
+ virtual int32_t getNumberOfVideoBuffers() const { return 0; }
+
+ /**
+ * Retrieve the video buffer corresponding to the given index in a
+ * recording session. Must be called again if a new recording session
+ * is started.
+ *
+ * It allows a client to retrieve all video buffers that camera hal makes
+ * available for passing video frame data by calling this method with all
+ * valid index values. The valid index value ranges from 0 to n, where
+ * n = getNumberOfVideoBuffers() - 1. With an index outside of the valid
+ * range, 0 must be returned. This method should be called after
+ * startRecording().
+ *
+ * The video buffers should NOT be modified/released by camera hal
+ * until stopRecording() is called and all outstanding video buffers
+ * previously sent out via CAMERA_MSG_VIDEO_FRAME have been released
+ * via releaseVideoBuffer().
+ *
+ * @param index an index to retrieve the corresponding video buffer.
+ *
+ * @return the video buffer corresponding to the given index.
+ */
+ virtual sp<IMemory> getVideoBuffer(int32_t index) const { return 0; }
+
+ /**
+ * Request the camera hal to store meta data or real YUV data in
+ * the video buffers sent out via CAMERA_MSG_VIDEO_FRAME for a
+ * recording session. If it is not called, the default camera
+ * hal behavior is to store real YUV data in the video buffers.
+ *
+ * This method should be called before startRecording() in order
+ * to be effective.
+ *
+ * If meta data is stored in the video buffers, it is up to the
+ * receiver of the video buffers to interpret the contents and
+ * to find the actual frame data with the help of the meta data
+ * in the buffer. How this is done is outside of the scope of
+ * this method.
+ *
+ * Some camera hal may not support storing meta data in the video
+ * buffers, but all camera hal should support storing real YUV data
+ * in the video buffers. If the camera hal does not support storing
+ * the meta data in the video buffers when it is requested to do
+ * so, INVALID_OPERATION must be returned. It is very useful for
+ * the camera hal to pass meta data rather than the actual frame
+ * data directly to the video encoder, since the amount of the
+ * uncompressed frame data can be very large if video size is large.
+ *
+ * @param enable true to instruct the camera hal to store
+ * meta data in the video buffers; false to instruct
+ * the camera hal to store real YUV data in the video
+ * buffers.
+ *
+ * @return OK on success.
+ */
+ virtual status_t storeMetaDataInBuffers(bool enable) {
+ return enable ? INVALID_OPERATION : OK;
+ }
+
+ /**
* Start record mode. When a record image is available a CAMERA_MSG_VIDEO_FRAME
* message is sent with the corresponding frame. Every record frame must be released
- * by calling releaseRecordingFrame().
+ * by a camera hal client via releaseRecordingFrame() before the client calls
+ * disableMsgType(CAMERA_MSG_VIDEO_FRAME). After the client calls
+ * disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is camera hal's responsibility
+ * to manage the life-cycle of the video recording frames, and the client must
+ * not modify/access any video recording frames.
*/
virtual status_t startRecording() = 0;
@@ -161,6 +243,13 @@ public:
/**
* Release a record frame previously returned by CAMERA_MSG_VIDEO_FRAME.
+ *
+ * It is the camera hal client's responsibility to release video recording
+ * frames sent out by the camera hal before the camera hal receives
+ * a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME). After it receives
+ * the call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is the camera hal's
+ * responsibility to manage the life-cycle of the video recording
+ * frames.
*/
virtual void releaseRecordingFrame(const sp<IMemory>& mem) = 0;
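On the HAL side, the contract above reduces to a few small overrides. A hedged sketch of how a vendor implementation might honor it; ExampleCameraHardware and its member fields are hypothetical, and the remaining pure virtuals are elided:

    #include <camera/CameraHardwareInterface.h>
    #include <utils/Vector.h>

    using namespace android;

    // Hypothetical vendor HAL honoring the new video-buffer contract.
    class ExampleCameraHardware : public CameraHardwareInterface {
    public:
        virtual status_t storeMetaDataInBuffers(bool enable) {
            if (enable && !mSupportsMetaDataMode) {
                // Must refuse rather than silently fall back to YUV.
                return INVALID_OPERATION;
            }
            mMetaDataMode = enable;
            return OK;
        }

        virtual int32_t getNumberOfVideoBuffers() const {
            // Only meaningful once recording has started; 0 means the
            // feature is not exposed.
            return mRecording ? (int32_t)mVideoBuffers.size() : 0;
        }

        virtual sp<IMemory> getVideoBuffer(int32_t index) const {
            if (index < 0 || index >= (int32_t)mVideoBuffers.size()) {
                return 0;  // out-of-range indices must return 0
            }
            return mVideoBuffers[index];
        }

        // ... the remaining pure virtuals (setPreviewWindow(), startPreview(),
        // startRecording(), releaseRecordingFrame(), ...) are omitted here ...

    private:
        bool mSupportsMetaDataMode;
        bool mMetaDataMode;
        bool mRecording;
        Vector<sp<IMemory> > mVideoBuffers;
    };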
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index 4e770fd..431aaa4 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -59,6 +59,35 @@ public:
void setPreviewSize(int width, int height);
void getPreviewSize(int *width, int *height) const;
void getSupportedPreviewSizes(Vector<Size> &sizes) const;
+
+ // Set the dimensions in pixels to the given width and height
+ // for video frames. The given width and height must be one
+ // of the supported dimensions returned from
+ // getSupportedVideoSizes(). Must not be called if
+ // getSupportedVideoSizes() returns an empty Vector of Size.
+ void setVideoSize(int width, int height);
+ // Retrieve the current dimensions (width and height)
+ // in pixels for video frames, which must be one of the
+ // supported dimensions returned from getSupportedVideoSizes().
+ // Must not be called if getSupportedVideoSizes() returns an
+ // empty Vector of Size.
+ void getVideoSize(int *width, int *height) const;
+ // Retrieve a Vector of supported dimensions (width and height)
+ // in pixels for video frames. If the returned Vector of sizes
+ // is empty, the camera does not support calls to setVideoSize()
+ // or getVideoSize(). In addition, it also indicates that
+ // the camera only has a single output, and does not have
+ // separate outputs for video frames and preview frames.
+ void getSupportedVideoSizes(Vector<Size> &sizes) const;
+ // Retrieve the preferred preview size (width and height) in pixels
+ // for video recording. The given width and height must be one of
+ // supported preview sizes returned from getSupportedPreviewSizes().
+ // Must not be called if getSupportedVideoSizes() returns an empty
+ // Vector of Size. If getSupportedVideoSizes() returns an empty
+ // Vector of Size, the width and height returned from this method
+ // are invalid, i.e. "-1x-1".
+ void getPreferredPreviewSizeForVideo(int *width, int *height) const;
+
void setPreviewFrameRate(int fps);
int getPreviewFrameRate() const;
void getPreviewFpsRange(int *min_fps, int *max_fps) const;
@@ -288,6 +317,31 @@ public:
// Example value: "0.95,1.9,Infinity" or "0.049,0.05,0.051". Read only.
static const char KEY_FOCUS_DISTANCES[];
+ // The current dimensions in pixels (width x height) for video frames.
+ // The width and height must be one of the supported sizes retrieved
+ // via KEY_SUPPORTED_VIDEO_SIZES.
+ // Example value: "1280x720". Read/write.
+ static const char KEY_VIDEO_SIZE[];
+ // A list of the supported dimensions in pixels (width x height)
+ // for video frames. See CAMERA_MSG_VIDEO_FRAME for details in
+ // frameworks/base/include/camera/Camera.h.
+ // Example: "176x144,1280x720". Read only.
+ static const char KEY_SUPPORTED_VIDEO_SIZES[];
+
+ // Preferred preview frame size in pixels for video recording.
+ // The width and height must be one of the supported sizes retrieved
+ // via KEY_SUPPORTED_PREVIEW_SIZES. This key can be used only when
+ // getSupportedVideoSizes() does not return an empty Vector of Size.
+ // Camcorder applications are recommended to set the preview size
+ // to a value that is not larger than the preferred preview size.
+ // In other words, the product of the width and height of the
+ // preview size should not be larger than that of the preferred
+ // preview size. In addition, we recommend choosing a preview size
+ // that has the same aspect ratio as the resolution of video to be
+ // recorded.
+ // Example value: "800x600". Read only.
+ static const char KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[];
+
// The image format for video frames. See CAMERA_MSG_VIDEO_FRAME in
// frameworks/base/include/camera/Camera.h.
// Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read only.
@@ -361,7 +415,10 @@ public:
// for barcode reading.
static const char SCENE_MODE_BARCODE[];
- // Formats for setPreviewFormat and setPictureFormat.
+ // Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT,
+ // and KEY_VIDEO_FRAME_FORMAT
+ // Planar variant of the YUV420 color format
+ static const char PIXEL_FORMAT_YUV420P[];
static const char PIXEL_FORMAT_YUV422SP[];
static const char PIXEL_FORMAT_YUV420SP[]; // NV21
static const char PIXEL_FORMAT_YUV422I[]; // YUY2
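Putting the new keys together, a camcorder-style client picks a video size and then bounds its preview size by the preferred preview size for video. The sketch below assumes a CameraParameters object already unflattened from Camera::getParameters() and a requested size known to be supported; chooseSizes() is a hypothetical helper:

    #include <camera/CameraParameters.h>

    using namespace android;

    // Hypothetical helper: select video and preview sizes per the new keys.
    static void chooseSizes(CameraParameters *params, int width, int height) {
        Vector<Size> supported;
        params->getSupportedVideoSizes(supported);
        if (supported.isEmpty()) {
            // Single-output camera: video frames simply follow the preview size.
            params->setPreviewSize(width, height);
            return;
        }

        // Separate video output: set the video size directly...
        params->setVideoSize(width, height);

        // ...and keep the preview no larger (in pixel count) than the
        // preferred preview size for video.
        int prefWidth, prefHeight;
        params->getPreferredPreviewSizeForVideo(&prefWidth, &prefHeight);
        if (width * height <= prefWidth * prefHeight) {
            params->setPreviewSize(width, height);
        } else {
            params->setPreviewSize(prefWidth, prefHeight);
        }
    }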
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index 6fcf9e5..b2310a6 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -20,10 +20,11 @@
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
-#include <surfaceflinger/ISurface.h>
+#include <surfaceflinger/Surface.h>
#include <binder/IMemory.h>
#include <utils/String8.h>
#include <camera/Camera.h>
+#include <gui/ISurfaceTexture.h>
namespace android {
@@ -45,8 +46,12 @@ public:
// allow other processes to use this ICamera interface
virtual status_t unlock() = 0;
- // pass the buffered ISurface to the camera service
- virtual status_t setPreviewDisplay(const sp<ISurface>& surface) = 0;
+ // pass the buffered Surface to the camera service
+ virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0;
+
+ // pass the buffered ISurfaceTexture to the camera service
+ virtual status_t setPreviewTexture(
+ const sp<ISurfaceTexture>& surfaceTexture) = 0;
// set the preview callback flag to affect how the received frames from
// preview are handled.
@@ -90,6 +95,15 @@ public:
// send command to camera driver
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) = 0;
+
+ // return the total number of available video buffers
+ virtual int32_t getNumberOfVideoBuffers() const = 0;
+
+ // return the individual video buffer corresponding to the given index.
+ virtual sp<IMemory> getVideoBuffer(int32_t index) const = 0;
+
+ // tell the camera hal to store meta data or real YUV data in video buffers.
+ virtual status_t storeMetaDataInBuffers(bool enabled) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/drm/DrmManagerClient.h b/include/drm/DrmManagerClient.h
index 004556f..085ebf1 100644
--- a/include/drm/DrmManagerClient.h
+++ b/include/drm/DrmManagerClient.h
@@ -69,7 +69,7 @@ public:
* @return
* Handle for the decryption session
*/
- DecryptHandle* openDecryptSession(int fd, int offset, int length);
+ DecryptHandle* openDecryptSession(int fd, off64_t offset, off64_t length);
/**
* Open the decrypt session to decrypt the given protected content
@@ -113,7 +113,7 @@ public:
* @return status_t
* Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure
*/
- status_t setPlaybackStatus(DecryptHandle* decryptHandle, int playbackStatus, int position);
+ status_t setPlaybackStatus(DecryptHandle* decryptHandle, int playbackStatus, int64_t position);
/**
* Initialize decryption for the given unit of the protected content
@@ -167,7 +167,7 @@ public:
*
* @return Number of bytes read. Returns -1 for Failure.
*/
- ssize_t pread(DecryptHandle* decryptHandle, void* buffer, ssize_t numBytes, off_t offset);
+ ssize_t pread(DecryptHandle* decryptHandle, void* buffer, ssize_t numBytes, off64_t offset);
/**
* Validates whether an action on the DRM content is allowed or not.
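The DRM client API above is now 64-bit clean (off64_t offsets and lengths, int64_t playback position). A minimal read sketch, assuming the existing closeDecryptSession() call and leaving rights checks to the caller; readProtectedRange() is hypothetical:

    #include <drm/DrmManagerClient.h>

    using namespace android;

    // Hypothetical helper: decrypt-read a byte range from protected content.
    static ssize_t readProtectedRange(DrmManagerClient *client, int fd,
                                      off64_t offset, off64_t length,
                                      void *buffer, ssize_t numBytes,
                                      off64_t readOffset) {
        DecryptHandle *handle = client->openDecryptSession(fd, offset, length);
        if (handle == NULL) {
            return -1;
        }
        ssize_t n = client->pread(handle, buffer, numBytes, readOffset);
        client->closeDecryptSession(handle);  // assumed existing API
        return n;
    }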
diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h
index c5765a9..1758cdd 100644
--- a/include/drm/drm_framework_common.h
+++ b/include/drm/drm_framework_common.h
@@ -217,6 +217,10 @@ public:
* POSIX based Decrypt API set for container based DRM
*/
static const int CONTAINER_BASED = 0x02;
+ /**
+ * Decrypt API for Widevine streams
+ */
+ static const int WV_BASED = 0x3;
};
/**
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index c967efb..cda2be0 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -403,7 +403,7 @@ public:
static status_t guidToString(const effect_uuid_t *guid, char *str, size_t maxLen);
protected:
- volatile int32_t mEnabled; // enable state
+ bool mEnabled; // enable state
int32_t mSessionId; // audio session ID
int32_t mPriority; // priority for effect control
status_t mStatus; // effect status
@@ -412,6 +412,7 @@ protected:
void* mUserData; // client context for callback function
effect_descriptor_t mDescriptor; // effect descriptor
int32_t mId; // system wide unique effect engine instance ID
+ Mutex mLock; // Mutex for mEnabled access
private:
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 38e3d44..5f7cd90 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -356,7 +356,7 @@ private:
sp<IAudioRecord> mAudioRecord;
sp<IMemory> mCblkMemory;
sp<ClientRecordThread> mClientRecordThread;
- Mutex mRecordThreadLock;
+ Mutex mLock;
uint32_t mFrameCount;
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 9fd905f..1e29d82 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -156,6 +156,7 @@ public:
MODE_NORMAL = 0,
MODE_RINGTONE,
MODE_IN_CALL,
+ MODE_IN_COMMUNICATION,
NUM_MODES // not a valid entry, denotes end-of-list
};
@@ -262,11 +263,15 @@ public:
DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
DEVICE_OUT_AUX_DIGITAL = 0x400,
+ DEVICE_OUT_ANLG_DOCK_HEADSET = 0x800,
+ DEVICE_OUT_DGTL_DOCK_HEADSET = 0x1000,
DEVICE_OUT_DEFAULT = 0x8000,
DEVICE_OUT_ALL = (DEVICE_OUT_EARPIECE | DEVICE_OUT_SPEAKER | DEVICE_OUT_WIRED_HEADSET |
DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
DEVICE_OUT_BLUETOOTH_SCO_CARKIT | DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL | DEVICE_OUT_DEFAULT),
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL |
+ DEVICE_OUT_ANLG_DOCK_HEADSET | DEVICE_OUT_DGTL_DOCK_HEADSET |
+ DEVICE_OUT_DEFAULT),
DEVICE_OUT_ALL_A2DP = (DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
@@ -309,6 +314,8 @@ public:
FORCE_WIRED_ACCESSORY,
FORCE_BT_CAR_DOCK,
FORCE_BT_DESK_DOCK,
+ FORCE_ANALOG_DOCK,
+ FORCE_DIGITAL_DOCK,
NUM_FORCE_CONFIG,
FORCE_DEFAULT = FORCE_NONE
};
@@ -466,7 +473,7 @@ public:
AudioParameter(const String8& keyValuePairs);
virtual ~AudioParameter();
- // reserved parameter keys for changeing standard parameters with setParameters() function.
+ // reserved parameter keys for changing standard parameters with the setParameters() function.
// Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
// configuration changes and act accordingly.
// keyRouting: to change audio routing, value is an int in AudioSystem::audio_devices
@@ -474,11 +481,14 @@ public:
// keyFormat: to change audio format, value is an int in AudioSystem::audio_format
// keyChannels: to change audio channel configuration, value is an int in AudioSystem::audio_channels
// keyFrameCount: to change audio output frame count, value is an int
+ // keyInputSource: to change audio input source, value is an int in audio_source
+ // (defined in media/mediarecorder.h)
static const char *keyRouting;
static const char *keySamplingRate;
static const char *keyFormat;
static const char *keyChannels;
static const char *keyFrameCount;
+ static const char *keyInputSource;
String8 toString();
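keyInputSource gives the audio HAL a hint about what an input stream is capturing. A hedged fragment, assuming the existing AudioSystem::setParameters() entry point and an input handle obtained elsewhere; selectVoipInputSource() is hypothetical:

    #include <media/AudioSystem.h>
    #include <media/mediarecorder.h>  // for AUDIO_SOURCE_VOICE_COMMUNICATION

    using namespace android;

    // Hypothetical helper: tell the input stream it is capturing a VoIP call.
    static status_t selectVoipInputSource(audio_io_handle_t input) {
        AudioParameter param;
        param.addInt(String8(AudioParameter::keyInputSource),
                     AUDIO_SOURCE_VOICE_COMMUNICATION);
        return AudioSystem::setParameters(input, param.toString());
    }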
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 4475d4a..813a905 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -480,6 +480,7 @@ private:
uint32_t mFlags;
int mSessionId;
int mAuxEffectId;
+ Mutex mLock;
};
diff --git a/include/media/EffectApi.h b/include/media/EffectApi.h
index 16fb43c..b97c22e 100644
--- a/include/media/EffectApi.h
+++ b/include/media/EffectApi.h
@@ -602,9 +602,9 @@ enum audio_device_e {
// Audio mode
enum audio_mode_e {
- AUDIO_MODE_NORMAL, // phone idle
- AUDIO_MODE_RINGTONE, // phone ringing
- AUDIO_MODE_IN_CALL // phone call connected
+ AUDIO_MODE_NORMAL, // device idle
+ AUDIO_MODE_RINGTONE, // device ringing
+ AUDIO_MODE_IN_CALL // audio call connected (VoIP or telephony)
};
// Values for "accessMode" field of buffer_config_t:
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index 9baba8e..e517cf0 100644
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -33,8 +33,7 @@ public:
virtual status_t setDataSource(const char* srcUrl) = 0;
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
virtual status_t setMode(int mode) = 0;
- virtual status_t getMode(int* mode) const = 0;
- virtual sp<IMemory> captureFrame() = 0;
+ virtual sp<IMemory> getFrameAtTime(int64_t timeUs, int option) = 0;
virtual sp<IMemory> extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index af9a7ed..bba7ed7 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -25,6 +25,7 @@ namespace android {
class Parcel;
class ISurface;
+class Surface;
class IMediaPlayer: public IInterface
{
@@ -33,7 +34,7 @@ public:
virtual void disconnect() = 0;
- virtual status_t setVideoSurface(const sp<ISurface>& surface) = 0;
+ virtual status_t setVideoSurface(const sp<Surface>& surface) = 0;
virtual status_t prepareAsync() = 0;
virtual status_t start() = 0;
virtual status_t stop() = 0;
@@ -46,8 +47,6 @@ public:
virtual status_t setAudioStreamType(int type) = 0;
virtual status_t setLooping(int loop) = 0;
virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
- virtual status_t suspend() = 0;
- virtual status_t resume() = 0;
virtual status_t setAuxEffectSendLevel(float level) = 0;
virtual status_t attachAuxEffect(int effectId) = 0;
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 9416ca1..0bfb166 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -32,6 +32,7 @@ namespace android {
class IMediaRecorder;
class IOMX;
+struct IStreamSource;
class IMediaPlayerService: public IInterface
{
@@ -45,6 +46,11 @@ public:
int audioSessionId = 0) = 0;
virtual sp<IMediaPlayer> create(pid_t pid, const sp<IMediaPlayerClient>& client,
int fd, int64_t offset, int64_t length, int audioSessionId) = 0;
+
+ virtual sp<IMediaPlayer> create(
+ pid_t pid, const sp<IMediaPlayerClient> &client,
+ const sp<IStreamSource> &source, int audioSessionId) = 0;
+
virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) = 0;
virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) = 0;
virtual sp<IOMX> getOMX() = 0;
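The new create() overload lets a client hand the player a push-style stream instead of a URL or fd. A hedged sketch of obtaining the service and calling it; the IMediaPlayerClient and IStreamSource instances are assumed to exist already, and createStreamingPlayer() is hypothetical:

    #include <unistd.h>
    #include <binder/IServiceManager.h>
    #include <media/IMediaPlayerService.h>
    #include <media/IStreamSource.h>

    using namespace android;

    // Hypothetical helper: create a player that pulls from an IStreamSource.
    static sp<IMediaPlayer> createStreamingPlayer(
            const sp<IMediaPlayerClient> &client,
            const sp<IStreamSource> &source) {
        sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.player"));
        sp<IMediaPlayerService> service =
            interface_cast<IMediaPlayerService>(binder);
        if (service == NULL) {
            return NULL;
        }
        return service->create(getpid(), client, source, 0 /* audioSessionId */);
    }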
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 54adca8..28be7c1 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -22,7 +22,7 @@
namespace android {
-class ISurface;
+class Surface;
class ICamera;
class IMediaRecorderClient;
@@ -32,7 +32,7 @@ public:
DECLARE_META_INTERFACE(MediaRecorder);
virtual status_t setCamera(const sp<ICamera>& camera) = 0;
- virtual status_t setPreviewSurface(const sp<ISurface>& surface) = 0;
+ virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0;
virtual status_t setVideoSource(int vs) = 0;
virtual status_t setAudioSource(int as) = 0;
virtual status_t setOutputFormat(int of) = 0;
@@ -40,6 +40,7 @@ public:
virtual status_t setAudioEncoder(int ae) = 0;
virtual status_t setOutputFile(const char* path) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
+ virtual status_t setOutputFileAuxiliary(int fd) = 0;
virtual status_t setVideoSize(int width, int height) = 0;
virtual status_t setVideoFrameRate(int frames_per_second) = 0;
virtual status_t setParameters(const String8& params) = 0;
@@ -68,4 +69,3 @@ public:
}; // namespace android
#endif // ANDROID_IMEDIARECORDER_H
-
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index f794766..cb36bbb 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -19,6 +19,7 @@
#define ANDROID_IOMX_H_
#include <binder/IInterface.h>
+#include <ui/GraphicBuffer.h>
#include <utils/List.h>
#include <utils/String8.h>
@@ -78,10 +79,20 @@ public:
node_id node, OMX_INDEXTYPE index,
const void *params, size_t size) = 0;
+ virtual status_t storeMetaDataInBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0;
+
+ virtual status_t enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0;
+
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer) = 0;
+ virtual status_t useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0;
+
// This API clearly only makes sense if the caller lives in the
// same process as the callee, i.e. is the media_server, as the
// returned "buffer_data" pointer is just that, a pointer into local
@@ -109,33 +120,6 @@ public:
node_id node,
const char *parameter_name,
OMX_INDEXTYPE *index) = 0;
-
- virtual sp<IOMXRenderer> createRenderer(
- const sp<ISurface> &surface,
- const char *componentName,
- OMX_COLOR_FORMATTYPE colorFormat,
- size_t encodedWidth, size_t encodedHeight,
- size_t displayWidth, size_t displayHeight,
- int32_t rotationDegrees) = 0;
-
- // Note: These methods are _not_ virtual, it exists as a wrapper around
- // the virtual "createRenderer" method above facilitating extraction
- // of the ISurface from a regular Surface or a java Surface object.
- sp<IOMXRenderer> createRenderer(
- const sp<Surface> &surface,
- const char *componentName,
- OMX_COLOR_FORMATTYPE colorFormat,
- size_t encodedWidth, size_t encodedHeight,
- size_t displayWidth, size_t displayHeight,
- int32_t rotationDegrees);
-
- sp<IOMXRenderer> createRendererFromJavaSurface(
- JNIEnv *env, jobject javaSurface,
- const char *componentName,
- OMX_COLOR_FORMATTYPE colorFormat,
- size_t encodedWidth, size_t encodedHeight,
- size_t displayWidth, size_t displayHeight,
- int32_t rotationDegrees);
};
struct omx_message {
@@ -182,13 +166,6 @@ public:
virtual void onMessage(const omx_message &msg) = 0;
};
-class IOMXRenderer : public IInterface {
-public:
- DECLARE_META_INTERFACE(OMXRenderer);
-
- virtual void render(IOMX::buffer_id buffer) = 0;
-};
-
////////////////////////////////////////////////////////////////////////////////
class BnOMX : public BnInterface<IOMX> {
@@ -205,13 +182,6 @@ public:
uint32_t flags = 0);
};
-class BnOMXRenderer : public BnInterface<IOMXRenderer> {
-public:
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-};
-
} // namespace android
#endif // ANDROID_IOMX_H_
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
new file mode 100644
index 0000000..4b698e6
--- /dev/null
+++ b/include/media/IStreamSource.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ISTREAMSOURCE_H_
+
+#define ANDROID_ISTREAMSOURCE_H_
+
+#include <binder/IInterface.h>
+
+namespace android {
+
+struct AMessage;
+struct IMemory;
+struct IStreamListener;
+
+struct IStreamSource : public IInterface {
+ DECLARE_META_INTERFACE(StreamSource);
+
+ virtual void setListener(const sp<IStreamListener> &listener) = 0;
+ virtual void setBuffers(const Vector<sp<IMemory> > &buffers) = 0;
+
+ virtual void onBufferAvailable(size_t index) = 0;
+};
+
+struct IStreamListener : public IInterface {
+ DECLARE_META_INTERFACE(StreamListener);
+
+ enum Command {
+ EOS,
+ DISCONTINUITY,
+ };
+
+ virtual void queueBuffer(size_t index, size_t size) = 0;
+
+ virtual void issueCommand(
+ Command cmd, bool synchronous, const sp<AMessage> &msg = NULL) = 0;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+struct BnStreamSource : public BnInterface<IStreamSource> {
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+};
+
+struct BnStreamListener : public BnInterface<IStreamListener> {
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+};
+
+} // namespace android
+
+#endif // ANDROID_ISTREAMSOURCE_H_
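The interface above is push-style: the player supplies shared-memory buffers via setBuffers(), signals a free slot through onBufferAvailable(), and the source answers with queueBuffer() or issueCommand() on the listener. A hedged sketch of a source feeding transport-stream style data; MyStreamSource and fillBuffer() are hypothetical:

    #include <binder/IMemory.h>
    #include <media/IStreamSource.h>
    #include <utils/Vector.h>

    using namespace android;

    // Hypothetical push source built on the new binder interface.
    struct MyStreamSource : public BnStreamSource {
        virtual void setListener(const sp<IStreamListener> &listener) {
            mListener = listener;
        }

        virtual void setBuffers(const Vector<sp<IMemory> > &buffers) {
            mBuffers = buffers;  // shared memory provided by the player side
        }

        virtual void onBufferAvailable(size_t index) {
            sp<IMemory> mem = mBuffers.itemAt(index);
            ssize_t n = fillBuffer(mem->pointer(), mem->size());
            if (n > 0) {
                mListener->queueBuffer(index, n);
            } else {
                // Out of data: report end-of-stream synchronously.
                mListener->issueCommand(IStreamListener::EOS, true /* synchronous */);
            }
        }

    private:
        sp<IStreamListener> mListener;
        Vector<sp<IMemory> > mBuffers;

        // Hypothetical: copy up to 'capacity' bytes of stream data into 'data'.
        ssize_t fillBuffer(void *data, size_t capacity);
    };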
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index ff57774..717849d 100644
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -33,8 +33,7 @@ public:
virtual status_t setDataSource(const char *url) = 0;
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
virtual status_t setMode(int mode) = 0;
- virtual status_t getMode(int* mode) const = 0;
- virtual VideoFrame* captureFrame() = 0;
+ virtual VideoFrame* getFrameAtTime(int64_t timeUs, int option) = 0;
virtual MediaAlbumArt* extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
@@ -67,7 +66,7 @@ public:
}
virtual status_t getMode(int* mode) const { *mode = mMode; return NO_ERROR; }
- virtual VideoFrame* captureFrame() { return NULL; }
+ virtual VideoFrame* getFrameAtTime(int64_t timeUs, int option) { return NULL; }
virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
virtual const char* extractMetadata(int keyCode) { return NULL; }
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 0521709..c0963a6 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -33,13 +33,15 @@ namespace android {
class Parcel;
class ISurface;
+class Surface;
template<typename T> class SortedVector;
enum player_type {
PV_PLAYER = 1,
SONIVOX_PLAYER = 2,
- STAGEFRIGHT_PLAYER = 4,
+ STAGEFRIGHT_PLAYER = 3,
+ NU_PLAYER = 4,
// Test players are available only in the 'test' and 'eng' builds.
// The shared library with the test player is passed as an
// argument to the 'test:' url in the setDataSource call.
@@ -105,7 +107,12 @@ public:
const KeyedVector<String8, String8> *headers = NULL) = 0;
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setVideoSurface(const sp<ISurface>& surface) = 0;
+
+ virtual status_t setDataSource(const sp<IStreamSource> &source) {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t setVideoSurface(const sp<Surface>& surface) = 0;
virtual status_t prepare() = 0;
virtual status_t prepareAsync() = 0;
virtual status_t start() = 0;
@@ -118,8 +125,6 @@ public:
virtual status_t reset() = 0;
virtual status_t setLooping(int loop) = 0;
virtual player_type playerType() = 0;
- virtual status_t suspend() { return INVALID_OPERATION; }
- virtual status_t resume() { return INVALID_OPERATION; }
virtual void setNotifyCallback(void* cookie, notify_callback_f notifyFunc) {
mCookie = cookie; mNotify = notifyFunc; }
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index c3cd361..aa97874 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -25,7 +25,20 @@ namespace android {
enum camcorder_quality {
CAMCORDER_QUALITY_LOW = 0,
- CAMCORDER_QUALITY_HIGH = 1
+ CAMCORDER_QUALITY_HIGH = 1,
+ CAMCORDER_QUALITY_QCIF = 2,
+ CAMCORDER_QUALITY_CIF = 3,
+ CAMCORDER_QUALITY_480P = 4,
+ CAMCORDER_QUALITY_720P = 5,
+ CAMCORDER_QUALITY_1080P = 6,
+
+ CAMCORDER_QUALITY_TIME_LAPSE_LOW = 1000,
+ CAMCORDER_QUALITY_TIME_LAPSE_HIGH = 1001,
+ CAMCORDER_QUALITY_TIME_LAPSE_QCIF = 1002,
+ CAMCORDER_QUALITY_TIME_LAPSE_CIF = 1003,
+ CAMCORDER_QUALITY_TIME_LAPSE_480P = 1004,
+ CAMCORDER_QUALITY_TIME_LAPSE_720P = 1005,
+ CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006
};
enum video_decoder {
@@ -68,6 +81,12 @@ public:
camcorder_quality quality) const;
/**
+ * Returns true if a profile for the given camera at the given quality exists,
+ * or false if not.
+ */
+ bool hasCamcorderProfile(int cameraId, camcorder_quality quality) const;
+
+ /**
* Returns the output file formats supported.
*/
Vector<output_format> getOutputFileFormats() const;
@@ -252,6 +271,8 @@ private:
Vector<int> mLevels;
};
+ int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const;
+
// Debug
static void logVideoCodec(const VideoCodec& codec);
static void logAudioCodec(const AudioCodec& codec);
@@ -281,8 +302,25 @@ private:
// If the xml configuration file does not exist, use hard-coded values
static MediaProfiles* createDefaultInstance();
- static CamcorderProfile *createDefaultCamcorderLowProfile();
- static CamcorderProfile *createDefaultCamcorderHighProfile();
+
+ static CamcorderProfile *createDefaultCamcorderQcifProfile(camcorder_quality quality);
+ static CamcorderProfile *createDefaultCamcorderCifProfile(camcorder_quality quality);
+ static void createDefaultCamcorderLowProfiles(
+ MediaProfiles::CamcorderProfile **lowProfile,
+ MediaProfiles::CamcorderProfile **lowSpecificProfile);
+ static void createDefaultCamcorderHighProfiles(
+ MediaProfiles::CamcorderProfile **highProfile,
+ MediaProfiles::CamcorderProfile **highSpecificProfile);
+
+ static CamcorderProfile *createDefaultCamcorderTimeLapseQcifProfile(camcorder_quality quality);
+ static CamcorderProfile *createDefaultCamcorderTimeLapse480pProfile(camcorder_quality quality);
+ static void createDefaultCamcorderTimeLapseLowProfiles(
+ MediaProfiles::CamcorderProfile **lowTimeLapseProfile,
+ MediaProfiles::CamcorderProfile **lowSpecificTimeLapseProfile);
+ static void createDefaultCamcorderTimeLapseHighProfiles(
+ MediaProfiles::CamcorderProfile **highTimeLapseProfile,
+ MediaProfiles::CamcorderProfile **highSpecificTimeLapseProfile);
+
static void createDefaultCamcorderProfiles(MediaProfiles *profiles);
static void createDefaultVideoEncoders(MediaProfiles *profiles);
static void createDefaultAudioEncoders(MediaProfiles *profiles);
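With per-resolution qualities, a recorder can probe from best to worst instead of hard-coding LOW/HIGH. A small sketch using the new hasCamcorderProfile(); pickBestQuality() is a hypothetical helper:

    #include <media/MediaProfiles.h>

    using namespace android;

    // Hypothetical helper: highest camcorder quality this camera supports.
    static camcorder_quality pickBestQuality(MediaProfiles *profiles, int cameraId) {
        static const camcorder_quality kPreferred[] = {
            CAMCORDER_QUALITY_1080P, CAMCORDER_QUALITY_720P,
            CAMCORDER_QUALITY_480P,  CAMCORDER_QUALITY_CIF,
            CAMCORDER_QUALITY_QCIF,
        };
        for (size_t i = 0; i < sizeof(kPreferred) / sizeof(kPreferred[0]); ++i) {
            if (profiles->hasCamcorderProfile(cameraId, kPreferred[i])) {
                return kPreferred[i];
            }
        }
        return CAMCORDER_QUALITY_LOW;  // LOW/HIGH remain as generic fallbacks
    }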
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index 5e9e368..c42346e 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -22,7 +22,7 @@
namespace android {
-class ISurface;
+class Surface;
struct MediaRecorderBase {
MediaRecorderBase() {}
@@ -37,9 +37,10 @@ struct MediaRecorderBase {
virtual status_t setVideoSize(int width, int height) = 0;
virtual status_t setVideoFrameRate(int frames_per_second) = 0;
virtual status_t setCamera(const sp<ICamera>& camera) = 0;
- virtual status_t setPreviewSurface(const sp<ISurface>& surface) = 0;
+ virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0;
virtual status_t setOutputFile(const char *path) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
+ virtual status_t setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;}
virtual status_t setParameters(const String8& params) = 0;
virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0;
virtual status_t prepare() = 0;
diff --git a/include/media/PVMediaRecorder.h b/include/media/PVMediaRecorder.h
deleted file mode 100644
index c091c39..0000000
--- a/include/media/PVMediaRecorder.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- **
- ** Copyright 2008, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_PVMEDIARECORDER_H
-#define ANDROID_PVMEDIARECORDER_H
-
-#include <media/IMediaRecorderClient.h>
-#include <media/MediaRecorderBase.h>
-
-namespace android {
-
-class ISurface;
-class ICamera;
-class AuthorDriverWrapper;
-
-class PVMediaRecorder : public MediaRecorderBase {
-public:
- PVMediaRecorder();
- virtual ~PVMediaRecorder();
-
- virtual status_t init();
- virtual status_t setAudioSource(audio_source as);
- virtual status_t setVideoSource(video_source vs);
- virtual status_t setOutputFormat(output_format of);
- virtual status_t setAudioEncoder(audio_encoder ae);
- virtual status_t setVideoEncoder(video_encoder ve);
- virtual status_t setVideoSize(int width, int height);
- virtual status_t setVideoFrameRate(int frames_per_second);
- virtual status_t setCamera(const sp<ICamera>& camera);
- virtual status_t setPreviewSurface(const sp<ISurface>& surface);
- virtual status_t setOutputFile(const char *path);
- virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
- virtual status_t setParameters(const String8& params);
- virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
- virtual status_t prepare();
- virtual status_t start();
- virtual status_t stop();
- virtual status_t close();
- virtual status_t reset();
- virtual status_t getMaxAmplitude(int *max);
- virtual status_t dump(int fd, const Vector<String16>& args) const;
-
-private:
- status_t doStop();
-
- AuthorDriverWrapper* mAuthorDriverWrapper;
-
- PVMediaRecorder(const PVMediaRecorder &);
- PVMediaRecorder &operator=(const PVMediaRecorder &);
-};
-
-}; // namespace android
-
-#endif // ANDROID_PVMEDIARECORDER_H
-
diff --git a/include/media/PVMetadataRetriever.h b/include/media/PVMetadataRetriever.h
deleted file mode 100644
index c202dfe..0000000
--- a/include/media/PVMetadataRetriever.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-**
-** Copyright (C) 2008 The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_PVMETADATARETRIEVER_H
-#define ANDROID_PVMETADATARETRIEVER_H
-
-#include <utils/Errors.h>
-#include <media/MediaMetadataRetrieverInterface.h>
-#include <private/media/VideoFrame.h>
-
-namespace android {
-
-class MetadataDriver;
-
-class PVMetadataRetriever : public MediaMetadataRetrieverInterface
-{
-public:
- PVMetadataRetriever();
- virtual ~PVMetadataRetriever();
-
- virtual status_t setDataSource(const char *url);
- virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
- virtual status_t setMode(int mode);
- virtual status_t getMode(int* mode) const;
- virtual VideoFrame* captureFrame();
- virtual MediaAlbumArt* extractAlbumArt();
- virtual const char* extractMetadata(int keyCode);
-
-private:
- mutable Mutex mLock;
- MetadataDriver* mMetadataDriver;
- char* mDataSourcePath;
-};
-
-}; // namespace android
-
-#endif // ANDROID_PVMETADATARETRIEVER_H
diff --git a/include/media/PVPlayer.h b/include/media/PVPlayer.h
deleted file mode 100644
index df50981..0000000
--- a/include/media/PVPlayer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_PVPLAYER_H
-#define ANDROID_PVPLAYER_H
-
-#include <utils/Errors.h>
-#include <media/MediaPlayerInterface.h>
-#include <media/Metadata.h>
-
-#define MAX_OPENCORE_INSTANCES 25
-
-#ifdef MAX_OPENCORE_INSTANCES
-#include <cutils/atomic.h>
-#endif
-
-class PlayerDriver;
-
-namespace android {
-
-class PVPlayer : public MediaPlayerInterface
-{
-public:
- PVPlayer();
- virtual ~PVPlayer();
-
- virtual status_t initCheck();
-
- virtual status_t setDataSource(
- const char *url, const KeyedVector<String8, String8> *headers);
-
- virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
- virtual status_t setVideoSurface(const sp<ISurface>& surface);
- virtual status_t prepare();
- virtual status_t prepareAsync();
- virtual status_t start();
- virtual status_t stop();
- virtual status_t pause();
- virtual bool isPlaying();
- virtual status_t seekTo(int msec);
- virtual status_t getCurrentPosition(int *msec);
- virtual status_t getDuration(int *msec);
- virtual status_t reset();
- virtual status_t setLooping(int loop);
- virtual player_type playerType() { return PV_PLAYER; }
- virtual status_t invoke(const Parcel& request, Parcel *reply);
- virtual status_t getMetadata(
- const SortedVector<media::Metadata::Type>& ids,
- Parcel *records);
-
- // make available to PlayerDriver
- void sendEvent(int msg, int ext1=0, int ext2=0) { MediaPlayerBase::sendEvent(msg, ext1, ext2); }
-
-private:
- static void do_nothing(status_t s, void *cookie, bool cancelled) { }
- static void run_init(status_t s, void *cookie, bool cancelled);
- static void run_set_video_surface(status_t s, void *cookie, bool cancelled);
- static void run_set_audio_output(status_t s, void *cookie, bool cancelled);
- static void run_prepare(status_t s, void *cookie, bool cancelled);
- static void check_for_live_streaming(status_t s, void *cookie, bool cancelled);
-
- PlayerDriver* mPlayerDriver;
- char * mDataSourcePath;
- bool mIsDataSourceSet;
- sp<ISurface> mSurface;
- int mSharedFd;
- status_t mInit;
- int mDuration;
-
-#ifdef MAX_OPENCORE_INSTANCES
- static volatile int32_t sNumInstances;
-#endif
-};
-
-}; // namespace android
-
-#endif // ANDROID_PVPLAYER_H
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
index ddc07f6..03dd52d 100644
--- a/include/media/mediametadataretriever.h
+++ b/include/media/mediametadataretriever.h
@@ -82,8 +82,7 @@ public:
status_t setDataSource(const char* dataSourceUrl);
status_t setDataSource(int fd, int64_t offset, int64_t length);
status_t setMode(int mode);
- status_t getMode(int* mode);
- sp<IMemory> captureFrame();
+ sp<IMemory> getFrameAtTime(int64_t timeUs, int option);
sp<IMemory> extractAlbumArt();
const char* extractMetadata(int keyCode);
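captureFrame() and getMode() are gone; thumbnail extraction now asks for a frame at an explicit time. A hedged sketch of the client side, in which a negative timeUs is assumed to request any representative frame and option 0 is a placeholder for the seek-mode flag; grabThumbnail() is hypothetical:

    #include <media/mediametadataretriever.h>

    using namespace android;

    // Hypothetical helper: grab a representative frame as a thumbnail.
    static sp<IMemory> grabThumbnail(MediaMetadataRetriever *retriever,
                                     int fd, int64_t offset, int64_t length) {
        if (retriever->setDataSource(fd, offset, length) != NO_ERROR) {
            return NULL;
        }
        // timeUs < 0 is assumed to request a representative frame;
        // 'option' selects the seek mode and is left at 0 here.
        return retriever->getFrameAtTime(-1 /* timeUs */, 0 /* option */);
    }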
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 207191d..88b0c3e 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -169,8 +169,6 @@ public:
status_t invoke(const Parcel& request, Parcel *reply);
status_t setMetadataFilter(const Parcel& filter);
status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
- status_t suspend();
- status_t resume();
status_t setAudioSessionId(int sessionId);
int getAudioSessionId();
status_t setAuxEffectSendLevel(float level);
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 5ab1640..a710546 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -44,7 +44,8 @@ enum audio_source {
AUDIO_SOURCE_VOICE_CALL = 4,
AUDIO_SOURCE_CAMCORDER = 5,
AUDIO_SOURCE_VOICE_RECOGNITION = 6,
- AUDIO_SOURCE_MAX = AUDIO_SOURCE_VOICE_RECOGNITION,
+ AUDIO_SOURCE_VOICE_COMMUNICATION = 7,
+ AUDIO_SOURCE_MAX = AUDIO_SOURCE_VOICE_COMMUNICATION,
AUDIO_SOURCE_LIST_END // must be last - used to validate audio source type
};
@@ -173,6 +174,7 @@ public:
status_t setAudioEncoder(int ae);
status_t setOutputFile(const char* path);
status_t setOutputFile(int fd, int64_t offset, int64_t length);
+ status_t setOutputFileAuxiliary(int fd);
status_t setVideoSize(int width, int height);
status_t setVideoFrameRate(int frames_per_second);
status_t setParameters(const String8& params);
@@ -199,6 +201,7 @@ private:
bool mIsAudioEncoderSet;
bool mIsVideoEncoderSet;
bool mIsOutputFileSet;
+ bool mIsAuxiliaryOutputFileSet;
Mutex mLock;
Mutex mNotifyLock;
};
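The recorder gains a VoIP capture source and an auxiliary output fd. A hedged sketch wiring both up; the auxiliary fd is assumed to receive a second, smaller copy of the recording, both fds are opened by the caller, and startDualOutputRecording() is hypothetical:

    #include <media/mediarecorder.h>

    using namespace android;

    // Hypothetical helper: record a call with a secondary output file.
    static status_t startDualOutputRecording(const sp<MediaRecorder> &recorder,
                                             int mainFd, int auxFd) {
        recorder->setAudioSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
        recorder->setVideoSource(VIDEO_SOURCE_CAMERA);
        recorder->setOutputFormat(OUTPUT_FORMAT_MPEG_4);
        recorder->setAudioEncoder(AUDIO_ENCODER_AAC);
        recorder->setVideoEncoder(VIDEO_ENCODER_H264);
        recorder->setOutputFile(mainFd, 0 /* offset */, 0 /* length */);
        recorder->setOutputFileAuxiliary(auxFd);  // second copy of the recording
        status_t err = recorder->prepare();
        return err == OK ? recorder->start() : err;
    }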
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index 0d397ac..df5be32 100644
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -38,8 +38,7 @@ struct MediaScanner {
typedef bool (*ExceptionCheck)(void* env);
virtual status_t processDirectory(
- const char *path, const char *extensions,
- MediaScannerClient &client,
+ const char *path, MediaScannerClient &client,
ExceptionCheck exceptionCheck, void *exceptionEnv);
void setLocale(const char *locale);
@@ -55,9 +54,8 @@ private:
char *mLocale;
status_t doProcessDirectory(
- char *path, int pathRemaining, const char *extensions,
- MediaScannerClient &client, ExceptionCheck exceptionCheck,
- void *exceptionEnv);
+ char *path, int pathRemaining, MediaScannerClient &client,
+ ExceptionCheck exceptionCheck, void *exceptionEnv);
MediaScanner(const MediaScanner &);
MediaScanner &operator=(const MediaScanner &);
@@ -73,7 +71,8 @@ public:
bool addStringTag(const char* name, const char* value);
void endFile();
- virtual bool scanFile(const char* path, long long lastModified, long long fileSize) = 0;
+ virtual bool scanFile(const char* path, long long lastModified,
+ long long fileSize, bool isDirectory) = 0;
virtual bool handleStringTag(const char* name, const char* value) = 0;
virtual bool setMimeType(const char* mimeType) = 0;
virtual bool addNoMediaFolder(const char* path) = 0;
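scanFile() now also reports directories, and processDirectory() no longer takes an extension list. A hedged sketch of a client adapted to the new signature; MyScannerClient and its note*() helpers are hypothetical:

    #include <media/mediascanner.h>

    using namespace android;

    // Hypothetical scanner client using the new scanFile() signature.
    class MyScannerClient : public MediaScannerClient {
    public:
        virtual bool scanFile(const char *path, long long lastModified,
                              long long fileSize, bool isDirectory) {
            if (isDirectory) {
                // Directories are now reported too, so stale subtrees
                // can be pruned from the media database.
                return noteDirectory(path);
            }
            return noteFile(path, lastModified, fileSize);
        }

        virtual bool handleStringTag(const char *name, const char *value) { return true; }
        virtual bool setMimeType(const char *mimeType) { return true; }
        virtual bool addNoMediaFolder(const char *path) { return true; }

    private:
        bool noteDirectory(const char *path);                              // hypothetical
        bool noteFile(const char *path, long long mtime, long long size);  // hypothetical
    };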
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
new file mode 100644
index 0000000..4599d70
--- /dev/null
+++ b/include/media/stagefright/ACodec.h
@@ -0,0 +1,157 @@
+#ifndef A_CODEC_H_
+
+#define A_CODEC_H_
+
+#include <stdint.h>
+#include <android/native_window.h>
+#include <media/IOMX.h>
+#include <media/stagefright/foundation/AHierarchicalStateMachine.h>
+
+namespace android {
+
+struct ABuffer;
+struct MemoryDealer;
+
+struct ACodec : public AHierarchicalStateMachine {
+ enum {
+ kWhatFillThisBuffer = 'fill',
+ kWhatDrainThisBuffer = 'drai',
+ kWhatEOS = 'eos ',
+ kWhatShutdownCompleted = 'scom',
+ kWhatFlushCompleted = 'fcom',
+ kWhatOutputFormatChanged = 'outC',
+ };
+
+ ACodec();
+
+ void setNotificationMessage(const sp<AMessage> &msg);
+ void initiateSetup(const sp<AMessage> &msg);
+ void signalFlush();
+ void signalResume();
+ void initiateShutdown();
+
+protected:
+ virtual ~ACodec();
+
+private:
+ struct BaseState;
+ struct UninitializedState;
+ struct LoadedToIdleState;
+ struct IdleToExecutingState;
+ struct ExecutingState;
+ struct OutputPortSettingsChangedState;
+ struct ExecutingToIdleState;
+ struct IdleToLoadedState;
+ struct ErrorState;
+ struct FlushingState;
+
+ enum {
+ kWhatSetup = 'setu',
+ kWhatOMXMessage = 'omx ',
+ kWhatInputBufferFilled = 'inpF',
+ kWhatOutputBufferDrained = 'outD',
+ kWhatShutdown = 'shut',
+ kWhatFlush = 'flus',
+ kWhatResume = 'resm',
+ kWhatDrainDeferredMessages = 'drai',
+ };
+
+ enum {
+ kPortIndexInput = 0,
+ kPortIndexOutput = 1
+ };
+
+ struct BufferInfo {
+ enum Status {
+ OWNED_BY_US,
+ OWNED_BY_COMPONENT,
+ OWNED_BY_UPSTREAM,
+ OWNED_BY_DOWNSTREAM,
+ OWNED_BY_NATIVE_WINDOW,
+ };
+
+ IOMX::buffer_id mBufferID;
+ Status mStatus;
+
+ sp<ABuffer> mData;
+ sp<GraphicBuffer> mGraphicBuffer;
+ };
+
+ sp<AMessage> mNotify;
+
+ sp<UninitializedState> mUninitializedState;
+ sp<LoadedToIdleState> mLoadedToIdleState;
+ sp<IdleToExecutingState> mIdleToExecutingState;
+ sp<ExecutingState> mExecutingState;
+ sp<OutputPortSettingsChangedState> mOutputPortSettingsChangedState;
+ sp<ExecutingToIdleState> mExecutingToIdleState;
+ sp<IdleToLoadedState> mIdleToLoadedState;
+ sp<ErrorState> mErrorState;
+ sp<FlushingState> mFlushingState;
+
+ AString mComponentName;
+ sp<IOMX> mOMX;
+ IOMX::node_id mNode;
+ sp<MemoryDealer> mDealer[2];
+
+ sp<ANativeWindow> mNativeWindow;
+
+ Vector<BufferInfo> mBuffers[2];
+ bool mPortEOS[2];
+
+ List<sp<AMessage> > mDeferredQueue;
+
+ bool mSentFormat;
+
+ status_t allocateBuffersOnPort(OMX_U32 portIndex);
+ status_t freeBuffersOnPort(OMX_U32 portIndex);
+ status_t freeBuffer(OMX_U32 portIndex, size_t i);
+
+ status_t allocateOutputBuffersFromNativeWindow();
+ status_t cancelBufferToNativeWindow(BufferInfo *info);
+ status_t freeOutputBuffersOwnedByNativeWindow();
+ BufferInfo *dequeueBufferFromNativeWindow();
+
+ BufferInfo *findBufferByID(
+ uint32_t portIndex, IOMX::buffer_id bufferID,
+ ssize_t *index = NULL);
+
+ void setComponentRole(bool isEncoder, const char *mime);
+ void configureCodec(const char *mime, const sp<AMessage> &msg);
+
+ status_t setVideoPortFormatType(
+ OMX_U32 portIndex,
+ OMX_VIDEO_CODINGTYPE compressionFormat,
+ OMX_COLOR_FORMATTYPE colorFormat);
+
+ status_t setSupportedOutputFormat();
+
+ status_t setupVideoDecoder(
+ const char *mime, int32_t width, int32_t height);
+
+ status_t setVideoFormatOnPort(
+ OMX_U32 portIndex,
+ int32_t width, int32_t height,
+ OMX_VIDEO_CODINGTYPE compressionFormat);
+
+ status_t setupAACDecoder(int32_t numChannels, int32_t sampleRate);
+ status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
+
+ status_t initNativeWindow();
+
+ // Returns true iff all buffers on the given port have status OWNED_BY_US.
+ bool allYourBuffersAreBelongToUs(OMX_U32 portIndex);
+
+ bool allYourBuffersAreBelongToUs();
+
+ void deferMessage(const sp<AMessage> &msg);
+ void processDeferredMessages();
+
+ void sendFormatChange();
+
+ DISALLOW_EVIL_CONSTRUCTORS(ACodec);
+};
+
+} // namespace android
+
+#endif // A_CODEC_H_
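ACodec is an AHandler-based hierarchical state machine: it lives on an ALooper and reports kWhatFillThisBuffer, kWhatDrainThisBuffer, and the other kWhat* events through the message registered with setNotificationMessage(). A hedged bring-up sketch; the format keys ("mime", "width", "height") are assumed, the notify message targets a caller-owned AHandler, and startAvcDecoder() is hypothetical:

    #include <media/stagefright/ACodec.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Hypothetical bring-up of an AVC decoder on a caller-provided looper.
    static sp<ACodec> startAvcDecoder(const sp<ALooper> &looper,
                                      const sp<AMessage> &notify) {
        sp<ACodec> codec = new ACodec;
        looper->registerHandler(codec);        // ACodec is an AHandler
        codec->setNotificationMessage(notify); // where kWhat* events land

        sp<AMessage> format = new AMessage;
        format->setString("mime", "video/avc");  // key names assumed
        format->setInt32("width", 1280);
        format->setInt32("height", 720);
        codec->initiateSetup(format);
        return codec;
    }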
diff --git a/include/media/stagefright/AMRWriter.h b/include/media/stagefright/AMRWriter.h
index aa965e1..62d57b4 100644
--- a/include/media/stagefright/AMRWriter.h
+++ b/include/media/stagefright/AMRWriter.h
@@ -44,7 +44,7 @@ protected:
virtual ~AMRWriter();
private:
- FILE *mFile;
+ int mFd;
status_t mInitCheck;
sp<MediaSource> mSource;
bool mStarted;
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 3192d03..794355b 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -20,39 +20,167 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
+#include <camera/ICamera.h>
+#include <camera/CameraParameters.h>
#include <utils/List.h>
#include <utils/RefBase.h>
-#include <utils/threads.h>
namespace android {
-class ICamera;
class IMemory;
class Camera;
+class Surface;
class CameraSource : public MediaSource, public MediaBufferObserver {
public:
+ /**
+ * Factory method to create a new CameraSource using the current
+ * settings (such as video size, frame rate, color format, etc)
+ * from the default camera.
+ *
+ * @return NULL on error.
+ */
static CameraSource *Create();
- static CameraSource *CreateFromCamera(const sp<Camera> &camera);
+
+ /**
+ * Factory method to create a new CameraSource.
+ *
+ * @param camera the video input frame data source. If it is NULL,
+ * we will try to connect to the camera with the given
+ * cameraId.
+ *
+ * @param cameraId the id of the camera that the source will connect
+ * to if camera is NULL; otherwise ignored.
+ *
+ * @param videoSize the dimension (in pixels) of the video frame
+ * @param frameRate the target frames per second
+ * @param surface the preview surface for display where preview
+ * frames are sent to
+ * @param storeMetaDataInVideoBuffers true to request the camera
+ * source to store meta data in video buffers; false to
+ * request the camera source to store real YUV frame data
+ * in the video buffers. The camera source may not support
+ * storing meta data in video buffers, if so, a request
+ * to do that will NOT be honored. To find out whether
+ * meta data is actually being stored in video buffers
+ * during recording, call isMetaDataStoredInVideoBuffers().
+ *
+ * @return NULL on error.
+ */
+ static CameraSource *CreateFromCamera(const sp<ICamera> &camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<Surface>& surface,
+ bool storeMetaDataInVideoBuffers = false);
virtual ~CameraSource();
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options = NULL);
+ /**
+ * Check whether a CameraSource object is properly initialized.
+ * Must call this method before stop().
+ * @return OK if initialization has successfully completed.
+ */
+ virtual status_t initCheck() const;
+
+ /**
+ * Returns the MetaData associated with the CameraSource,
+ * including:
+ * kKeyColorFormat: YUV color format of the video frames
+ * kKeyWidth, kKeyHeight: dimension (in pixels) of the video frames
+ * kKeySampleRate: frame rate in frames per second
+ * kKeyMIMEType: always fixed to be MEDIA_MIMETYPE_VIDEO_RAW
+ */
virtual sp<MetaData> getFormat();
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
+ /**
+ * Retrieve the total number of video buffers available from
+ * this source.
+ *
+ * This method is useful if these video buffers are used
+ * for passing video frame data to other media components,
+ * such as OMX video encoders, in order to eliminate the
+ * memcpy of the data.
+ *
+ * @return the total number of video buffers. Returns 0 to
+ * indicate that this source does not make the video
+ * buffer information available.
+ */
+ size_t getNumberOfVideoBuffers() const;
+
+ /**
+ * Retrieve the individual video buffer available from
+ * this source.
+ *
+ * @param index the index corresponding to the video buffer.
+ * Valid range of the index is [0, n], where n =
+ * getNumberOfVideoBuffers() - 1.
+ *
+ * @return the video buffer corresponding to the given index.
+ * If index is out of range, 0 should be returned.
+ */
+ sp<IMemory> getVideoBuffer(size_t index) const;
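A hedged sketch of how a client might use the two buffer-query methods above to hand the source's buffers to an encoder without copying; 'source' is the hypothetical CameraSource from the earlier sketch, and the registration step is left as a comment.

    size_t n = source->getNumberOfVideoBuffers();
    for (size_t i = 0; i < n; ++i) {
        sp<IMemory> frame = source->getVideoBuffer(i);
        if (frame == NULL) {
            break;  // buffer information not available
        }
        // Register 'frame' with the downstream encoder here.
    }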
+
+ /**
+ * Tell whether this camera source stores meta data or real YUV
+ * frame data in video buffers.
+ *
+ * @return true if meta data is stored in the video
+ * buffers; false if real YUV data is stored in
+ * the video buffers.
+ */
+ bool isMetaDataStoredInVideoBuffers() const;
virtual void signalBufferReturned(MediaBuffer* buffer);
-private:
- friend class CameraSourceListener;
+protected:
+ enum CameraFlags {
+ FLAGS_SET_CAMERA = 1L << 0,
+ FLAGS_HOT_CAMERA = 1L << 1,
+ };
+
+ int32_t mCameraFlags;
+ Size mVideoSize;
+ int32_t mVideoFrameRate;
+ int32_t mColorFormat;
+ status_t mInitCheck;
- sp<Camera> mCamera;
+ sp<Camera> mCamera;
+ sp<Surface> mSurface;
sp<MetaData> mMeta;
+ int64_t mStartTimeUs;
+ int32_t mNumFramesReceived;
+ int64_t mLastFrameTimestampUs;
+ bool mStarted;
+
+ CameraSource(const sp<ICamera>& camera, int32_t cameraId,
+ Size videoSize, int32_t frameRate,
+ const sp<Surface>& surface,
+ bool storeMetaDataInVideoBuffers);
+
+ virtual void startCameraRecording();
+ virtual void stopCameraRecording();
+ virtual void releaseRecordingFrame(const sp<IMemory>& frame);
+
+ // Returns true if the current frame needs to be skipped.
+ // Called from dataCallbackTimestamp.
+ virtual bool skipCurrentFrame(int64_t timestampUs) {return false;}
+
+ // Callback called when still camera raw data is available.
+ virtual void dataCallback(int32_t msgType, const sp<IMemory> &data) {}
+
+ virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
+ const sp<IMemory> &data);
+
+private:
+ friend class CameraSourceListener;
+
Mutex mLock;
Condition mFrameAvailableCondition;
Condition mFrameCompleteCondition;
@@ -60,25 +188,35 @@ private:
List<sp<IMemory> > mFramesBeingEncoded;
List<int64_t> mFrameTimes;
- int64_t mStartTimeUs;
int64_t mFirstFrameTimeUs;
- int64_t mLastFrameTimestampUs;
- int32_t mNumFramesReceived;
int32_t mNumFramesEncoded;
int32_t mNumFramesDropped;
int32_t mNumGlitches;
int64_t mGlitchDurationThresholdUs;
bool mCollectStats;
- bool mStarted;
-
- CameraSource(const sp<Camera> &camera);
-
- void dataCallbackTimestamp(
- int64_t timestampUs, int32_t msgType, const sp<IMemory> &data);
+ bool mIsMetaDataStoredInVideoBuffers;
void releaseQueuedFrames();
void releaseOneRecordingFrame(const sp<IMemory>& frame);
+
+ status_t init(const sp<ICamera>& camera, int32_t cameraId,
+ Size videoSize, int32_t frameRate,
+ bool storeMetaDataInVideoBuffers);
+ status_t isCameraAvailable(const sp<ICamera>& camera, int32_t cameraId);
+ status_t isCameraColorFormatSupported(const CameraParameters& params);
+ status_t configureCamera(CameraParameters* params,
+ int32_t width, int32_t height,
+ int32_t frameRate);
+
+ status_t checkVideoSize(const CameraParameters& params,
+ int32_t width, int32_t height);
+
+ status_t checkFrameRate(const CameraParameters& params,
+ int32_t frameRate);
+
+ void releaseCamera();
+
CameraSource(const CameraSource &);
CameraSource &operator=(const CameraSource &);
};
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
new file mode 100644
index 0000000..0e5d534
--- /dev/null
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAMERA_SOURCE_TIME_LAPSE_H_
+
+#define CAMERA_SOURCE_TIME_LAPSE_H_
+
+#include <pthread.h>
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class ICamera;
+class IMemory;
+class Camera;
+
+class CameraSourceTimeLapse : public CameraSource {
+public:
+ static CameraSourceTimeLapse *CreateFromCamera(
+ const sp<ICamera> &camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<Surface>& surface,
+ int64_t timeBetweenTimeLapseFrameCaptureUs);
+
+ virtual ~CameraSourceTimeLapse();
+
+ // If the frame capture interval is large, read will block for a long time.
+ // Due to the way the mediaRecorder framework works, a stop() call from
+ // mediaRecorder waits until the read returns, causing a long wait for
+ // stop() to return. To avoid this, we can make read() frequently return a
+ // copy of the last read frame with the same time stamp. This keeps the
+ // read() call from blocking too long. Calling this function quickly
+ // captures another frame, keeps its copy, and enables this mode of read()
+ // returning quickly.
+ void startQuickReadReturns();
+
+private:
+ // If true, will use the still camera's takePicture() for time lapse frames.
+ // If false, will use the video camera frames instead.
+ bool mUseStillCameraForTimeLapse;
+
+ // Size of picture taken from still camera. This may be larger than the size
+ // of the video, as still camera may not support the exact video resolution
+ // demanded. See setPictureSizeToClosestSupported().
+ int32_t mPictureWidth;
+ int32_t mPictureHeight;
+
+ // size of the encoded video.
+ int32_t mVideoWidth;
+ int32_t mVideoHeight;
+
+ // True if we need to crop the still camera image to get the video frame.
+ bool mNeedCropping;
+
+ // Start location of the cropping rectangle.
+ int32_t mCropRectStartX;
+ int32_t mCropRectStartY;
+
+ // Time between capture of two frames during time lapse recording
+ // Negative value indicates that timelapse is disabled.
+ int64_t mTimeBetweenTimeLapseFrameCaptureUs;
+
+ // Time between two frames in final video (1/frameRate)
+ int64_t mTimeBetweenTimeLapseVideoFramesUs;
+
+ // Real timestamp of the last encoded time lapse frame
+ int64_t mLastTimeLapseFrameRealTimestampUs;
+
+ // Thread id of thread which takes still picture and sleeps in a loop.
+ pthread_t mThreadTimeLapse;
+
+ // Variable set in dataCallbackTimestamp() to help skipCurrentFrame()
+ // to know if current frame needs to be skipped.
+ bool mSkipCurrentFrame;
+
+ // Lock for accessing mCameraIdle
+ Mutex mCameraIdleLock;
+
+ // Condition variable to wait on if camera is not yet idle. Once the
+ // camera gets idle, this variable will be signalled.
+ Condition mCameraIdleCondition;
+
+ // True if camera is in preview mode and ready for takePicture().
+ // False after a call to takePicture() but before the final compressed
+ // data callback has been called and preview has been restarted.
+ volatile bool mCameraIdle;
+
+ // True if stop() is waiting for camera to get idle, i.e. for the last
+ // takePicture() to complete. This is needed so that dataCallbackTimestamp()
+ // can return immediately.
+ volatile bool mStopWaitingForIdleCamera;
+
+ // Lock for accessing quick stop variables.
+ Mutex mQuickStopLock;
+
+ // Condition variable to wake up still picture thread.
+ Condition mTakePictureCondition;
+
+ // mQuickStop is set to true if we use quick read() returns, otherwise it is set
+ // to false. Once in this mode read() returns a copy of the last read frame
+ // with the same time stamp. See startQuickReadReturns().
+ volatile bool mQuickStop;
+
+ // Forces the next frame passed to dataCallbackTimestamp() to be read
+ // as a time lapse frame. Used by startQuickReadReturns() so that the next
+ // frame wakes up any blocking read.
+ volatile bool mForceRead;
+
+ // Stores a copy of the MediaBuffer read in the last read() call after
+ // mQuickStop was true.
+ MediaBuffer* mLastReadBufferCopy;
+
+ // Status code for last read.
+ status_t mLastReadStatus;
+
+ CameraSourceTimeLapse(
+ const sp<ICamera> &camera,
+ int32_t cameraId,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<Surface>& surface,
+ int64_t timeBetweenTimeLapseFrameCaptureUs);
+
+ // Wrapper over CameraSource::signalBufferReturned() to implement quick stop.
+ // It only handles the case when mLastReadBufferCopy is signalled. Otherwise
+ // it calls the base class' function.
+ virtual void signalBufferReturned(MediaBuffer* buffer);
+
+ // Wrapper over CameraSource::read() to implement quick stop.
+ virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+ // For the still camera case, starts a thread which calls the camera's
+ // takePicture() in a loop. For the video camera case, just starts the
+ // camera's video recording.
+ virtual void startCameraRecording();
+
+ // For the still camera case, joins the thread created in startCameraRecording().
+ // For the video camera case, just stops the camera's video recording.
+ virtual void stopCameraRecording();
+
+ // For the still camera case, nothing needs to be done, as the memory is
+ // locally allocated with refcounting.
+ // For the video camera case, just tell the camera to release the frame.
+ virtual void releaseRecordingFrame(const sp<IMemory>& frame);
+
+ // mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current
+ // frame needs to be skipped and this function just returns the value of mSkipCurrentFrame.
+ virtual bool skipCurrentFrame(int64_t timestampUs);
+
+ // Handles the callback for raw frame data from the still camera.
+ // Creates a copy of the frame data, as the camera can reuse the frame memory
+ // once this callback returns. The function also sets a new timestamp corresponding
+ // to one frame time ahead of the last encoded frame's time stamp. It then
+ // calls dataCallbackTimestamp() of the base class with the copied data and the
+ // modified timestamp, so the base class treats the frame as if it were received
+ // from a video camera and proceeds as usual.
+ virtual void dataCallback(int32_t msgType, const sp<IMemory> &data);
+
+ // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
+ // timestamp and set mSkipCurrentFrame.
+ // Then it calls the base CameraSource::dataCallbackTimestamp()
+ virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
+ const sp<IMemory> &data);
+
+ // Convenience function to fill mLastReadBufferCopy from the just read
+ // buffer.
+ void fillLastReadBufferCopy(MediaBuffer& sourceBuffer);
+
+ // If the passed in size (width x height) is a supported video/preview size,
+ // the function sets the camera's video/preview size to it and returns true.
+ // Otherwise returns false.
+ bool trySettingVideoSize(int32_t width, int32_t height);
+
+ // The still camera may not support the demanded video width and height.
+ // We look through the picture sizes supported by the still camera and
+ // choose the smallest one whose width and height are both at least as large
+ // as the corresponding video dimensions. The still picture will then be
+ // cropped to get the video frame.
+ // The function returns true if the camera supports picture sizes greater than
+ // or equal to the passed in width and height, and false otherwise.
+ bool setPictureSizeToClosestSupported(int32_t width, int32_t height);
+
+ // Computes the offset of the rectangle from where to start cropping the
+ // still image into the video frame. We choose the center of the image to be
+ // cropped. The offset is stored in (mCropRectStartX, mCropRectStartY).
+ bool computeCropRectangleOffset();
+
+ // Crops the source data into a smaller image starting at
+ // (mCropRectStartX, mCropRectStartY) and of the size of the video frame.
+ // The data is returned into a newly allocated IMemory.
+ sp<IMemory> cropYUVImage(const sp<IMemory> &source_data);
+
+ // When video camera is used for time lapse capture, returns true
+ // until enough time has passed for the next time lapse frame. When
+ // the frame needs to be encoded, it returns false and also modifies
+ // the time stamp to be one frame time ahead of the last encoded
+ // frame's time stamp.
+ bool skipFrameAndModifyTimeStamp(int64_t *timestampUs);
+
+ // Wrapper to enter threadTimeLapseEntry()
+ static void *ThreadTimeLapseWrapper(void *me);
+
+ // Runs a loop which sleeps until a still picture is required
+ // and then calls mCamera->takePicture() to take the still picture.
+ // Used only in the case mUseStillCameraForTimeLapse = true.
+ void threadTimeLapseEntry();
+
+ // Wrapper to enter threadStartPreview()
+ static void *ThreadStartPreviewWrapper(void *me);
+
+ // Starts the camera's preview.
+ void threadStartPreview();
+
+ // Starts thread ThreadStartPreviewWrapper() for restarting preview.
+ // Needs to be done in a thread so that dataCallback() which calls this function
+ // can return, and the camera can know that takePicture() is done.
+ void restartPreview();
+
+ // Creates a copy of source_data into a new memory of final type MemoryBase.
+ sp<IMemory> createIMemoryCopy(const sp<IMemory> &source_data);
+
+ CameraSourceTimeLapse(const CameraSourceTimeLapse &);
+ CameraSourceTimeLapse &operator=(const CameraSourceTimeLapse &);
+};
+
+} // namespace android
+
+#endif // CAMERA_SOURCE_TIME_LAPSE_H_
diff --git a/include/media/stagefright/ColorConverter.h b/include/media/stagefright/ColorConverter.h
index bc3f464..2ae8a5b 100644
--- a/include/media/stagefright/ColorConverter.h
+++ b/include/media/stagefright/ColorConverter.h
@@ -21,6 +21,7 @@
#include <sys/types.h>
#include <stdint.h>
+#include <utils/Errors.h>
#include <OMX_Video.h>
@@ -32,36 +33,48 @@ struct ColorConverter {
bool isValid() const;
- void convert(
- size_t width, size_t height,
- const void *srcBits, size_t srcSkip,
- void *dstBits, size_t dstSkip);
+ status_t convert(
+ const void *srcBits,
+ size_t srcWidth, size_t srcHeight,
+ size_t srcCropLeft, size_t srcCropTop,
+ size_t srcCropRight, size_t srcCropBottom,
+ void *dstBits,
+ size_t dstWidth, size_t dstHeight,
+ size_t dstCropLeft, size_t dstCropTop,
+ size_t dstCropRight, size_t dstCropBottom);
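A sketch of a call against the new signature, assuming a QCIF (176x144) frame, correctly sized local buffers, and the existing (srcFormat, dstFormat) constructor declared earlier in this class; the crop coordinates are inclusive, matching the (0, 0, width - 1, height - 1) convention used elsewhere in stagefright.

    uint8_t srcBits[176 * 144 * 3 / 2];   // YUV420 planar: 1.5 bytes per pixel
    uint16_t dstBits[176 * 144];          // RGB565: 2 bytes per pixel

    ColorConverter converter(
            OMX_COLOR_FormatYUV420Planar, OMX_COLOR_Format16bitRGB565);

    if (converter.isValid()) {
        status_t err = converter.convert(
                srcBits, 176, 144, 0, 0, 175, 143,   // source and its crop
                dstBits, 176, 144, 0, 0, 175, 143);  // destination and its crop
    }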
private:
+ struct BitmapParams {
+ BitmapParams(
+ void *bits,
+ size_t width, size_t height,
+ size_t cropLeft, size_t cropTop,
+ size_t cropRight, size_t cropBottom);
+
+ size_t cropWidth() const;
+ size_t cropHeight() const;
+
+ void *mBits;
+ size_t mWidth, mHeight;
+ size_t mCropLeft, mCropTop, mCropRight, mCropBottom;
+ };
+
OMX_COLOR_FORMATTYPE mSrcFormat, mDstFormat;
uint8_t *mClip;
uint8_t *initClip();
- void convertCbYCrY(
- size_t width, size_t height,
- const void *srcBits, size_t srcSkip,
- void *dstBits, size_t dstSkip);
-
- void convertYUV420Planar(
- size_t width, size_t height,
- const void *srcBits, size_t srcSkip,
- void *dstBits, size_t dstSkip);
-
- void convertQCOMYUV420SemiPlanar(
- size_t width, size_t height,
- const void *srcBits, size_t srcSkip,
- void *dstBits, size_t dstSkip);
-
- void convertYUV420SemiPlanar(
- size_t width, size_t height,
- const void *srcBits, size_t srcSkip,
- void *dstBits, size_t dstSkip);
+ status_t convertCbYCrY(
+ const BitmapParams &src, const BitmapParams &dst);
+
+ status_t convertYUV420Planar(
+ const BitmapParams &src, const BitmapParams &dst);
+
+ status_t convertQCOMYUV420SemiPlanar(
+ const BitmapParams &src, const BitmapParams &dst);
+
+ status_t convertYUV420SemiPlanar(
+ const BitmapParams &src, const BitmapParams &dst);
ColorConverter(const ColorConverter &);
ColorConverter &operator=(const ColorConverter &);
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index d0b9fcd..d4f1733 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -48,13 +48,13 @@ public:
virtual status_t initCheck() const = 0;
- virtual ssize_t readAt(off_t offset, void *data, size_t size) = 0;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
// Convenience methods:
- bool getUInt16(off_t offset, uint16_t *x);
+ bool getUInt16(off64_t offset, uint16_t *x);
// May return ERROR_UNSUPPORTED.
- virtual status_t getSize(off_t *size);
+ virtual status_t getSize(off64_t *size);
virtual uint32_t flags() {
return 0;
@@ -80,6 +80,9 @@ public:
}
virtual void getDrmInfo(DecryptHandle **handle, DrmManagerClient **client) {};
+ virtual String8 getUri() {
+ return String8();
+ }
protected:
virtual ~DataSource() {}
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index 4307263..72a0403 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -34,9 +34,9 @@ public:
virtual status_t initCheck() const;
- virtual ssize_t readAt(off_t offset, void *data, size_t size);
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size);
- virtual status_t getSize(off_t *size);
+ virtual status_t getSize(off64_t *size);
virtual DecryptHandle* DrmInitialization(DrmManagerClient *client);
@@ -46,7 +46,6 @@ protected:
virtual ~FileSource();
private:
- FILE *mFile;
int mFd;
int64_t mOffset;
int64_t mLength;
@@ -59,7 +58,7 @@ private:
int64_t mDrmBufSize;
unsigned char *mDrmBuf;
- ssize_t readAtDRM(off_t offset, void *data, size_t size);
+ ssize_t readAtDRM(off64_t offset, void *data, size_t size);
FileSource(const FileSource &);
FileSource &operator=(const FileSource &);
diff --git a/include/media/stagefright/HardwareAPI.h b/include/media/stagefright/HardwareAPI.h
index 63f11d1..17908b4 100644
--- a/include/media/stagefright/HardwareAPI.h
+++ b/include/media/stagefright/HardwareAPI.h
@@ -19,28 +19,76 @@
#define HARDWARE_API_H_
#include <media/stagefright/OMXPluginBase.h>
-#include <media/stagefright/VideoRenderer.h>
-#include <surfaceflinger/ISurface.h>
+#include <ui/android_native_buffer.h>
#include <utils/RefBase.h>
#include <OMX_Component.h>
-extern android::VideoRenderer *createRenderer(
- const android::sp<android::ISurface> &surface,
- const char *componentName,
- OMX_COLOR_FORMATTYPE colorFormat,
- size_t displayWidth, size_t displayHeight,
- size_t decodedWidth, size_t decodedHeight);
-
-extern android::VideoRenderer *createRendererWithRotation(
- const android::sp<android::ISurface> &surface,
- const char *componentName,
- OMX_COLOR_FORMATTYPE colorFormat,
- size_t displayWidth, size_t displayHeight,
- size_t decodedWidth, size_t decodedHeight,
- int32_t rotationDegrees);
+namespace android {
+
+// A pointer to this struct is passed to OMX_SetParameter() when the extension
+// index for the 'OMX.google.android.index.enableAndroidNativeBuffers' extension
+// is given.
+//
+// When Android native buffer use is disabled for a port (the default state),
+// the OMX node should operate as normal, and expect UseBuffer calls to set its
+// buffers. This is the mode that will be used when CPU access to the buffer is
+// required.
+//
+// When Android native buffer use has been enabled for a given port, the video
+// color format for the port is to be interpreted as an Android pixel format
+// rather than an OMX color format. The node should then expect to receive
+// UseAndroidNativeBuffer calls (via OMX_SetParameter) rather than UseBuffer
+// calls for that port.
+struct EnableAndroidNativeBuffersParams {
+ OMX_U32 nSize;
+ OMX_VERSIONTYPE nVersion;
+ OMX_U32 nPortIndex;
+ OMX_BOOL enable;
+};
+
+// A pointer to this struct is passed to OMX_SetParameter() when the extension
+// index "OMX.google.android.index.storeMetaDataInBuffers"
+// is given.
+//
+// When meta data is stored in the video buffers passed between OMX clients
+// and OMX components, interpretation of the buffer data is up to the
+// buffer receiver, and the data may or may not be the actual video data, but
+// some information helpful for the receiver to locate the actual data.
+// The buffer receiver thus needs to know how to interpret what is stored
+// in these buffers, with mechanisms pre-determined externally. How to
+// interpret the meta data is outside of the scope of this method.
+//
+// Currently, this is specifically used to pass meta data from video source
+// (camera component, for instance) to video encoder to avoid memcpying of
+// input video frame data. To do this, bStoreMetaData is set to OMX_TRUE.
+// If bStoreMetaData is set to OMX_FALSE, real YUV frame data will be stored
+// in the buffers. In addition, if no OMX_SetParameter() call is made
+// with the corresponding extension index, real YUV data is stored
+// in the buffers.
+struct StoreMetaDataInBuffersParams {
+ OMX_U32 nSize;
+ OMX_VERSIONTYPE nVersion;
+ OMX_U32 nPortIndex;
+ OMX_BOOL bStoreMetaData;
+};
+
+// A pointer to this struct is passed to OMX_SetParameter when the extension
+// index for the 'OMX.google.android.index.useAndroidNativeBuffer' extension is
+// given. This call will only be performed if a prior call was made with the
+// 'OMX.google.android.index.enableAndroidNativeBuffers' extension index,
+// enabling use of Android native buffers.
+struct UseAndroidNativeBufferParams {
+ OMX_U32 nSize;
+ OMX_VERSIONTYPE nVersion;
+ OMX_U32 nPortIndex;
+ OMX_PTR pAppPrivate;
+ OMX_BUFFERHEADERTYPE **bufferHeader;
+ const sp<android_native_buffer_t>& nativeBuffer;
+};
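A hedged sketch of the handshake these structs describe, written against the raw OMX IL macros from OMX_Core.h; the component handle 'hComponent', the choice of port 0, and the zeroed version fields are assumptions about the surrounding client code, not part of this header. The same GetExtensionIndex/SetParameter pattern applies to the two native-buffer extensions above.

    // Requires OMX_Core.h (for the OMX_* macros) and <string.h> (for memset).
    OMX_INDEXTYPE index;
    OMX_ERRORTYPE err = OMX_GetExtensionIndex(
            hComponent,
            const_cast<char *>("OMX.google.android.index.storeMetaDataInBuffers"),
            &index);

    if (err == OMX_ErrorNone) {
        android::StoreMetaDataInBuffersParams params;
        memset(&params, 0, sizeof(params));
        params.nSize = sizeof(params);
        params.nVersion.s.nVersionMajor = 1;
        params.nPortIndex = 0;               // assumed: the encoder's input port
        params.bStoreMetaData = OMX_TRUE;

        err = OMX_SetParameter(hComponent, index, &params);
    }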
+
+} // namespace android
extern android::OMXPluginBase *createOMXPlugin();
#endif // HARDWARE_API_H_
-
diff --git a/include/media/stagefright/JPEGSource.h b/include/media/stagefright/JPEGSource.h
index 9d0a700..1b7e91b 100644
--- a/include/media/stagefright/JPEGSource.h
+++ b/include/media/stagefright/JPEGSource.h
@@ -42,9 +42,9 @@ private:
sp<DataSource> mSource;
MediaBufferGroup *mGroup;
bool mStarted;
- off_t mSize;
+ off64_t mSize;
int32_t mWidth, mHeight;
- off_t mOffset;
+ off64_t mOffset;
status_t parseJPEG();
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 7bf07eb..f7618e9 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -61,20 +61,21 @@ protected:
private:
class Track;
- FILE *mFile;
+ int mFd;
+ status_t mInitCheck;
bool mUse4ByteNalLength;
bool mUse32BitOffset;
bool mIsFileSizeLimitExplicitlyRequested;
bool mPaused;
bool mStarted;
- off_t mOffset;
+ off64_t mOffset;
off_t mMdatOffset;
uint8_t *mMoovBoxBuffer;
- off_t mMoovBoxBufferOffset;
+ off64_t mMoovBoxBufferOffset;
bool mWriteMoovBoxToMemory;
- off_t mFreeBoxOffset;
+ off64_t mFreeBoxOffset;
bool mStreamableFile;
- off_t mEstimatedMoovBoxSize;
+ off64_t mEstimatedMoovBoxSize;
uint32_t mInterleaveDurationUs;
int32_t mTimeScale;
int64_t mStartTimestampUs;
@@ -83,7 +84,7 @@ private:
List<Track *> mTracks;
- List<off_t> mBoxes;
+ List<off64_t> mBoxes;
void setStartTimestampUs(int64_t timeUs);
int64_t getStartTimestampUs(); // Not const
@@ -145,10 +146,10 @@ private:
void unlock();
// Acquire lock before calling these methods
- off_t addSample_l(MediaBuffer *buffer);
- off_t addLengthPrefixedSample_l(MediaBuffer *buffer);
+ off64_t addSample_l(MediaBuffer *buffer);
+ off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
- inline size_t write(const void *ptr, size_t size, size_t nmemb, FILE* stream);
+ inline size_t write(const void *ptr, size_t size, size_t nmemb);
bool exceedsFileSizeLimit();
bool use32BitFileOffset() const;
bool exceedsFileDurationLimit();
diff --git a/include/media/stagefright/MediaBuffer.h b/include/media/stagefright/MediaBuffer.h
index 339e6fb..c1c4f94 100644
--- a/include/media/stagefright/MediaBuffer.h
+++ b/include/media/stagefright/MediaBuffer.h
@@ -25,6 +25,7 @@
namespace android {
+class GraphicBuffer;
class MediaBuffer;
class MediaBufferObserver;
class MetaData;
@@ -48,6 +49,8 @@ public:
MediaBuffer(size_t size);
+ MediaBuffer(const sp<GraphicBuffer>& graphicBuffer);
+
// Decrements the reference count and returns the buffer to its
// associated MediaBufferGroup if the reference count drops to 0.
void release();
@@ -63,6 +66,8 @@ public:
void set_range(size_t offset, size_t length);
+ sp<GraphicBuffer> graphicBuffer() const;
+
sp<MetaData> meta_data();
// Clears meta data and resets the range to the full extent.
@@ -94,6 +99,7 @@ private:
void *mData;
size_t mSize, mRangeOffset, mRangeLength;
+ sp<GraphicBuffer> mGraphicBuffer;
bool mOwnsData;
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 92ce068..2d50ca5 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -44,6 +44,8 @@ extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
+
} // namespace android
#endif // MEDIA_DEFS_H_
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index dafc621..a31395e 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -78,31 +78,18 @@ struct MediaSource : public RefBase {
void clearSeekTo();
bool getSeekTo(int64_t *time_us, SeekMode *mode) const;
- // Option allows encoder to skip some frames until the specified
- // time stamp.
- // To prevent from being abused, when the skipFrame timestamp is
- // found to be more than 1 second later than the current timestamp,
- // an error will be returned from read().
- void clearSkipFrame();
- bool getSkipFrame(int64_t *timeUs) const;
- void setSkipFrame(int64_t timeUs);
-
void setLateBy(int64_t lateness_us);
int64_t getLateBy() const;
private:
enum Options {
- // Bit map
kSeekTo_Option = 1,
- kSkipFrame_Option = 2,
};
uint32_t mOptions;
int64_t mSeekTimeUs;
SeekMode mSeekMode;
int64_t mLatenessUs;
-
- int64_t mSkipFrameUntilTimeUs;
};
// Causes this source to suspend pulling data from its upstream source
diff --git a/include/media/stagefright/MediaSourceSplitter.h b/include/media/stagefright/MediaSourceSplitter.h
new file mode 100644
index 0000000..568f4c2
--- /dev/null
+++ b/include/media/stagefright/MediaSourceSplitter.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This class provides a way to split a single media source into multiple sources.
+// The constructor takes in the real mediaSource and createClient() can then be
+// used to create multiple sources served from this real mediaSource.
+//
+// Usage:
+// - Create MediaSourceSplitter by passing in a real mediaSource from which
+// multiple duplicate channels are needed.
+// - Create a client using createClient() and use it as any other mediaSource.
+//
+// Note that multiple clients can be created using createClient() and
+// started/stopped in any order. MediaSourceSplitter stops the real source only
+// when all clients have been stopped.
+//
+// If a new client is created/started after some existing clients have already
+// started, the new client will start getting its read frames from the current
+// time.
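A sketch of the usage described above; 'realSource' stands in for any MediaSource (a CameraSource, for instance) and its creation is assumed.

    sp<MediaSourceSplitter> splitter = new MediaSourceSplitter(realSource);

    sp<MediaSource> previewClient = splitter->createClient();
    sp<MediaSource> recordClient  = splitter->createClient();

    previewClient->start();
    recordClient->start();

    MediaBuffer *buffer;
    status_t err = previewClient->read(&buffer);  // recordClient sees the same frames
    if (err == OK) {
        // ... consume the frame ...
        buffer->release();
    }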
+
+#ifndef MEDIA_SOURCE_SPLITTER_H_
+
+#define MEDIA_SOURCE_SPLITTER_H_
+
+#include <media/stagefright/MediaSource.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class MediaBuffer;
+class MetaData;
+
+class MediaSourceSplitter : public RefBase {
+public:
+ // Constructor
+ // mediaSource: The real mediaSource. The class keeps a reference to it to
+ // implement the various clients.
+ MediaSourceSplitter(sp<MediaSource> mediaSource);
+
+ ~MediaSourceSplitter();
+
+ // Creates a new client of base type MediaSource. Multiple clients can be
+ // created which get their data through the same real mediaSource. These
+ // clients can then be used like any other MediaSource, all of which provide
+ // data from the same real source.
+ sp<MediaSource> createClient();
+
+private:
+ // Total number of clients created through createClient().
+ int32_t mNumberOfClients;
+
+ // reference to the real MediaSource passed to the constructor.
+ sp<MediaSource> mSource;
+
+ // Stores pointer to the MediaBuffer read from the real MediaSource.
+ // All clients use this to implement the read() call.
+ MediaBuffer *mLastReadMediaBuffer;
+
+ // Status code for read from the real MediaSource. All clients return
+ // this for their read().
+ status_t mLastReadStatus;
+
+ // Boolean telling whether the real MediaSource has started.
+ bool mSourceStarted;
+
+ // List of booleans, one for each client, storing whether the corresponding
+ // client's start() has been called.
+ Vector<bool> mClientsStarted;
+
+ // Stores the number of clients which are currently started.
+ int32_t mNumberOfClientsStarted;
+
+ // Since different clients call read() asynchronously, we need to keep track
+ // of what data is currently read into the mLastReadMediaBuffer.
+ // mCurrentReadBit stores the bit for the current read buffer. This bit
+ // flips each time a new buffer is read from the source.
+ // mClientsDesiredReadBit stores the bit for the next desired read buffer
+ // for each client. This bit flips each time read() is completed for this
+ // client.
+ bool mCurrentReadBit;
+ Vector<bool> mClientsDesiredReadBit;
+
+ // Number of clients whose current read has been completed.
+ int32_t mNumberOfCurrentReads;
+
+ // Boolean telling whether the last read has been completed for all clients.
+ // The variable is reset to false each time a buffer is read from the real
+ // source.
+ bool mLastReadCompleted;
+
+ // A global mutex for access to critical sections.
+ Mutex mLock;
+
+ // Condition variable for waiting on read from source to complete.
+ Condition mReadFromSourceCondition;
+
+ // Condition variable for waiting on all client's last read to complete.
+ Condition mAllReadsCompleteCondition;
+
+ // Functions used by Client to implement the MediaSource interface.
+
+ // If the real source has not been started yet by any client, starts it.
+ status_t start(int clientId, MetaData *params);
+
+ // Stops the real source after all clients have called stop().
+ status_t stop(int clientId);
+
+ // returns the real source's getFormat().
+ sp<MetaData> getFormat(int clientId);
+
+ // If the client's desired buffer has already been read into
+ // mLastReadMediaBuffer, points the buffer to that. Otherwise, if it is the
+ // master client, reads the buffer from the source; else it waits for the
+ // master client to read the buffer and uses that.
+ status_t read(int clientId,
+ MediaBuffer **buffer, const MediaSource::ReadOptions *options = NULL);
+
+ // Not implemented right now.
+ status_t pause(int clientId);
+
+ // Function which reads a buffer from the real source into
+ // mLastReadMediaBuffer
+ void readFromSource_lock(const MediaSource::ReadOptions *options);
+
+ // Waits until read from the real source has been completed.
+ // _lock means that the function should be called when the thread has already
+ // obtained the lock for the mutex mLock.
+ void waitForReadFromSource_lock(int32_t clientId);
+
+ // Waits until all clients have read the current buffer in
+ // mLastReadMediaBuffer.
+ void waitForAllClientsLastRead_lock(int32_t clientId);
+
+ // Each client calls this after it completes its read(). Once all clients
+ // have called this for the current buffer, the function calls
+ // mAllReadsCompleteCondition.broadcast() to signal the waiting clients.
+ void signalReadComplete_lock(bool readAborted);
+
+ // Make these constructors private.
+ MediaSourceSplitter();
+ MediaSourceSplitter(const MediaSourceSplitter &);
+ MediaSourceSplitter &operator=(const MediaSourceSplitter &);
+
+ // This class implements the MediaSource interface. Each client stores a
+ // reference to the parent MediaSourceSplitter and uses it to complete the
+ // various calls.
+ class Client : public MediaSource {
+ public:
+ // Constructor stores a reference to the parent MediaSourceSplitter and its
+ // client id.
+ Client(sp<MediaSourceSplitter> splitter, int32_t clientId);
+
+ // MediaSource interface
+ virtual status_t start(MetaData *params = NULL);
+
+ virtual status_t stop();
+
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+ virtual status_t pause();
+
+ private:
+ // Reference to the parent MediaSourceSplitter
+ sp<MediaSourceSplitter> mSplitter;
+
+ // Id of this client.
+ int32_t mClientId;
+ };
+
+ friend class Client;
+};
+
+} // namespace android
+
+#endif // MEDIA_SOURCE_SPLITTER_H_
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index ea2fa52..5170a2c 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -32,12 +32,17 @@ enum {
kKeyMIMEType = 'mime', // cstring
kKeyWidth = 'widt', // int32_t
kKeyHeight = 'heig', // int32_t
+
+ // a rectangle, if absent assumed to be (0, 0, width - 1, height - 1)
+ kKeyCropRect = 'crop',
+
kKeyRotation = 'rotA', // int32_t (angle in degrees)
kKeyIFramesInterval = 'ifiv', // int32_t
kKeyStride = 'strd', // int32_t
kKeySliceHeight = 'slht', // int32_t
kKeyChannelCount = '#chn', // int32_t
- kKeySampleRate = 'srte', // int32_t (also video frame rate)
+ kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
+ kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
kKeyBitRate = 'brte', // int32_t (bps)
kKeyESDS = 'esds', // raw data
kKeyAVCC = 'avcc', // raw data
@@ -94,7 +99,6 @@ enum {
// Track authoring progress status
// kKeyTrackTimeStatus is used to track progress in elapsed time
kKeyTrackTimeStatus = 'tktm', // int64_t
- kKeyRotationDegree = 'rdge', // int32_t (clockwise, in degree)
kKeyNotRealTime = 'ntrt', // bool (int32_t)
@@ -104,6 +108,9 @@ enum {
kKeyValidSamples = 'valD', // int32_t
kKeyIsUnreadable = 'unre', // bool (int32_t)
+
+ // An indication that a video buffer has been rendered.
+ kKeyRendered = 'rend', // bool (int32_t)
};
enum {
@@ -123,6 +130,7 @@ public:
TYPE_INT64 = 'in64',
TYPE_FLOAT = 'floa',
TYPE_POINTER = 'ptr ',
+ TYPE_RECT = 'rect',
};
void clear();
@@ -134,12 +142,22 @@ public:
bool setFloat(uint32_t key, float value);
bool setPointer(uint32_t key, void *value);
+ bool setRect(
+ uint32_t key,
+ int32_t left, int32_t top,
+ int32_t right, int32_t bottom);
+
bool findCString(uint32_t key, const char **value);
bool findInt32(uint32_t key, int32_t *value);
bool findInt64(uint32_t key, int64_t *value);
bool findFloat(uint32_t key, float *value);
bool findPointer(uint32_t key, void **value);
+ bool findRect(
+ uint32_t key,
+ int32_t *left, int32_t *top,
+ int32_t *right, int32_t *bottom);
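A small sketch of the new rectangle support, using the kKeyCropRect key added above; the 640x480 frame and the inclusive coordinates are assumptions for illustration.

    sp<MetaData> meta = new MetaData;
    meta->setRect(kKeyCropRect, 0, 0, 639, 479);   // inclusive right/bottom

    int32_t left, top, right, bottom;
    if (meta->findRect(kKeyCropRect, &left, &top, &right, &bottom)) {
        int32_t cropWidth  = right - left + 1;
        int32_t cropHeight = bottom - top + 1;
    }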
+
bool setData(uint32_t key, uint32_t type, const void *data, size_t size);
bool findData(uint32_t key, uint32_t *type,
@@ -185,6 +203,10 @@ private:
}
};
+ struct Rect {
+ int32_t mLeft, mTop, mRight, mBottom;
+ };
+
KeyedVector<uint32_t, typed_data> mItems;
// MetaData &operator=(const MetaData &);
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index fed6761..f8daa4f 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -18,6 +18,7 @@
#define OMX_CODEC_H_
+#include <android/native_window.h>
#include <media/IOMX.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
@@ -38,13 +39,22 @@ struct OMXCodec : public MediaSource,
// The client wants to access the output buffer's video
// data for example for thumbnail extraction.
kClientNeedsFramebuffer = 4,
+
+ // Request for software or hardware codecs. If request
+ // can not be fullfilled, Create() returns NULL.
+ kSoftwareCodecsOnly = 8,
+ kHardwareCodecsOnly = 16,
+
+ // Store meta data in video buffers
+ kStoreMetaDataInVideoBuffers = 32,
};
static sp<MediaSource> Create(
const sp<IOMX> &omx,
const sp<MetaData> &meta, bool createEncoder,
const sp<MediaSource> &source,
const char *matchComponentName = NULL,
- uint32_t flags = 0);
+ uint32_t flags = 0,
+ const sp<ANativeWindow> &nativeWindow = NULL);
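A hedged sketch of a Create() call using the new flags and native-window argument; 'omx', 'meta', 'source' and 'nativeWindow' are assumed to come from the surrounding player code.

    sp<MediaSource> decoder = OMXCodec::Create(
            omx, meta, false /* createEncoder */, source,
            NULL /* matchComponentName */,
            OMXCodec::kHardwareCodecsOnly,
            nativeWindow);

    if (decoder == NULL) {
        // No hardware codec could satisfy the request; fall back or fail.
    }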
static void setComponentRole(
const sp<IOMX> &omx, IOMX::node_id node, bool isEncoder,
@@ -60,8 +70,6 @@ struct OMXCodec : public MediaSource,
virtual status_t pause();
- void on_message(const omx_message &msg);
-
// from MediaBufferObserver
virtual void signalBufferReturned(MediaBuffer *buffer);
@@ -69,6 +77,13 @@ protected:
virtual ~OMXCodec();
private:
+
+ // Make sure mLock is accessible to OMXCodecObserver
+ friend class OMXCodecObserver;
+
+ // Call this with mLock held
+ void on_message(const omx_message &msg);
+
enum State {
DEAD,
LOADED,
@@ -109,12 +124,18 @@ private:
kAvoidMemcopyInputRecordingFrames = 2048,
kRequiresLargerEncoderOutputBuffer = 4096,
kOutputBuffersAreUnreadable = 8192,
- kStoreMetaDataInInputVideoBuffers = 16384,
+ };
+
+ enum BufferStatus {
+ OWNED_BY_US,
+ OWNED_BY_COMPONENT,
+ OWNED_BY_NATIVE_WINDOW,
+ OWNED_BY_CLIENT,
};
struct BufferInfo {
IOMX::buffer_id mBuffer;
- bool mOwnedByComponent;
+ BufferStatus mStatus;
sp<IMemory> mMem;
size_t mSize;
void *mData;
@@ -151,7 +172,6 @@ private:
int64_t mSeekTimeUs;
ReadOptions::SeekMode mSeekMode;
int64_t mTargetTimeUs;
- int64_t mSkipTimeUs;
MediaBuffer *mLeftOverBuffer;
@@ -160,13 +180,23 @@ private:
bool mPaused;
+ sp<ANativeWindow> mNativeWindow;
+
+ // The index in each of the mPortBuffers arrays of the buffer that will be
+ // submitted to OMX next. This only applies when using buffers from a
+ // native window.
+ size_t mNextNativeBufferIndex[2];
+
// A list of indices into mPortStatus[kPortIndexOutput] filled with data.
List<size_t> mFilledBuffers;
Condition mBufferFilled;
+ bool mIsMetaDataStoredInVideoBuffers;
+
OMXCodec(const sp<IOMX> &omx, IOMX::node_id node, uint32_t quirks,
bool isEncoder, const char *mime, const char *componentName,
- const sp<MediaSource> &source);
+ const sp<MediaSource> &source,
+ const sp<ANativeWindow> &nativeWindow);
void addCodecSpecificData(const void *data, size_t size);
void clearCodecSpecificData();
@@ -217,13 +247,20 @@ private:
status_t allocateBuffers();
status_t allocateBuffersOnPort(OMX_U32 portIndex);
+ status_t allocateOutputBuffersFromNativeWindow();
+
+ status_t queueBufferToNativeWindow(BufferInfo *info);
+ status_t cancelBufferToNativeWindow(BufferInfo *info);
+ BufferInfo* dequeueBufferFromNativeWindow();
status_t freeBuffersOnPort(
OMX_U32 portIndex, bool onlyThoseWeOwn = false);
- void drainInputBuffer(IOMX::buffer_id buffer);
+ status_t freeBuffer(OMX_U32 portIndex, size_t bufIndex);
+
+ bool drainInputBuffer(IOMX::buffer_id buffer);
void fillOutputBuffer(IOMX::buffer_id buffer);
- void drainInputBuffer(BufferInfo *info);
+ bool drainInputBuffer(BufferInfo *info);
void fillOutputBuffer(BufferInfo *info);
void drainInputBuffers();
@@ -251,6 +288,7 @@ private:
status_t init();
void initOutputFormat(const sp<MetaData> &inputFormat);
+ status_t initNativeWindow();
void dumpPortStatus(OMX_U32 portIndex);
@@ -265,6 +303,8 @@ private:
uint32_t flags,
Vector<String8> *matchingCodecs);
+ void restorePatchedDataPointer(BufferInfo *info);
+
OMXCodec(const OMXCodec &);
OMXCodec &operator=(const OMXCodec &);
};
@@ -277,6 +317,7 @@ struct CodecProfileLevel {
struct CodecCapabilities {
String8 mComponentName;
Vector<CodecProfileLevel> mProfileLevels;
+ Vector<OMX_U32> mColorFormats;
};
// Return a vector of componentNames with supported profile/level pairs
diff --git a/include/media/stagefright/VideoRenderer.h b/include/media/stagefright/VideoRenderer.h
deleted file mode 100644
index f80b277..0000000
--- a/include/media/stagefright/VideoRenderer.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VIDEO_RENDERER_H_
-
-#define VIDEO_RENDERER_H_
-
-#include <sys/types.h>
-
-namespace android {
-
-class VideoRenderer {
-public:
- virtual ~VideoRenderer() {}
-
- virtual void render(
- const void *data, size_t size, void *platformPrivate) = 0;
-
-protected:
- VideoRenderer() {}
-
- VideoRenderer(const VideoRenderer &);
- VideoRenderer &operator=(const VideoRenderer &);
-};
-
-} // namespace android
-
-#endif // VIDEO_RENDERER_H_
diff --git a/include/media/stagefright/VideoSourceDownSampler.h b/include/media/stagefright/VideoSourceDownSampler.h
new file mode 100644
index 0000000..439918c
--- /dev/null
+++ b/include/media/stagefright/VideoSourceDownSampler.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// VideoSourceDownSampler implements the MediaSource interface,
+// downsampling frames provided from a real video source.
+
+#ifndef VIDEO_SOURCE_DOWN_SAMPLER_H_
+
+#define VIDEO_SOURCE_DOWN_SAMPLER_H_
+
+#include <media/stagefright/MediaSource.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class IMemory;
+class MediaBuffer;
+class MetaData;
+
+class VideoSourceDownSampler : public MediaSource {
+public:
+ virtual ~VideoSourceDownSampler();
+
+ // Constructor:
+ // videoSource: The real video source which provides the original frames.
+ // width, height: The desired width, height. These should be less than or equal
+ // to those of the real video source. We then downsample the original frames to
+ // this size.
+ VideoSourceDownSampler(const sp<MediaSource> &videoSource,
+ int32_t width, int32_t height);
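A sketch of wrapping a larger source, assuming a hypothetical 640x480 'cameraSource' created elsewhere; readers of 'smallSource' then see 320x240 frames.

    sp<MediaSource> smallSource =
            new VideoSourceDownSampler(cameraSource, 320, 240);

    smallSource->start();
    // ... read 320x240 frames from smallSource ...
    smallSource->stop();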
+
+ // MediaSource interface
+ virtual status_t start(MetaData *params = NULL);
+
+ virtual status_t stop();
+
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+ virtual status_t pause();
+
+private:
+ // Reference to the real video source.
+ sp<MediaSource> mRealVideoSource;
+
+ // Size of frames to be provided by this source.
+ int32_t mWidth;
+ int32_t mHeight;
+
+ // Size of frames provided by the real source.
+ int32_t mRealSourceWidth;
+ int32_t mRealSourceHeight;
+
+ // Down sampling parameters.
+ int32_t mDownSampleOffsetX;
+ int32_t mDownSampleOffsetY;
+ int32_t mDownSampleSkipX;
+ int32_t mDownSampleSkipY;
+
+ // True if the real source's frames need to be downsampled to get the video frame.
+ bool mNeedDownSampling;
+
+ // Meta data. This is a copy of the real source except for the width and
+ // height parameters.
+ sp<MetaData> mMeta;
+
+ // Computes the offset, skip parameters for downsampling the original frame
+ // to the desired size.
+ void computeDownSamplingParameters();
+
+ // Downsamples the frame in sourceBuffer to size (mWidth x mHeight). A new
+ // buffer is created which stores the downsampled image.
+ void downSampleYUVImage(const MediaBuffer &sourceBuffer, MediaBuffer **buffer) const;
+
+ // Disallow these.
+ VideoSourceDownSampler(const VideoSourceDownSampler &);
+ VideoSourceDownSampler &operator=(const VideoSourceDownSampler &);
+};
+
+} // namespace android
+
+#endif // VIDEO_SOURCE_DOWN_SAMPLER_H_
diff --git a/include/media/stagefright/YUVCanvas.h b/include/media/stagefright/YUVCanvas.h
new file mode 100644
index 0000000..ff70923
--- /dev/null
+++ b/include/media/stagefright/YUVCanvas.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// YUVCanvas holds a reference to a YUVImage on which it can do various
+// drawing operations. It provides various utility functions for filling,
+// cropping, etc.
+
+
+#ifndef YUV_CANVAS_H_
+
+#define YUV_CANVAS_H_
+
+#include <stdint.h>
+
+namespace android {
+
+class YUVImage;
+class Rect;
+
+class YUVCanvas {
+public:
+
+ // Constructor takes in a reference to a YUVImage on which it can do
+ // various drawing operations.
+ YUVCanvas(YUVImage &yuvImage);
+ ~YUVCanvas();
+
+ // Fills the entire image with the given YUV values.
+ void FillYUV(uint8_t yValue, uint8_t uValue, uint8_t vValue);
+
+ // Fills the rectangular region given by rect with the given YUV values.
+ void FillYUVRectangle(const Rect& rect,
+ uint8_t yValue, uint8_t uValue, uint8_t vValue);
+
+ // Copies the region srcRect from srcImage into the
+ // canvas' target image (mYUVImage) starting at
+ // (destStartX, destStartY).
+ // Note that undefined behavior may occur if srcImage is the same as the
+ // canvas' target image.
+ void CopyImageRect(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage);
+
+ // Downsamples the srcImage into the canvas' target image (mYUVImage)
+ // The downsampling copies pixels from the source image starting at
+ // (srcOffsetX, srcOffsetY) to the target image, starting at (0, 0).
+ // For each X increment in the target image, skipX pixels are skipped
+ // in the source image.
+ // Similarly for each Y increment in the target image, skipY pixels
+ // are skipped in the source image.
+ void downsample(
+ int32_t srcOffsetX, int32_t srcOffsetY,
+ int32_t skipX, int32_t skipY,
+ const YUVImage &srcImage);
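A sketch combining the calls above: clear a small target image, then downsample a larger one into it. The 640x480 and 320x240 geometries and the skip factor of 2 are illustrative assumptions.

    YUVImage bigImage(YUVImage::YUV420Planar, 640, 480);
    YUVImage smallImage(YUVImage::YUV420Planar, 320, 240);

    YUVCanvas canvas(smallImage);
    canvas.FillYUV(16, 128, 128);             // clear to black
    canvas.downsample(0, 0, 2, 2, bigImage);  // skip every other source pixel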
+
+private:
+ YUVImage& mYUVImage;
+
+ YUVCanvas(const YUVCanvas &);
+ YUVCanvas &operator=(const YUVCanvas &);
+};
+
+} // namespace android
+
+#endif // YUV_CANVAS_H_
diff --git a/include/media/stagefright/YUVImage.h b/include/media/stagefright/YUVImage.h
new file mode 100644
index 0000000..4e98618
--- /dev/null
+++ b/include/media/stagefright/YUVImage.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// A container class to hold YUV data and provide various utilities,
+// e.g. to set/get pixel values.
+// Supported formats:
+// - YUV420 Planar
+// - YUV420 Semi Planar
+//
+// Currently does not support variable strides.
+//
+// Implementation: Two simple abstractions are done to simplify access
+// to YUV channels for different formats:
+// - initializeYUVPointers() sets up pointers (mYdata, mUdata, mVdata) to
+// point to the right start locations of the different channel data depending
+// on the format.
+// - getOffsets() returns the correct offset for the different channels
+// depending on the format.
+// Location of any pixel's YUV channels can then be easily computed using these.
+//
+
+#ifndef YUV_IMAGE_H_
+
+#define YUV_IMAGE_H_
+
+#include <stdint.h>
+#include <cstring>
+
+namespace android {
+
+class Rect;
+
+class YUVImage {
+public:
+ // Supported YUV formats
+ enum YUVFormat {
+ YUV420Planar,
+ YUV420SemiPlanar
+ };
+
+ // Constructs an image with the given size, format. Also allocates and owns
+ // the required memory.
+ YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height);
+
+ // Constructs an image with the given size, format. The memory is provided
+ // by the caller and we don't own it.
+ YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height, uint8_t *buffer);
+
+ // Destructor to delete the memory if it owns it.
+ ~YUVImage();
+
+ // Returns the size of the buffer required to store the YUV data for the given
+ // format and geometry. Useful when the caller wants to allocate the requisite
+ // memory.
+ static size_t bufferSize(YUVFormat yuvFormat, int32_t width, int32_t height);
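A sketch of the caller-owned-memory path enabled by bufferSize() and the second constructor; the 320x240 semi-planar geometry is an assumption.

    size_t size = YUVImage::bufferSize(YUVImage::YUV420SemiPlanar, 320, 240);
    uint8_t *storage = new uint8_t[size];

    YUVImage image(YUVImage::YUV420SemiPlanar, 320, 240, storage);
    image.setPixelValue(0, 0, 16, 128, 128);
    // ... use the image ...

    delete[] storage;   // the image does not own caller-provided memory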
+
+ int32_t width() const {return mWidth;}
+ int32_t height() const {return mHeight;}
+
+ // Returns true if the pixel (x, y) is in the range [0, width-1] x [0, height-1]
+ // and false otherwise.
+ bool validPixel(int32_t x, int32_t y) const;
+
+ // Get the pixel YUV value at pixel (x,y).
+ // Note that the range of x is [0, width-1] and the range of y is [0, height-1].
+ // Returns true if get was successful and false otherwise.
+ bool getPixelValue(int32_t x, int32_t y,
+ uint8_t *yPtr, uint8_t *uPtr, uint8_t *vPtr) const;
+
+ // Set the pixel YUV value at pixel (x,y).
+ // Note that the range of x is [0, width-1] and the range of y is [0, height-1].
+ // Returns true if set was successful and false otherwise.
+ bool setPixelValue(int32_t x, int32_t y,
+ uint8_t yValue, uint8_t uValue, uint8_t vValue);
+
+ // Uses memcpy to copy an entire row of data
+ static void fastCopyRectangle420Planar(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage);
+
+ // Uses memcpy to copy an entire row of data
+ static void fastCopyRectangle420SemiPlanar(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage);
+
+ // Tries to use memcpy to copy entire rows of data.
+ // Returns false if fast copy is not possible for the passed image formats.
+ static bool fastCopyRectangle(
+ const Rect& srcRect,
+ int32_t destStartX, int32_t destStartY,
+ const YUVImage &srcImage, YUVImage &destImage);
+
+ // Convert the given YUV value to RGB.
+ void yuv2rgb(uint8_t yValue, uint8_t uValue, uint8_t vValue,
+ uint8_t *r, uint8_t *g, uint8_t *b) const;
+
+ // Write the image to a human readable PPM file.
+ // Returns true if the write was successful and false otherwise.
+ bool writeToPPM(const char *filename) const;
+
+private:
+ // YUV Format of the image.
+ YUVFormat mYUVFormat;
+
+ int32_t mWidth;
+ int32_t mHeight;
+
+ // Pointer to the memory buffer.
+ uint8_t *mBuffer;
+
+ // Boolean telling whether we own the memory buffer.
+ bool mOwnBuffer;
+
+ // Pointer to start of the Y data plane.
+ uint8_t *mYdata;
+
+ // Pointer to start of the U data plane. Note that in case of interleaved formats like
+ // YUV420 semiplanar, mUdata points to the start of the U data in the UV plane.
+ uint8_t *mUdata;
+
+ // Pointer to start of the V data plane. Note that in case of interleaved formats like
+ // YUV420 semiplanar, mVdata points to the start of the V data in the UV plane.
+ uint8_t *mVdata;
+
+ // Initialize the pointers mYdata, mUdata, mVdata to point to the right locations for
+ // the given format and geometry.
+ // Returns true if initialization was successful and false otherwise.
+ bool initializeYUVPointers();
+
+ // For the given pixel location, this returns the offset of the location of y, u and v
+ // data from the corresponding base pointers -- mYdata, mUdata, mVdata.
+ // Note that the range of x is [0, width-1] and the range of y is [0, height-1].
+ // Returns true if getting the offsets was successful and false otherwise.
+ bool getOffsets(int32_t x, int32_t y,
+ int32_t *yOffset, int32_t *uOffset, int32_t *vOffset) const;
+
+ // Returns the offset increments incurred in going from one data row to the next data row
+ // for the YUV channels. Note that this corresponds to data rows and not pixel rows.
+ // E.g. depending on formats, U/V channels may have only one data row corresponding
+ // to two pixel rows.
+ bool getOffsetIncrementsPerDataRow(
+ int32_t *yDataOffsetIncrement,
+ int32_t *uDataOffsetIncrement,
+ int32_t *vDataOffsetIncrement) const;
+
+ // Given the offset return the address of the corresponding channel's data.
+ uint8_t* getYAddress(int32_t offset) const;
+ uint8_t* getUAddress(int32_t offset) const;
+ uint8_t* getVAddress(int32_t offset) const;
+
+ // Given the pixel location, returns the address of the corresponding channel's data.
+ // Note that the range of x is [0, width-1] and the range of y is [0, height-1].
+ bool getYUVAddresses(int32_t x, int32_t y,
+ uint8_t **yAddr, uint8_t **uAddr, uint8_t **vAddr) const;
+
+ // Disallow implicit casting and copying.
+ YUVImage(const YUVImage &);
+ YUVImage &operator=(const YUVImage &);
+};
+
+} // namespace android
+
+#endif // YUV_IMAGE_H_
diff --git a/include/media/stagefright/foundation/ADebug.h b/include/media/stagefright/foundation/ADebug.h
index 69021d8..eb5e494 100644
--- a/include/media/stagefright/foundation/ADebug.h
+++ b/include/media/stagefright/foundation/ADebug.h
@@ -32,6 +32,7 @@ namespace android {
#define CHECK(condition) \
LOG_ALWAYS_FATAL_IF( \
!(condition), \
+ "%s", \
__FILE__ ":" LITERAL_TO_STRING(__LINE__) \
" CHECK(" #condition ") failed.")
@@ -58,10 +59,12 @@ MAKE_COMPARATOR(GT,>)
do { \
AString ___res = Compare_##suffix(x, y); \
if (!___res.empty()) { \
- LOG_ALWAYS_FATAL( \
- __FILE__ ":" LITERAL_TO_STRING(__LINE__) \
- " CHECK_" #suffix "( " #x "," #y ") failed: %s", \
- ___res.c_str()); \
+ AString ___full = \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) \
+ " CHECK_" #suffix "( " #x "," #y ") failed: "; \
+ ___full.append(___res); \
+ \
+ LOG_ALWAYS_FATAL("%s", ___full.c_str()); \
} \
} while (false)
diff --git a/include/media/stagefright/foundation/AHierarchicalStateMachine.h b/include/media/stagefright/foundation/AHierarchicalStateMachine.h
new file mode 100644
index 0000000..b5786fb
--- /dev/null
+++ b/include/media/stagefright/foundation/AHierarchicalStateMachine.h
@@ -0,0 +1,49 @@
+#ifndef A_HIERARCHICAL_STATE_MACHINE_H_
+
+#define A_HIERARCHICAL_STATE_MACHINE_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct AState : public RefBase {
+ AState(const sp<AState> &parentState = NULL);
+
+ sp<AState> parentState();
+
+protected:
+ virtual ~AState();
+
+ virtual void stateEntered();
+ virtual void stateExited();
+
+ virtual bool onMessageReceived(const sp<AMessage> &msg) = 0;
+
+private:
+ friend struct AHierarchicalStateMachine;
+
+ sp<AState> mParentState;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AState);
+};
+
+struct AHierarchicalStateMachine : public AHandler {
+ AHierarchicalStateMachine();
+
+protected:
+ virtual ~AHierarchicalStateMachine();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ // Only to be called in response to a message.
+ void changeState(const sp<AState> &state);
+
+private:
+ sp<AState> mState;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AHierarchicalStateMachine);
+};
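A minimal sketch of a concrete state built on AState; the state name, the 'strt' message id, and the handled/not-handled return convention are illustrative assumptions rather than part of this header.

    struct MyIdleState : public AState {
        MyIdleState() {}

    protected:
        virtual bool onMessageReceived(const sp<AMessage> &msg) {
            switch (msg->what()) {
                case 'strt':
                    // React to a start request while idle.
                    return true;   // message handled here

                default:
                    return false;  // defer to the parent state, if any
            }
        }
    };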
+
+} // namespace android
+
+#endif // A_HIERARCHICAL_STATE_MACHINE_H_
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index c674cba..72dc730 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -26,16 +26,22 @@
namespace android {
struct AString;
+struct Parcel;
struct AMessage : public RefBase {
AMessage(uint32_t what = 0, ALooper::handler_id target = 0);
+ static sp<AMessage> FromParcel(const Parcel &parcel);
+ void writeToParcel(Parcel *parcel) const;
+
void setWhat(uint32_t what);
uint32_t what() const;
void setTarget(ALooper::handler_id target);
ALooper::handler_id target() const;
+ void clear();
+
void setInt32(const char *name, int32_t value);
void setInt64(const char *name, int64_t value);
void setSize(const char *name, size_t value);
@@ -46,6 +52,10 @@ struct AMessage : public RefBase {
void setObject(const char *name, const sp<RefBase> &obj);
void setMessage(const char *name, const sp<AMessage> &obj);
+ void setRect(
+ const char *name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+
bool findInt32(const char *name, int32_t *value) const;
bool findInt64(const char *name, int64_t *value) const;
bool findSize(const char *name, size_t *value) const;
@@ -56,8 +66,15 @@ struct AMessage : public RefBase {
bool findObject(const char *name, sp<RefBase> *obj) const;
bool findMessage(const char *name, sp<AMessage> *obj) const;
+ bool findRect(
+ const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) const;
+
void post(int64_t delayUs = 0);
+ // Performs a deep copy of "this"; contained messages are in turn "dup'ed".
+ // Warning: RefBase items, i.e. "objects", are _not_ copied; only their
+ // refcount is incremented.
sp<AMessage> dup() const;
AString debugString(int32_t indent = 0) const;
@@ -76,11 +93,16 @@ private:
kTypeString,
kTypeObject,
kTypeMessage,
+ kTypeRect,
};
uint32_t mWhat;
ALooper::handler_id mTarget;
+ struct Rect {
+ int32_t mLeft, mTop, mRight, mBottom;
+ };
+
struct Item {
union {
int32_t int32Value;
@@ -91,6 +113,7 @@ private:
void *ptrValue;
RefBase *refValue;
AString *stringValue;
+ Rect rectValue;
} u;
const char *mName;
Type mType;
@@ -102,7 +125,6 @@ private:
Item mItems[kMaxNumItems];
size_t mNumItems;
- void clear();
Item *allocateItem(const char *name);
void freeItem(Item *item);
const Item *findItem(const char *name, Type type) const;
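A short sketch (not part of the patch) of the new rect accessors and the documented dup() semantics; the "crop" key and the values are arbitrary.

    #include <media/stagefright/foundation/AMessage.h>

    namespace android {

    static void rectRoundTrip() {
        sp<AMessage> msg = new AMessage;
        msg->setRect("crop", 0 /* left */, 0 /* top */, 1279 /* right */, 719 /* bottom */);

        // Deep copy of the items; any RefBase "objects" would merely have their
        // refcount bumped, per the dup() warning above.
        sp<AMessage> copy = msg->dup();

        int32_t left, top, right, bottom;
        if (copy->findRect("crop", &left, &top, &right, &bottom)) {
            // left/top/right/bottom now hold 0, 0, 1279, 719.
        }
    }

    }  // namespace android

The new FromParcel()/writeToParcel() pair presumably exists so a message can be flattened across a Binder call; the per-type marshaling rules aren't visible from this header alone.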
diff --git a/include/private/surfaceflinger/SharedBufferStack.h b/include/private/surfaceflinger/SharedBufferStack.h
index d6ae5e9..9d589cf 100644
--- a/include/private/surfaceflinger/SharedBufferStack.h
+++ b/include/private/surfaceflinger/SharedBufferStack.h
@@ -285,6 +285,8 @@ public:
uint32_t getTransform(int buffer) const;
status_t resize(int newNumBuffers);
+ status_t grow(int newNumBuffers);
+ status_t shrink(int newNumBuffers);
SharedBufferStack::Statistics getStats() const;
@@ -346,6 +348,14 @@ private:
int mNumBuffers;
BufferList mBufferList;
+ struct BuffersAvailableCondition : public ConditionBase {
+ int mNumBuffers;
+ inline BuffersAvailableCondition(SharedBufferServer* sbs,
+ int numBuffers);
+ inline bool operator()() const;
+ inline const char* name() const { return "BuffersAvailableCondition"; }
+ };
+
struct UnlockUpdate : public UpdateBase {
const int lockedBuffer;
inline UnlockUpdate(SharedBufferBase* sbb, int lockedBuffer);
diff --git a/include/private/ui/sw_gralloc_handle.h b/include/private/ui/sw_gralloc_handle.h
deleted file mode 100644
index b3d333e..0000000
--- a/include/private/ui/sw_gralloc_handle.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_UI_PRIVATE_SW_GRALLOC_HANDLE_H
-#define ANDROID_UI_PRIVATE_SW_GRALLOC_HANDLE_H
-
-#include <stdint.h>
-#include <limits.h>
-#include <sys/cdefs.h>
-#include <hardware/gralloc.h>
-#include <errno.h>
-
-#include <cutils/native_handle.h>
-
-namespace android {
-
-/*****************************************************************************/
-
-struct sw_gralloc_handle_t : public native_handle
-{
- // file-descriptors
- int fd;
- // ints
- int magic;
- int size;
- int base;
- int prot;
- int pid;
-
- static const int sNumInts = 5;
- static const int sNumFds = 1;
- static const int sMagic = '_sgh';
-
- sw_gralloc_handle_t() :
- fd(-1), magic(sMagic), size(0), base(0), prot(0), pid(getpid())
- {
- version = sizeof(native_handle);
- numInts = sNumInts;
- numFds = sNumFds;
- }
- ~sw_gralloc_handle_t() {
- magic = 0;
- }
-
- static int validate(const native_handle* h) {
- const sw_gralloc_handle_t* hnd = (const sw_gralloc_handle_t*)h;
- if (!h || h->version != sizeof(native_handle) ||
- h->numInts != sNumInts || h->numFds != sNumFds ||
- hnd->magic != sMagic)
- {
- return -EINVAL;
- }
- return 0;
- }
-
- static status_t alloc(uint32_t w, uint32_t h, int format,
- int usage, buffer_handle_t* handle, int32_t* stride);
- static status_t free(sw_gralloc_handle_t* hnd);
- static status_t registerBuffer(sw_gralloc_handle_t* hnd);
- static status_t unregisterBuffer(sw_gralloc_handle_t* hnd);
- static status_t lock(sw_gralloc_handle_t* hnd, int usage,
- int l, int t, int w, int h, void** vaddr);
- static status_t unlock(sw_gralloc_handle_t* hnd);
-};
-
-/*****************************************************************************/
-
-}; // namespace android
-
-#endif /* ANDROID_UI_PRIVATE_SW_GRALLOC_HANDLE_H */