diff options
Diffstat (limited to 'include')
29 files changed, 1284 insertions, 74 deletions
diff --git a/include/camera/Camera.h b/include/camera/Camera.h index e6d84ba..8d8edd6 100644 --- a/include/camera/Camera.h +++ b/include/camera/Camera.h @@ -22,8 +22,6 @@ namespace android { -class ISurface; - /* * A set of bit masks for specifying how the received preview frames are * handled before the previewCallback() call. @@ -96,6 +94,14 @@ enum { // or CAMERA_MSG_COMPRESSED_IMAGE. This is not allowed to be set during // preview. CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3, + + // cmdType to disable/enable shutter sound. + // In sendCommand passing arg1 = 0 will disable, + // while passing arg1 = 1 will enable the shutter sound. + CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4, + + // cmdType to play recording sound. + CAMERA_CMD_PLAY_RECORDING_SOUND = 5, }; // camera fatal errors @@ -164,9 +170,8 @@ public: status_t getStatus() { return mStatus; } - // pass the buffered ISurface to the camera service + // pass the buffered Surface to the camera service status_t setPreviewDisplay(const sp<Surface>& surface); - status_t setPreviewDisplay(const sp<ISurface>& surface); // start preview mode, must call setPreviewDisplay first status_t startPreview(); @@ -207,6 +212,15 @@ public: // send command to camera driver status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2); + // return the total number of available video buffers. + int32_t getNumberOfVideoBuffers() const; + + // return the individual video buffer corresponding to the given index. + sp<IMemory> getVideoBuffer(int32_t index) const; + + // tell camera hal to store meta data or real YUV in video buffers. 
+ status_t storeMetaDataInBuffers(bool enabled); + void setListener(const sp<CameraListener>& listener); void setPreviewCallbackFlags(int preview_callback_flag); diff --git a/include/camera/CameraHardwareInterface.h b/include/camera/CameraHardwareInterface.h index 6a66e3c..5465441 100644 --- a/include/camera/CameraHardwareInterface.h +++ b/include/camera/CameraHardwareInterface.h @@ -18,8 +18,11 @@ #define ANDROID_HARDWARE_CAMERA_HARDWARE_INTERFACE_H #include <binder/IMemory.h> +#include <ui/egl/android_natives.h> #include <utils/RefBase.h> #include <surfaceflinger/ISurface.h> +#include <ui/android_native_buffer.h> +#include <ui/GraphicBuffer.h> #include <camera/Camera.h> #include <camera/CameraParameters.h> @@ -86,8 +89,8 @@ class CameraHardwareInterface : public virtual RefBase { public: virtual ~CameraHardwareInterface() { } - /** Return the IMemoryHeap for the preview image heap */ - virtual sp<IMemoryHeap> getPreviewHeap() const = 0; + /** Set the ANativeWindow to which preview frames are sent */ + virtual status_t setPreviewWindow(const sp<ANativeWindow>& buf) = 0; /** Return the IMemoryHeap for the raw image heap */ virtual sp<IMemoryHeap> getRawHeap() const = 0; @@ -143,6 +146,82 @@ public: virtual bool previewEnabled() = 0; /** + * Retrieve the total number of available buffers from camera hal for passing + * video frame data in a recording session. Must be called again if a new + * recording session is started. + * + * This method should be called after startRecording(), since + * the some camera hal may choose to allocate the video buffers only after + * recording is started. + * + * Some camera hal may not implement this method, and 0 can be returned to + * indicate that this feature is not available. + * + * @return the number of video buffers that camera hal makes available. + * Zero (0) is returned to indicate that camera hal does not support + * this feature. 
+ */ + virtual int32_t getNumberOfVideoBuffers() const { return 0; } + + /** + * Retrieve the video buffer corresponding to the given index in a + * recording session. Must be called again if a new recording session + * is started. + * + * It allows a client to retrieve all video buffers that camera hal makes + * available to passing video frame data by calling this method with all + * valid index values. The valid index value ranges from 0 to n, where + * n = getNumberOfVideoBuffers() - 1. With an index outside of the valid + * range, 0 must be returned. This method should be called after + * startRecording(). + * + * The video buffers should NOT be modified/released by camera hal + * until stopRecording() is called and all outstanding video buffers + * previously sent out via CAMERA_MSG_VIDEO_FRAME have been released + * via releaseVideoBuffer(). + * + * @param index an index to retrieve the corresponding video buffer. + * + * @return the video buffer corresponding to the given index. + */ + virtual sp<IMemory> getVideoBuffer(int32_t index) const { return 0; } + + /** + * Request the camera hal to store meta data or real YUV data in + * the video buffers send out via CAMERA_MSG_VIDEO_FRRAME for a + * recording session. If it is not called, the default camera + * hal behavior is to store real YUV data in the video buffers. + * + * This method should be called before startRecording() in order + * to be effective. + * + * If meta data is stored in the video buffers, it is up to the + * receiver of the video buffers to interpret the contents and + * to find the actual frame data with the help of the meta data + * in the buffer. How this is done is outside of the scope of + * this method. + * + * Some camera hal may not support storing meta data in the video + * buffers, but all camera hal should support storing real YUV data + * in the video buffers. 
If the camera hal does not support storing + * the meta data in the video buffers when it is requested to do + * do, INVALID_OPERATION must be returned. It is very useful for + * the camera hal to pass meta data rather than the actual frame + * data directly to the video encoder, since the amount of the + * uncompressed frame data can be very large if video size is large. + * + * @param enable if true to instruct the camera hal to store + * meta data in the video buffers; false to instruct + * the camera hal to store real YUV data in the video + * buffers. + * + * @return OK on success. + */ + virtual status_t storeMetaDataInBuffers(bool enable) { + return enable? INVALID_OPERATION: OK; + } + + /** * Start record mode. When a record image is available a CAMERA_MSG_VIDEO_FRAME * message is sent with the corresponding frame. Every record frame must be released * by calling releaseRecordingFrame(). diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h index 53039a0..60031a4 100644 --- a/include/camera/CameraParameters.h +++ b/include/camera/CameraParameters.h @@ -59,6 +59,27 @@ public: void setPreviewSize(int width, int height); void getPreviewSize(int *width, int *height) const; void getSupportedPreviewSizes(Vector<Size> &sizes) const; + + // Set the dimensions in pixels to the given width and height + // for video frames. The given width and height must be one + // of the supported dimensions returned from + // getSupportedVideoSizes(). Must not be called if + // getSupportedVideoSizes() returns an empty Vector of Size. + void setVideoSize(int width, int height); + // Retrieve the current dimensions (width and height) + // in pixels for video frames, which must be one of the + // supported dimensions returned from getSupportedVideoSizes(). + // Must not be called if getSupportedVideoSizes() returns an + // empty Vector of Size. 
+ void getVideoSize(int *width, int *height) const; + // Retrieve a Vector of supported dimensions (width and height) + // in pixels for video frames. If sizes returned from the method + // is empty, the camera does not support calls to setVideoSize() + // or getVideoSize(). In adddition, it also indicates that + // the camera only has a single output, and does not have + // separate output for video frames and preview frame. + void getSupportedVideoSizes(Vector<Size> &sizes) const; + void setPreviewFrameRate(int fps); int getPreviewFrameRate() const; void getPreviewFpsRange(int *min_fps, int *max_fps) const; @@ -281,6 +302,16 @@ public: // Example value: "0.95,1.9,Infinity" or "0.049,0.05,0.051". Read only. static const char KEY_FOCUS_DISTANCES[]; + // The current dimensions in pixels (width x height) for video frames. + // The width and height must be one of the supported sizes retrieved + // via KEY_SUPPORTED_VIDEO_SIZES. + // Example value: "1280x720". Read/write. + static const char KEY_VIDEO_SIZE[]; + // A list of the supported dimensions in pixels (width x height) + // for video frames. See CAMERA_MSG_VIDEO_FRAME for details in + // frameworks/base/include/camera/Camera.h. + // Example: "176x144,1280x720". Read only. + static const char KEY_SUPPORTED_VIDEO_SIZES[]; // The image format for video frames. See CAMERA_MSG_VIDEO_FRAME in // frameworks/base/include/camera/Camera.h. // Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read only. @@ -354,7 +385,10 @@ public: // for barcode reading. static const char SCENE_MODE_BARCODE[]; - // Formats for setPreviewFormat and setPictureFormat. 
+ // Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT, + // and KEY_VIDEO_FRAME_FORMAT + // Planar variant of the YUV420 color format + static const char PIXEL_FORMAT_YUV420P[]; static const char PIXEL_FORMAT_YUV422SP[]; static const char PIXEL_FORMAT_YUV420SP[]; // NV21 static const char PIXEL_FORMAT_YUV422I[]; // YUY2 diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h index 6fcf9e5..b69e075 100644 --- a/include/camera/ICamera.h +++ b/include/camera/ICamera.h @@ -20,7 +20,7 @@ #include <utils/RefBase.h> #include <binder/IInterface.h> #include <binder/Parcel.h> -#include <surfaceflinger/ISurface.h> +#include <surfaceflinger/Surface.h> #include <binder/IMemory.h> #include <utils/String8.h> #include <camera/Camera.h> @@ -45,8 +45,8 @@ public: // allow other processes to use this ICamera interface virtual status_t unlock() = 0; - // pass the buffered ISurface to the camera service - virtual status_t setPreviewDisplay(const sp<ISurface>& surface) = 0; + // pass the buffered Surface to the camera service + virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0; // set the preview callback flag to affect how the received frames from // preview are handled. @@ -90,6 +90,15 @@ public: // send command to camera driver virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) = 0; + + // return the total number of available video buffers + virtual int32_t getNumberOfVideoBuffers() const = 0; + + // return the individual video buffer corresponding to the given index. + virtual sp<IMemory> getVideoBuffer(int32_t index) const = 0; + + // tell the camera hal to store meta data or real YUV data in video buffers. 
+ virtual status_t storeMetaDataInBuffers(bool enabled) = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h index c5765a9..1758cdd 100644 --- a/include/drm/drm_framework_common.h +++ b/include/drm/drm_framework_common.h @@ -217,6 +217,10 @@ public: * POSIX based Decrypt API set for container based DRM */ static const int CONTAINER_BASED = 0x02; + /** + * Decrypt API for Widevine streams + */ + static const int WV_BASED = 0x3; }; /** diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index 9fd905f..1081c35 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -262,11 +262,15 @@ public: DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100, DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200, DEVICE_OUT_AUX_DIGITAL = 0x400, + DEVICE_OUT_ANLG_DOCK_HEADSET = 0x800, + DEVICE_OUT_DGTL_DOCK_HEADSET = 0x1000, DEVICE_OUT_DEFAULT = 0x8000, DEVICE_OUT_ALL = (DEVICE_OUT_EARPIECE | DEVICE_OUT_SPEAKER | DEVICE_OUT_WIRED_HEADSET | DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET | DEVICE_OUT_BLUETOOTH_SCO_CARKIT | DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES | - DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL | DEVICE_OUT_DEFAULT), + DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL | + DEVICE_OUT_ANLG_DOCK_HEADSET | DEVICE_OUT_DGTL_DOCK_HEADSET | + DEVICE_OUT_DEFAULT), DEVICE_OUT_ALL_A2DP = (DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES | DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER), @@ -309,6 +313,8 @@ public: FORCE_WIRED_ACCESSORY, FORCE_BT_CAR_DOCK, FORCE_BT_DESK_DOCK, + FORCE_ANALOG_DOCK, + FORCE_DIGITAL_DOCK, NUM_FORCE_CONFIG, FORCE_DEFAULT = FORCE_NONE }; diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h index af9a7ed..3d77278 100644 --- a/include/media/IMediaPlayer.h +++ b/include/media/IMediaPlayer.h @@ -25,6 +25,7 @@ 
namespace android { class Parcel; class ISurface; +class Surface; class IMediaPlayer: public IInterface { @@ -33,7 +34,8 @@ public: virtual void disconnect() = 0; - virtual status_t setVideoSurface(const sp<ISurface>& surface) = 0; + virtual status_t setVideoISurface(const sp<ISurface>& surface) = 0; + virtual status_t setVideoSurface(const sp<Surface>& surface) = 0; virtual status_t prepareAsync() = 0; virtual status_t start() = 0; virtual status_t stop() = 0; @@ -46,8 +48,6 @@ public: virtual status_t setAudioStreamType(int type) = 0; virtual status_t setLooping(int loop) = 0; virtual status_t setVolume(float leftVolume, float rightVolume) = 0; - virtual status_t suspend() = 0; - virtual status_t resume() = 0; virtual status_t setAuxEffectSendLevel(float level) = 0; virtual status_t attachAuxEffect(int effectId) = 0; diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h index 54adca8..28be7c1 100644 --- a/include/media/IMediaRecorder.h +++ b/include/media/IMediaRecorder.h @@ -22,7 +22,7 @@ namespace android { -class ISurface; +class Surface; class ICamera; class IMediaRecorderClient; @@ -32,7 +32,7 @@ public: DECLARE_META_INTERFACE(MediaRecorder); virtual status_t setCamera(const sp<ICamera>& camera) = 0; - virtual status_t setPreviewSurface(const sp<ISurface>& surface) = 0; + virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0; virtual status_t setVideoSource(int vs) = 0; virtual status_t setAudioSource(int as) = 0; virtual status_t setOutputFormat(int of) = 0; @@ -40,6 +40,7 @@ public: virtual status_t setAudioEncoder(int ae) = 0; virtual status_t setOutputFile(const char* path) = 0; virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0; + virtual status_t setOutputFileAuxiliary(int fd) = 0; virtual status_t setVideoSize(int width, int height) = 0; virtual status_t setVideoFrameRate(int frames_per_second) = 0; virtual status_t setParameters(const String8& params) = 0; @@ -68,4 +69,3 @@ public: }; // 
namespace android #endif // ANDROID_IMEDIARECORDER_H - diff --git a/include/media/IOMX.h b/include/media/IOMX.h index f794766..fa775e7 100644 --- a/include/media/IOMX.h +++ b/include/media/IOMX.h @@ -19,6 +19,7 @@ #define ANDROID_IOMX_H_ #include <binder/IInterface.h> +#include <ui/GraphicBuffer.h> #include <utils/List.h> #include <utils/String8.h> @@ -78,10 +79,20 @@ public: node_id node, OMX_INDEXTYPE index, const void *params, size_t size) = 0; + virtual status_t storeMetaDataInBuffers( + node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0; + + virtual status_t enableGraphicBuffers( + node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0; + virtual status_t useBuffer( node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms, buffer_id *buffer) = 0; + virtual status_t useGraphicBuffer( + node_id node, OMX_U32 port_index, + const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0; + // This API clearly only makes sense if the caller lives in the // same process as the callee, i.e. 
is the media_server, as the // returned "buffer_data" pointer is just that, a pointer into local @@ -115,8 +126,7 @@ public: const char *componentName, OMX_COLOR_FORMATTYPE colorFormat, size_t encodedWidth, size_t encodedHeight, - size_t displayWidth, size_t displayHeight, - int32_t rotationDegrees) = 0; + size_t displayWidth, size_t displayHeight) = 0; // Note: These methods are _not_ virtual, it exists as a wrapper around // the virtual "createRenderer" method above facilitating extraction @@ -126,16 +136,14 @@ public: const char *componentName, OMX_COLOR_FORMATTYPE colorFormat, size_t encodedWidth, size_t encodedHeight, - size_t displayWidth, size_t displayHeight, - int32_t rotationDegrees); + size_t displayWidth, size_t displayHeight); sp<IOMXRenderer> createRendererFromJavaSurface( JNIEnv *env, jobject javaSurface, const char *componentName, OMX_COLOR_FORMATTYPE colorFormat, size_t encodedWidth, size_t encodedHeight, - size_t displayWidth, size_t displayHeight, - int32_t rotationDegrees); + size_t displayWidth, size_t displayHeight); }; struct omx_message { diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h index 0521709..2d55a55 100644 --- a/include/media/MediaPlayerInterface.h +++ b/include/media/MediaPlayerInterface.h @@ -33,6 +33,7 @@ namespace android { class Parcel; class ISurface; +class Surface; template<typename T> class SortedVector; @@ -105,7 +106,8 @@ public: const KeyedVector<String8, String8> *headers = NULL) = 0; virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0; - virtual status_t setVideoSurface(const sp<ISurface>& surface) = 0; + virtual status_t setVideoISurface(const sp<ISurface>& surface) = 0; + virtual status_t setVideoSurface(const sp<Surface>& surface) = 0; virtual status_t prepare() = 0; virtual status_t prepareAsync() = 0; virtual status_t start() = 0; @@ -118,8 +120,6 @@ public: virtual status_t reset() = 0; virtual status_t setLooping(int loop) = 0; virtual player_type 
playerType() = 0; - virtual status_t suspend() { return INVALID_OPERATION; } - virtual status_t resume() { return INVALID_OPERATION; } virtual void setNotifyCallback(void* cookie, notify_callback_f notifyFunc) { mCookie = cookie; mNotify = notifyFunc; } diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h index c3cd361..aa97874 100644 --- a/include/media/MediaProfiles.h +++ b/include/media/MediaProfiles.h @@ -25,7 +25,20 @@ namespace android { enum camcorder_quality { CAMCORDER_QUALITY_LOW = 0, - CAMCORDER_QUALITY_HIGH = 1 + CAMCORDER_QUALITY_HIGH = 1, + CAMCORDER_QUALITY_QCIF = 2, + CAMCORDER_QUALITY_CIF = 3, + CAMCORDER_QUALITY_480P = 4, + CAMCORDER_QUALITY_720P = 5, + CAMCORDER_QUALITY_1080P = 6, + + CAMCORDER_QUALITY_TIME_LAPSE_LOW = 1000, + CAMCORDER_QUALITY_TIME_LAPSE_HIGH = 1001, + CAMCORDER_QUALITY_TIME_LAPSE_QCIF = 1002, + CAMCORDER_QUALITY_TIME_LAPSE_CIF = 1003, + CAMCORDER_QUALITY_TIME_LAPSE_480P = 1004, + CAMCORDER_QUALITY_TIME_LAPSE_720P = 1005, + CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006 }; enum video_decoder { @@ -68,6 +81,12 @@ public: camcorder_quality quality) const; /** + * Returns true if a profile for the given camera at the given quality exists, + * or false if not. + */ + bool hasCamcorderProfile(int cameraId, camcorder_quality quality) const; + + /** * Returns the output file formats supported. 
*/ Vector<output_format> getOutputFileFormats() const; @@ -252,6 +271,8 @@ private: Vector<int> mLevels; }; + int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const; + // Debug static void logVideoCodec(const VideoCodec& codec); static void logAudioCodec(const AudioCodec& codec); @@ -281,8 +302,25 @@ private: // If the xml configuration file does not exist, use hard-coded values static MediaProfiles* createDefaultInstance(); - static CamcorderProfile *createDefaultCamcorderLowProfile(); - static CamcorderProfile *createDefaultCamcorderHighProfile(); + + static CamcorderProfile *createDefaultCamcorderQcifProfile(camcorder_quality quality); + static CamcorderProfile *createDefaultCamcorderCifProfile(camcorder_quality quality); + static void createDefaultCamcorderLowProfiles( + MediaProfiles::CamcorderProfile **lowProfile, + MediaProfiles::CamcorderProfile **lowSpecificProfile); + static void createDefaultCamcorderHighProfiles( + MediaProfiles::CamcorderProfile **highProfile, + MediaProfiles::CamcorderProfile **highSpecificProfile); + + static CamcorderProfile *createDefaultCamcorderTimeLapseQcifProfile(camcorder_quality quality); + static CamcorderProfile *createDefaultCamcorderTimeLapse480pProfile(camcorder_quality quality); + static void createDefaultCamcorderTimeLapseLowProfiles( + MediaProfiles::CamcorderProfile **lowTimeLapseProfile, + MediaProfiles::CamcorderProfile **lowSpecificTimeLapseProfile); + static void createDefaultCamcorderTimeLapseHighProfiles( + MediaProfiles::CamcorderProfile **highTimeLapseProfile, + MediaProfiles::CamcorderProfile **highSpecificTimeLapseProfile); + static void createDefaultCamcorderProfiles(MediaProfiles *profiles); static void createDefaultVideoEncoders(MediaProfiles *profiles); static void createDefaultAudioEncoders(MediaProfiles *profiles); diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h index 5e9e368..c42346e 100644 --- a/include/media/MediaRecorderBase.h +++ 
b/include/media/MediaRecorderBase.h @@ -22,7 +22,7 @@ namespace android { -class ISurface; +class Surface; struct MediaRecorderBase { MediaRecorderBase() {} @@ -37,9 +37,10 @@ struct MediaRecorderBase { virtual status_t setVideoSize(int width, int height) = 0; virtual status_t setVideoFrameRate(int frames_per_second) = 0; virtual status_t setCamera(const sp<ICamera>& camera) = 0; - virtual status_t setPreviewSurface(const sp<ISurface>& surface) = 0; + virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0; virtual status_t setOutputFile(const char *path) = 0; virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0; + virtual status_t setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;} virtual status_t setParameters(const String8& params) = 0; virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0; virtual status_t prepare() = 0; diff --git a/include/media/PVMediaRecorder.h b/include/media/PVMediaRecorder.h index c091c39..4b44ccc 100644 --- a/include/media/PVMediaRecorder.h +++ b/include/media/PVMediaRecorder.h @@ -23,7 +23,7 @@ namespace android { -class ISurface; +class Surface; class ICamera; class AuthorDriverWrapper; @@ -41,7 +41,7 @@ public: virtual status_t setVideoSize(int width, int height); virtual status_t setVideoFrameRate(int frames_per_second); virtual status_t setCamera(const sp<ICamera>& camera); - virtual status_t setPreviewSurface(const sp<ISurface>& surface); + virtual status_t setPreviewSurface(const sp<Surface>& surface); virtual status_t setOutputFile(const char *path); virtual status_t setOutputFile(int fd, int64_t offset, int64_t length); virtual status_t setParameters(const String8& params); @@ -66,4 +66,3 @@ private: }; // namespace android #endif // ANDROID_PVMEDIARECORDER_H - diff --git a/include/media/PVPlayer.h b/include/media/PVPlayer.h index df50981..657e7a6 100644 --- a/include/media/PVPlayer.h +++ b/include/media/PVPlayer.h @@ -43,7 +43,8 @@ public: const char *url, const 
KeyedVector<String8, String8> *headers); virtual status_t setDataSource(int fd, int64_t offset, int64_t length); - virtual status_t setVideoSurface(const sp<ISurface>& surface); + virtual status_t setVideoISurface(const sp<ISurface>& surface); + virtual status_t setVideoSurface(const sp<Surface>& surface); virtual status_t prepare(); virtual status_t prepareAsync(); virtual status_t start(); diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h index 207191d..88b0c3e 100644 --- a/include/media/mediaplayer.h +++ b/include/media/mediaplayer.h @@ -169,8 +169,6 @@ public: status_t invoke(const Parcel& request, Parcel *reply); status_t setMetadataFilter(const Parcel& filter); status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata); - status_t suspend(); - status_t resume(); status_t setAudioSessionId(int sessionId); int getAudioSessionId(); status_t setAuxEffectSendLevel(float level); diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h index 5ab1640..32b6fa1 100644 --- a/include/media/mediarecorder.h +++ b/include/media/mediarecorder.h @@ -173,6 +173,7 @@ public: status_t setAudioEncoder(int ae); status_t setOutputFile(const char* path); status_t setOutputFile(int fd, int64_t offset, int64_t length); + status_t setOutputFileAuxiliary(int fd); status_t setVideoSize(int width, int height); status_t setVideoFrameRate(int frames_per_second); status_t setParameters(const String8& params); @@ -199,6 +200,7 @@ private: bool mIsAudioEncoderSet; bool mIsVideoEncoderSet; bool mIsOutputFileSet; + bool mIsAuxiliaryOutputFileSet; Mutex mLock; Mutex mNotifyLock; }; diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h index 0d397ac..74c9d5d 100644 --- a/include/media/mediascanner.h +++ b/include/media/mediascanner.h @@ -38,8 +38,7 @@ struct MediaScanner { typedef bool (*ExceptionCheck)(void* env); virtual status_t processDirectory( - const char *path, const char *extensions, - MediaScannerClient &client, + 
const char *path, MediaScannerClient &client, ExceptionCheck exceptionCheck, void *exceptionEnv); void setLocale(const char *locale); @@ -55,9 +54,8 @@ private: char *mLocale; status_t doProcessDirectory( - char *path, int pathRemaining, const char *extensions, - MediaScannerClient &client, ExceptionCheck exceptionCheck, - void *exceptionEnv); + char *path, int pathRemaining, MediaScannerClient &client, + ExceptionCheck exceptionCheck, void *exceptionEnv); MediaScanner(const MediaScanner &); MediaScanner &operator=(const MediaScanner &); diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h index 3192d03..e6c9f93 100644 --- a/include/media/stagefright/CameraSource.h +++ b/include/media/stagefright/CameraSource.h @@ -20,39 +20,167 @@ #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MediaSource.h> +#include <camera/ICamera.h> +#include <camera/CameraParameters.h> #include <utils/List.h> #include <utils/RefBase.h> -#include <utils/threads.h> namespace android { -class ICamera; class IMemory; class Camera; +class Surface; class CameraSource : public MediaSource, public MediaBufferObserver { public: + /** + * Factory method to create a new CameraSource using the current + * settings (such as video size, frame rate, color format, etc) + * from the default camera. + * + * @return NULL on error. + */ static CameraSource *Create(); - static CameraSource *CreateFromCamera(const sp<Camera> &camera); + + /** + * Factory method to create a new CameraSource. + * + * @param camera the video input frame data source. If it is NULL, + * we will try to connect to the camera with the given + * cameraId. + * + * @param cameraId the id of the camera that the source will connect + * to if camera is NULL; otherwise ignored. 
+ * + * @param videoSize the dimension (in pixels) of the video frame + * @param frameRate the target frames per second + * @param surface the preview surface for display where preview + * frames are sent to + * @param storeMetaDataInVideoBuffers true to request the camera + * source to store meta data in video buffers; false to + * request the camera source to store real YUV frame data + * in the video buffers. The camera source may not support + * storing meta data in video buffers, if so, a request + * to do that will NOT be honored. To find out whether + * meta data is actually being stored in video buffers + * during recording, call isMetaDataStoredInVideoBuffers(). + * + * @return NULL on error. + */ + static CameraSource *CreateFromCamera(const sp<ICamera> &camera, + int32_t cameraId, + Size videoSize, + int32_t frameRate, + const sp<Surface>& surface, + bool storeMetaDataInVideoBuffers = false); virtual ~CameraSource(); virtual status_t start(MetaData *params = NULL); virtual status_t stop(); + virtual status_t read( + MediaBuffer **buffer, const ReadOptions *options = NULL); + /** + * Check whether a CameraSource object is properly initialized. + * Must call this method before stop(). + * @return OK if initialization has successfully completed. + */ + virtual status_t initCheck() const; + + /** + * Returns the MetaData associated with the CameraSource, + * including: + * kKeyColorFormat: YUV color format of the video frames + * kKeyWidth, kKeyHeight: dimension (in pixels) of the video frames + * kKeySampleRate: frame rate in frames per second + * kKeyMIMEType: always fixed to be MEDIA_MIMETYPE_VIDEO_RAW + */ virtual sp<MetaData> getFormat(); - virtual status_t read( - MediaBuffer **buffer, const ReadOptions *options = NULL); + /** + * Retrieve the total number of video buffers available from + * this source. 
+ * + * This method is useful if these video buffers are used + * for passing video frame data to other media components, + * such as OMX video encoders, in order to eliminate the + * memcpy of the data. + * + * @return the total numbner of video buffers. Returns 0 to + * indicate that this source does not make the video + * buffer information availalble. + */ + size_t getNumberOfVideoBuffers() const; + + /** + * Retrieve the individual video buffer available from + * this source. + * + * @param index the index corresponding to the video buffer. + * Valid range of the index is [0, n], where n = + * getNumberOfVideoBuffers() - 1. + * + * @return the video buffer corresponding to the given index. + * If index is out of range, 0 should be returned. + */ + sp<IMemory> getVideoBuffer(size_t index) const; + + /** + * Tell whether this camera source stores meta data or real YUV + * frame data in video buffers. + * + * @return true if meta data is stored in the video + * buffers; false if real YUV data is stored in + * the video buffers. + */ + bool isMetaDataStoredInVideoBuffers() const; virtual void signalBufferReturned(MediaBuffer* buffer); -private: - friend class CameraSourceListener; +protected: + enum CameraFlags { + FLAGS_SET_CAMERA = 1L << 0, + FLAGS_HOT_CAMERA = 1L << 1, + }; + + int32_t mCameraFlags; + Size mVideoSize; + int32_t mVideoFrameRate; + int32_t mColorFormat; + status_t mInitCheck; - sp<Camera> mCamera; + sp<Camera> mCamera; + sp<Surface> mSurface; sp<MetaData> mMeta; + int64_t mStartTimeUs; + int32_t mNumFramesReceived; + int64_t mLastFrameTimestampUs; + bool mStarted; + + CameraSource(const sp<ICamera>& camera, int32_t cameraId, + Size videoSize, int32_t frameRate, + const sp<Surface>& surface, + bool storeMetaDataInVideoBuffers); + + virtual void startCameraRecording(); + virtual void stopCameraRecording(); + virtual void releaseRecordingFrame(const sp<IMemory>& frame); + + // Returns true if need to skip the current frame. 
+ // Called from dataCallbackTimestamp. + virtual bool skipCurrentFrame(int64_t timestampUs) {return false;} + + // Callback called when still camera raw data is available. + virtual void dataCallback(int32_t msgType, const sp<IMemory> &data) {} + + virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType, + const sp<IMemory> &data); + +private: + friend class CameraSourceListener; + Mutex mLock; Condition mFrameAvailableCondition; Condition mFrameCompleteCondition; @@ -60,25 +188,33 @@ private: List<sp<IMemory> > mFramesBeingEncoded; List<int64_t> mFrameTimes; - int64_t mStartTimeUs; int64_t mFirstFrameTimeUs; - int64_t mLastFrameTimestampUs; - int32_t mNumFramesReceived; int32_t mNumFramesEncoded; int32_t mNumFramesDropped; int32_t mNumGlitches; int64_t mGlitchDurationThresholdUs; bool mCollectStats; - bool mStarted; - - CameraSource(const sp<Camera> &camera); - - void dataCallbackTimestamp( - int64_t timestampUs, int32_t msgType, const sp<IMemory> &data); + bool mIsMetaDataStoredInVideoBuffers; void releaseQueuedFrames(); void releaseOneRecordingFrame(const sp<IMemory>& frame); + + status_t init(const sp<ICamera>& camera, int32_t cameraId, + Size videoSize, int32_t frameRate, + bool storeMetaDataInVideoBuffers); + status_t isCameraAvailable(const sp<ICamera>& camera, int32_t cameraId); + status_t isCameraColorFormatSupported(const CameraParameters& params); + status_t configureCamera(CameraParameters* params, + int32_t width, int32_t height, + int32_t frameRate); + + status_t checkVideoSize(const CameraParameters& params, + int32_t width, int32_t height); + + status_t checkFrameRate(const CameraParameters& params, + int32_t frameRate); + CameraSource(const CameraSource &); CameraSource &operator=(const CameraSource &); }; diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h new file mode 100644 index 0000000..afe7287 --- /dev/null +++ b/include/media/stagefright/CameraSourceTimeLapse.h 
@@ -0,0 +1,243 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef CAMERA_SOURCE_TIME_LAPSE_H_ + +#define CAMERA_SOURCE_TIME_LAPSE_H_ + +#include <pthread.h> + +#include <utils/RefBase.h> +#include <utils/threads.h> + +namespace android { + +class ICamera; +class IMemory; +class Camera; + +class CameraSourceTimeLapse : public CameraSource { +public: + static CameraSourceTimeLapse *CreateFromCamera( + const sp<ICamera> &camera, + int32_t cameraId, + Size videoSize, + int32_t videoFrameRate, + const sp<Surface>& surface, + int64_t timeBetweenTimeLapseFrameCaptureUs); + + virtual ~CameraSourceTimeLapse(); + + // If the frame capture interval is large, read will block for a long time. + // Due to the way the mediaRecorder framework works, a stop() call from + // mediaRecorder waits until the read returns, causing a long wait for + // stop() to return. To avoid this, we can make read() return a copy of the + // last read frame with the same time stamp frequently. This keeps the + // read() call from blocking too long. Calling this function quickly + // captures another frame, keeps its copy, and enables this mode of read() + // returning quickly. + void startQuickReadReturns(); + +private: + // If true, will use still camera takePicture() for time lapse frames + // If false, will use the videocamera frames instead. 
+ bool mUseStillCameraForTimeLapse; + + // Size of picture taken from still camera. This may be larger than the size + // of the video, as still camera may not support the exact video resolution + // demanded. See setPictureSizeToClosestSupported(). + int32_t mPictureWidth; + int32_t mPictureHeight; + + // size of the encoded video. + int32_t mVideoWidth; + int32_t mVideoHeight; + + // True if we need to crop the still camera image to get the video frame. + bool mNeedCropping; + + // Start location of the cropping rectangle. + int32_t mCropRectStartX; + int32_t mCropRectStartY; + + // Time between capture of two frames during time lapse recording + // Negative value indicates that timelapse is disabled. + int64_t mTimeBetweenTimeLapseFrameCaptureUs; + + // Time between two frames in final video (1/frameRate) + int64_t mTimeBetweenTimeLapseVideoFramesUs; + + // Real timestamp of the last encoded time lapse frame + int64_t mLastTimeLapseFrameRealTimestampUs; + + // Thread id of thread which takes still picture and sleeps in a loop. + pthread_t mThreadTimeLapse; + + // Variable set in dataCallbackTimestamp() to help skipCurrentFrame() + // to know if current frame needs to be skipped. + bool mSkipCurrentFrame; + + // Lock for accessing mCameraIdle + Mutex mCameraIdleLock; + + // Condition variable to wait on if camera is not yet idle. Once the + // camera gets idle, this variable will be signalled. + Condition mCameraIdleCondition; + + // True if camera is in preview mode and ready for takePicture(). + // False after a call to takePicture() but before the final compressed + // data callback has been called and preview has been restarted. + volatile bool mCameraIdle; + + // True if stop() is waiting for camera to get idle, i.e. for the last + // takePicture() to complete. This is needed so that dataCallbackTimestamp() + // can return immediately. + volatile bool mStopWaitingForIdleCamera; + + // Lock for accessing quick stop variables. 
+ Mutex mQuickStopLock; + + // Condition variable to wake up still picture thread. + Condition mTakePictureCondition; + + // mQuickStop is set to true if we use quick read() returns, otherwise it is set + // to false. Once in this mode read() return a copy of the last read frame + // with the same time stamp. See startQuickReadReturns(). + volatile bool mQuickStop; + + // Forces the next frame passed to dataCallbackTimestamp() to be read + // as a time lapse frame. Used by startQuickReadReturns() so that the next + // frame wakes up any blocking read. + volatile bool mForceRead; + + // Stores a copy of the MediaBuffer read in the last read() call after + // mQuickStop was true. + MediaBuffer* mLastReadBufferCopy; + + // Status code for last read. + status_t mLastReadStatus; + + CameraSourceTimeLapse( + const sp<ICamera> &camera, + int32_t cameraId, + Size videoSize, + int32_t videoFrameRate, + const sp<Surface>& surface, + int64_t timeBetweenTimeLapseFrameCaptureUs); + + // Wrapper over CameraSource::signalBufferReturned() to implement quick stop. + // It only handles the case when mLastReadBufferCopy is signalled. Otherwise + // it calls the base class' function. + virtual void signalBufferReturned(MediaBuffer* buffer); + + // Wrapper over CameraSource::read() to implement quick stop. + virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL); + + // For still camera case starts a thread which calls camera's takePicture() + // in a loop. For video camera case, just starts the camera's video recording. + virtual void startCameraRecording(); + + // For still camera case joins the thread created in startCameraRecording(). + // For video camera case, just stops the camera's video recording. + virtual void stopCameraRecording(); + + // For still camera case don't need to do anything as memory is locally + // allocated with refcounting. + // For video camera case just tell the camera to release the frame. 
+ virtual void releaseRecordingFrame(const sp<IMemory>& frame); + + // mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current + // frame needs to be skipped and this function just returns the value of mSkipCurrentFrame. + virtual bool skipCurrentFrame(int64_t timestampUs); + + // Handles the callback to handle raw frame data from the still camera. + // Creates a copy of the frame data as the camera can reuse the frame memory + // once this callback returns. The function also sets a new timestamp corresponding + // to one frame time ahead of the last encoded frame's time stamp. It then + // calls dataCallbackTimestamp() of the base class with the copied data and the + // modified timestamp, which will think that it received the frame from a video + // camera and proceed as usual. + virtual void dataCallback(int32_t msgType, const sp<IMemory> &data); + + // In the video camera case calls skipFrameAndModifyTimeStamp() to modify + // timestamp and set mSkipCurrentFrame. + // Then it calls the base CameraSource::dataCallbackTimestamp() + virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType, + const sp<IMemory> &data); + + // Convenience function to fill mLastReadBufferCopy from the just read + // buffer. + void fillLastReadBufferCopy(MediaBuffer& sourceBuffer); + + // If the passed in size (width x height) is a supported preview size, + // the function sets the camera's preview size to it and returns true. + // Otherwise returns false. + bool trySettingPreviewSize(int32_t width, int32_t height); + + // The still camera may not support the demanded video width and height. + // We look for the supported picture sizes from the still camera and + // choose the smallest one with either dimensions higher than the corresponding + // video dimensions. The still picture will be cropped to get the video frame. 
+ // The function returns true if the camera supports picture sizes greater than + // or equal to the passed in width and height, and false otherwise. + bool setPictureSizeToClosestSupported(int32_t width, int32_t height); + + // Computes the offset of the rectangle from where to start cropping the + // still image into the video frame. We choose the center of the image to be + // cropped. The offset is stored in (mCropRectStartX, mCropRectStartY). + bool computeCropRectangleOffset(); + + // Crops the source data into a smaller image starting at + // (mCropRectStartX, mCropRectStartY) and of the size of the video frame. + // The data is returned into a newly allocated IMemory. + sp<IMemory> cropYUVImage(const sp<IMemory> &source_data); + + // When video camera is used for time lapse capture, returns true + // until enough time has passed for the next time lapse frame. When + // the frame needs to be encoded, it returns false and also modifies + // the time stamp to be one frame time ahead of the last encoded + // frame's time stamp. + bool skipFrameAndModifyTimeStamp(int64_t *timestampUs); + + // Wrapper to enter threadTimeLapseEntry() + static void *ThreadTimeLapseWrapper(void *me); + + // Runs a loop which sleeps until a still picture is required + // and then calls mCamera->takePicture() to take the still picture. + // Used only in the case mUseStillCameraForTimeLapse = true. + void threadTimeLapseEntry(); + + // Wrapper to enter threadStartPreview() + static void *ThreadStartPreviewWrapper(void *me); + + // Starts the camera's preview. + void threadStartPreview(); + + // Starts thread ThreadStartPreviewWrapper() for restarting preview. + // Needs to be done in a thread so that dataCallback() which calls this function + // can return, and the camera can know that takePicture() is done. + void restartPreview(); + + // Creates a copy of source_data into a new memory of final type MemoryBase. 
+ sp<IMemory> createIMemoryCopy(const sp<IMemory> &source_data); + + CameraSourceTimeLapse(const CameraSourceTimeLapse &); + CameraSourceTimeLapse &operator=(const CameraSourceTimeLapse &); +}; + +} // namespace android + +#endif // CAMERA_SOURCE_TIME_LAPSE_H_ diff --git a/include/media/stagefright/HardwareAPI.h b/include/media/stagefright/HardwareAPI.h index 63f11d1..4fd281b 100644 --- a/include/media/stagefright/HardwareAPI.h +++ b/include/media/stagefright/HardwareAPI.h @@ -21,10 +21,76 @@ #include <media/stagefright/OMXPluginBase.h> #include <media/stagefright/VideoRenderer.h> #include <surfaceflinger/ISurface.h> +#include <ui/android_native_buffer.h> #include <utils/RefBase.h> #include <OMX_Component.h> +namespace android { + +// A pointer to this struct is passed to the OMX_SetParameter when the extension +// index for the 'OMX.google.android.index.enableAndroidNativeBuffers' extension +// is given. +// +// When Android native buffer use is disabled for a port (the default state), +// the OMX node should operate as normal, and expect UseBuffer calls to set its +// buffers. This is the mode that will be used when CPU access to the buffer is +// required. +// +// When Android native buffer use has been enabled for a given port, the video +// color format for the port is to be interpreted as an Android pixel format +// rather than an OMX color format. The node should then expect to receive +// UseAndroidNativeBuffer calls (via OMX_SetParameter) rather than UseBuffer +// calls for that port. +struct EnableAndroidNativeBuffersParams { + OMX_U32 nSize; + OMX_VERSIONTYPE nVersion; + OMX_U32 nPortIndex; + OMX_BOOL enable; +}; + +// A pointer to this struct is passed to OMX_SetParameter() when the extension +// index "OMX.google.android.index.storeMetaDataInBuffers" +// is given. 
+// +// When meta data is stored in the video buffers passed between OMX clients +// and OMX components, interpretation of the buffer data is up to the +// buffer receiver, and the data may or may not be the actual video data, but +// some information helpful for the receiver to locate the actual data. +// The buffer receiver thus needs to know how to interpret what is stored +// in these buffers, with mechanisms pre-determined externally. How to +// interpret the meta data is outside of the scope of this method. +// +// Currently, this is specifically used to pass meta data from video source +// (camera component, for instance) to video encoder to avoid memcpying of +// input video frame data. To do this, bStoreMetaData is set to OMX_TRUE. +// If bStoreMetaData is set to false, real YUV frame data will be stored +// in the buffers. In addition, if no OMX_SetParameter() call is made +// with the corresponding extension index, real YUV data is stored +// in the buffers. +struct StoreMetaDataInBuffersParams { + OMX_U32 nSize; + OMX_VERSIONTYPE nVersion; + OMX_U32 nPortIndex; + OMX_BOOL bStoreMetaData; +}; + +// A pointer to this struct is passed to OMX_SetParameter when the extension +// index for the 'OMX.google.android.index.useAndroidNativeBuffer' extension is +// given. This call will only be performed if a prior call was made with the +// 'OMX.google.android.index.enableAndroidNativeBuffers' extension index, +// enabling use of Android native buffers. 
+struct UseAndroidNativeBufferParams { + OMX_U32 nSize; + OMX_VERSIONTYPE nVersion; + OMX_U32 nPortIndex; + OMX_PTR pAppPrivate; + OMX_BUFFERHEADERTYPE **bufferHeader; + const sp<android_native_buffer_t>& nativeBuffer; +}; + +} // namespace android + extern android::VideoRenderer *createRenderer( const android::sp<android::ISurface> &surface, const char *componentName, @@ -32,15 +98,6 @@ extern android::VideoRenderer *createRenderer( size_t displayWidth, size_t displayHeight, size_t decodedWidth, size_t decodedHeight); -extern android::VideoRenderer *createRendererWithRotation( - const android::sp<android::ISurface> &surface, - const char *componentName, - OMX_COLOR_FORMATTYPE colorFormat, - size_t displayWidth, size_t displayHeight, - size_t decodedWidth, size_t decodedHeight, - int32_t rotationDegrees); - extern android::OMXPluginBase *createOMXPlugin(); #endif // HARDWARE_API_H_ - diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h index 7bf07eb..bb469e5 100644 --- a/include/media/stagefright/MPEG4Writer.h +++ b/include/media/stagefright/MPEG4Writer.h @@ -154,7 +154,6 @@ private: bool exceedsFileDurationLimit(); bool isFileStreamable() const; void trackProgressStatus(const Track* track, int64_t timeUs, status_t err = OK); - void writeCompositionMatrix(int32_t degrees); MPEG4Writer(const MPEG4Writer &); MPEG4Writer &operator=(const MPEG4Writer &); diff --git a/include/media/stagefright/MediaBuffer.h b/include/media/stagefright/MediaBuffer.h index 339e6fb..c1c4f94 100644 --- a/include/media/stagefright/MediaBuffer.h +++ b/include/media/stagefright/MediaBuffer.h @@ -25,6 +25,7 @@ namespace android { +class GraphicBuffer; class MediaBuffer; class MediaBufferObserver; class MetaData; @@ -48,6 +49,8 @@ public: MediaBuffer(size_t size); + MediaBuffer(const sp<GraphicBuffer>& graphicBuffer); + // Decrements the reference count and returns the buffer to its // associated MediaBufferGroup if the reference count drops to 0. 
void release(); @@ -63,6 +66,8 @@ public: void set_range(size_t offset, size_t length); + sp<GraphicBuffer> graphicBuffer() const; + sp<MetaData> meta_data(); // Clears meta data and resets the range to the full extent. @@ -94,6 +99,7 @@ private: void *mData; size_t mSize, mRangeOffset, mRangeLength; + sp<GraphicBuffer> mGraphicBuffer; bool mOwnsData; diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h index 92ce068..2d50ca5 100644 --- a/include/media/stagefright/MediaDefs.h +++ b/include/media/stagefright/MediaDefs.h @@ -44,6 +44,8 @@ extern const char *MEDIA_MIMETYPE_CONTAINER_OGG; extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA; extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS; +extern const char *MEDIA_MIMETYPE_CONTAINER_WVM; + } // namespace android #endif // MEDIA_DEFS_H_ diff --git a/include/media/stagefright/MediaSourceSplitter.h b/include/media/stagefright/MediaSourceSplitter.h new file mode 100644 index 0000000..568f4c2 --- /dev/null +++ b/include/media/stagefright/MediaSourceSplitter.h @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This class provides a way to split a single media source into multiple sources. +// The constructor takes in the real mediaSource and createClient() can then be +// used to create multiple sources served from this real mediaSource. 
+// +// Usage: +// - Create MediaSourceSplitter by passing in a real mediaSource from which +// multiple duplicate channels are needed. +// - Create a client using createClient() and use it as any other mediaSource. +// +// Note that multiple clients can be created using createClient() and +// started/stopped in any order. MediaSourceSplitter stops the real source only +// when all clients have been stopped. +// +// If a new client is created/started after some existing clients have already +// started, the new client will start getting its read frames from the current +// time. + +#ifndef MEDIA_SOURCE_SPLITTER_H_ + +#define MEDIA_SOURCE_SPLITTER_H_ + +#include <media/stagefright/MediaSource.h> +#include <utils/threads.h> +#include <utils/Vector.h> +#include <utils/RefBase.h> + +namespace android { + +class MediaBuffer; +class MetaData; + +class MediaSourceSplitter : public RefBase { +public: + // Constructor + // mediaSource: The real mediaSource. The class keeps a reference to it to + // implement the various clients. + MediaSourceSplitter(sp<MediaSource> mediaSource); + + ~MediaSourceSplitter(); + + // Creates a new client of base type MediaSource. Multiple clients can be + // created which get their data through the same real mediaSource. These + // clients can then be used like any other MediaSource, all of which provide + // data from the same real source. + sp<MediaSource> createClient(); + +private: + // Total number of clients created through createClient(). + int32_t mNumberOfClients; + + // reference to the real MediaSource passed to the constructor. + sp<MediaSource> mSource; + + // Stores pointer to the MediaBuffer read from the real MediaSource. + // All clients use this to implement the read() call. + MediaBuffer *mLastReadMediaBuffer; + + // Status code for read from the real MediaSource. All clients return + // this for their read(). + status_t mLastReadStatus; + + // Boolean telling whether the real MediaSource has started. 
+ bool mSourceStarted; + + // List of booleans, one for each client, storing whether the corresponding + // client's start() has been called. + Vector<bool> mClientsStarted; + + // Stores the number of clients which are currently started. + int32_t mNumberOfClientsStarted; + + // Since different clients call read() asynchronously, we need to keep track + // of what data is currently read into the mLastReadMediaBuffer. + // mCurrentReadBit stores the bit for the current read buffer. This bit + // flips each time a new buffer is read from the source. + // mClientsDesiredReadBit stores the bit for the next desired read buffer + // for each client. This bit flips each time read() is completed for this + // client. + bool mCurrentReadBit; + Vector<bool> mClientsDesiredReadBit; + + // Number of clients whose current read has been completed. + int32_t mNumberOfCurrentReads; + + // Boolean telling whether the last read has been completed for all clients. + // The variable is reset to false each time buffer is read from the real + // source. + bool mLastReadCompleted; + + // A global mutex for access to critical sections. + Mutex mLock; + + // Condition variable for waiting on read from source to complete. + Condition mReadFromSourceCondition; + + // Condition variable for waiting on all client's last read to complete. + Condition mAllReadsCompleteCondition; + + // Functions used by Client to implement the MediaSource interface. + + // If the real source has not been started yet by any client, starts it. + status_t start(int clientId, MetaData *params); + + // Stops the real source after all clients have called stop(). + status_t stop(int clientId); + + // returns the real source's getFormat(). + sp<MetaData> getFormat(int clientId); + + // If the client's desired buffer has already been read into + // mLastReadMediaBuffer, points the buffer to that. 
Otherwise if it is the + // master client, reads the buffer from source or else waits for the master + // client to read the buffer and uses that. + status_t read(int clientId, + MediaBuffer **buffer, const MediaSource::ReadOptions *options = NULL); + + // Not implemented right now. + status_t pause(int clientId); + + // Function which reads a buffer from the real source into + // mLastReadMediaBuffer + void readFromSource_lock(const MediaSource::ReadOptions *options); + + // Waits until read from the real source has been completed. + // _lock means that the function should be called when the thread has already + // obtained the lock for the mutex mLock. + void waitForReadFromSource_lock(int32_t clientId); + + // Waits until all clients have read the current buffer in + // mLastReadCompleted. + void waitForAllClientsLastRead_lock(int32_t clientId); + + // Each client calls this after it completes its read(). Once all clients + // have called this for the current buffer, the function calls + // mAllReadsCompleteCondition.broadcast() to signal the waiting clients. + void signalReadComplete_lock(bool readAborted); + + // Make these constructors private. + MediaSourceSplitter(); + MediaSourceSplitter(const MediaSourceSplitter &); + MediaSourceSplitter &operator=(const MediaSourceSplitter &); + + // This class implements the MediaSource interface. Each client stores a + // reference to the parent MediaSourceSplitter and uses it to complete the + // various calls. + class Client : public MediaSource { + public: + // Constructor stores reference to the parent MediaSourceSplitter and it + // client id. 
+ Client(sp<MediaSourceSplitter> splitter, int32_t clientId); + + // MediaSource interface + virtual status_t start(MetaData *params = NULL); + + virtual status_t stop(); + + virtual sp<MetaData> getFormat(); + + virtual status_t read( + MediaBuffer **buffer, const ReadOptions *options = NULL); + + virtual status_t pause(); + + private: + // Reference to the parent MediaSourceSplitter + sp<MediaSourceSplitter> mSplitter; + + // Id of this client. + int32_t mClientId; + }; + + friend class Client; +}; + +} // namespace android + +#endif // MEDIA_SOURCE_SPLITTER_H_ diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h index a69d9af..a72aabf 100644 --- a/include/media/stagefright/MetaData.h +++ b/include/media/stagefright/MetaData.h @@ -32,7 +32,6 @@ enum { kKeyMIMEType = 'mime', // cstring kKeyWidth = 'widt', // int32_t kKeyHeight = 'heig', // int32_t - kKeyRotation = 'rotA', // int32_t (angle in degrees) kKeyIFramesInterval = 'ifiv', // int32_t kKeyStride = 'strd', // int32_t kKeySliceHeight = 'slht', // int32_t @@ -93,7 +92,6 @@ enum { // Track authoring progress status // kKeyTrackTimeStatus is used to track progress in elapsed time kKeyTrackTimeStatus = 'tktm', // int64_t - kKeyRotationDegree = 'rdge', // int32_t (clockwise, in degree) kKeyNotRealTime = 'ntrt', // bool (int32_t) @@ -103,6 +101,9 @@ enum { kKeyValidSamples = 'valD', // int32_t kKeyIsUnreadable = 'unre', // bool (int32_t) + + // An indication that a video buffer has been rendered. 
+ kKeyRendered = 'rend', // bool (int32_t) }; enum { diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h index fed6761..517868c 100644 --- a/include/media/stagefright/OMXCodec.h +++ b/include/media/stagefright/OMXCodec.h @@ -18,6 +18,7 @@ #define OMX_CODEC_H_ +#include <android/native_window.h> #include <media/IOMX.h> #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MediaSource.h> @@ -38,13 +39,22 @@ struct OMXCodec : public MediaSource, // The client wants to access the output buffer's video // data for example for thumbnail extraction. kClientNeedsFramebuffer = 4, + + // Request for software or hardware codecs. If request + // can not be fulfilled, Create() returns NULL. + kSoftwareCodecsOnly = 8, + kHardwareCodecsOnly = 16, + + // Store meta data in video buffers + kStoreMetaDataInVideoBuffers = 32, }; static sp<MediaSource> Create( const sp<IOMX> &omx, const sp<MetaData> &meta, bool createEncoder, const sp<MediaSource> &source, const char *matchComponentName = NULL, - uint32_t flags = 0); + uint32_t flags = 0, + const sp<ANativeWindow> &nativeWindow = NULL); static void setComponentRole( const sp<IOMX> &omx, IOMX::node_id node, bool isEncoder, @@ -109,12 +119,12 @@ private: kAvoidMemcopyInputRecordingFrames = 2048, kRequiresLargerEncoderOutputBuffer = 4096, kOutputBuffersAreUnreadable = 8192, - kStoreMetaDataInInputVideoBuffers = 16384, }; struct BufferInfo { IOMX::buffer_id mBuffer; bool mOwnedByComponent; + bool mOwnedByNativeWindow; sp<IMemory> mMem; size_t mSize; void *mData; @@ -160,13 +170,23 @@ private: bool mPaused; + sp<ANativeWindow> mNativeWindow; + + // The index in each of the mPortBuffers arrays of the buffer that will be + // submitted to OMX next. This only applies when using buffers from a + // native window. + size_t mNextNativeBufferIndex[2]; + + // A list of indices into mPortStatus[kPortIndexOutput] filled with data. 
List<size_t> mFilledBuffers; Condition mBufferFilled; + bool mIsMetaDataStoredInVideoBuffers; + OMXCodec(const sp<IOMX> &omx, IOMX::node_id node, uint32_t quirks, bool isEncoder, const char *mime, const char *componentName, - const sp<MediaSource> &source); + const sp<MediaSource> &source, + const sp<ANativeWindow> &nativeWindow); void addCodecSpecificData(const void *data, size_t size); void clearCodecSpecificData(); @@ -217,10 +237,17 @@ private: status_t allocateBuffers(); status_t allocateBuffersOnPort(OMX_U32 portIndex); + status_t allocateOutputBuffersFromNativeWindow(); + + status_t queueBufferToNativeWindow(BufferInfo *info); + status_t cancelBufferToNativeWindow(BufferInfo *info); + BufferInfo* dequeueBufferFromNativeWindow(); status_t freeBuffersOnPort( OMX_U32 portIndex, bool onlyThoseWeOwn = false); + status_t freeBuffer(OMX_U32 portIndex, size_t bufIndex); + void drainInputBuffer(IOMX::buffer_id buffer); void fillOutputBuffer(IOMX::buffer_id buffer); void drainInputBuffer(BufferInfo *info); @@ -251,6 +278,7 @@ private: status_t init(); void initOutputFormat(const sp<MetaData> &inputFormat); + status_t initNativeWindow(); void dumpPortStatus(OMX_U32 portIndex); diff --git a/include/media/stagefright/VideoSourceDownSampler.h b/include/media/stagefright/VideoSourceDownSampler.h new file mode 100644 index 0000000..439918c --- /dev/null +++ b/include/media/stagefright/VideoSourceDownSampler.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// VideoSourceDownSampler implements the MediaSource interface, +// downsampling frames provided from a real video source. + +#ifndef VIDEO_SOURCE_DOWN_SAMPLER_H_ + +#define VIDEO_SOURCE_DOWN_SAMPLER_H_ + +#include <media/stagefright/MediaSource.h> +#include <utils/RefBase.h> + +namespace android { + +class IMemory; +class MediaBuffer; +class MetaData; + +class VideoSourceDownSampler : public MediaSource { +public: + virtual ~VideoSourceDownSampler(); + + // Constructor: + // videoSource: The real video source which provides the original frames. + // width, height: The desired width, height. These should be less than or equal + // to those of the real video source. We then downsample the original frames to + // this size. + VideoSourceDownSampler(const sp<MediaSource> &videoSource, + int32_t width, int32_t height); + + // MediaSource interface + virtual status_t start(MetaData *params = NULL); + + virtual status_t stop(); + + virtual sp<MetaData> getFormat(); + + virtual status_t read( + MediaBuffer **buffer, const ReadOptions *options = NULL); + + virtual status_t pause(); + +private: + // Reference to the real video source. + sp<MediaSource> mRealVideoSource; + + // Size of frames to be provided by this source. + int32_t mWidth; + int32_t mHeight; + + // Size of frames provided by the real source. + int32_t mRealSourceWidth; + int32_t mRealSourceHeight; + + // Down sampling parameters. + int32_t mDownSampleOffsetX; + int32_t mDownSampleOffsetY; + int32_t mDownSampleSkipX; + int32_t mDownSampleSkipY; + + // True if we need to crop the still video image to get the video frame. + bool mNeedDownSampling; + + // Meta data. This is a copy of the real source except for the width and + // height parameters. + sp<MetaData> mMeta; + + // Computes the offset, skip parameters for downsampling the original frame + // to the desired size. 
+ void computeDownSamplingParameters(); + + // Downsamples the frame in sourceBuffer to size (mWidth x mHeight). A new + // buffer is created which stores the downsampled image. + void downSampleYUVImage(const MediaBuffer &sourceBuffer, MediaBuffer **buffer) const; + + // Disallow these. + VideoSourceDownSampler(const VideoSourceDownSampler &); + VideoSourceDownSampler &operator=(const VideoSourceDownSampler &); +}; + +} // namespace android + +#endif // VIDEO_SOURCE_DOWN_SAMPLER_H_ diff --git a/include/media/stagefright/YUVCanvas.h b/include/media/stagefright/YUVCanvas.h new file mode 100644 index 0000000..ff70923 --- /dev/null +++ b/include/media/stagefright/YUVCanvas.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// YUVCanvas holds a reference to a YUVImage on which it can do various +// drawing operations. It provides various utility functions for filling, +// cropping, etc. + + +#ifndef YUV_CANVAS_H_ + +#define YUV_CANVAS_H_ + +#include <stdint.h> + +namespace android { + +class YUVImage; +class Rect; + +class YUVCanvas { +public: + + // Constructor takes in reference to a yuvImage on which it can do + // various drawing operations. + YUVCanvas(YUVImage &yuvImage); + ~YUVCanvas(); + + // Fills the entire image with the given YUV values. 
+ void FillYUV(uint8_t yValue, uint8_t uValue, uint8_t vValue); + + // Fills the rectangular region [startX,endX]x[startY,endY] with the given YUV values. + void FillYUVRectangle(const Rect& rect, + uint8_t yValue, uint8_t uValue, uint8_t vValue); + + // Copies the region [startX,endX]x[startY,endY] from srcImage into the + // canvas' target image (mYUVImage) starting at + // (destinationStartX,destinationStartY). + // Note that undefined behavior may occur if srcImage is same as the canvas' + // target image. + void CopyImageRect( + const Rect& srcRect, + int32_t destStartX, int32_t destStartY, + const YUVImage &srcImage); + + // Downsamples the srcImage into the canvas' target image (mYUVImage) + // The downsampling copies pixels from the source image starting at + // (srcOffsetX, srcOffsetY) to the target image, starting at (0, 0). + // For each X increment in the target image, skipX pixels are skipped + // in the source image. + // Similarly for each Y increment in the target image, skipY pixels + // are skipped in the source image. + void downsample( + int32_t srcOffsetX, int32_t srcOffsetY, + int32_t skipX, int32_t skipY, + const YUVImage &srcImage); + +private: + YUVImage& mYUVImage; + + YUVCanvas(const YUVCanvas &); + YUVCanvas &operator=(const YUVCanvas &); +}; + +} // namespace android + +#endif // YUV_CANVAS_H_ diff --git a/include/media/stagefright/YUVImage.h b/include/media/stagefright/YUVImage.h new file mode 100644 index 0000000..4e98618 --- /dev/null +++ b/include/media/stagefright/YUVImage.h @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// A container class to hold YUV data and provide various utilities, +// e.g. to set/get pixel values. +// Supported formats: +// - YUV420 Planar +// - YUV420 Semi Planar +// +// Currently does not support variable strides. +// +// Implementation: Two simple abstractions are done to simplify access +// to YUV channels for different formats: +// - initializeYUVPointers() sets up pointers (mYdata, mUdata, mVdata) to +// point to the right start locations of the different channel data depending +// on the format. +// - getOffsets() returns the correct offset for the different channels +// depending on the format. +// Location of any pixel's YUV channels can then be easily computed using these. +// + +#ifndef YUV_IMAGE_H_ + +#define YUV_IMAGE_H_ + +#include <stdint.h> +#include <cstring> + +namespace android { + +class Rect; + +class YUVImage { +public: + // Supported YUV formats + enum YUVFormat { + YUV420Planar, + YUV420SemiPlanar + }; + + // Constructs an image with the given size, format. Also allocates and owns + // the required memory. + YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height); + + // Constructs an image with the given size, format. The memory is provided + // by the caller and we don't own it. + YUVImage(YUVFormat yuvFormat, int32_t width, int32_t height, uint8_t *buffer); + + // Destructor to delete the memory if it owns it. + ~YUVImage(); + + // Returns the size of the buffer required to store the YUV data for the given + // format and geometry. Useful when the caller wants to allocate the requisite + // memory. 
+ static size_t bufferSize(YUVFormat yuvFormat, int32_t width, int32_t height); + + int32_t width() const {return mWidth;} + int32_t height() const {return mHeight;} + + // Returns true if pixel is in the range [0, width-1] x [0, height-1] + // and false otherwise. + bool validPixel(int32_t x, int32_t y) const; + + // Get the pixel YUV value at pixel (x,y). + // Note that the range of x is [0, width-1] and the range of y is [0, height-1]. + // Returns true if get was successful and false otherwise. + bool getPixelValue(int32_t x, int32_t y, + uint8_t *yPtr, uint8_t *uPtr, uint8_t *vPtr) const; + + // Set the pixel YUV value at pixel (x,y). + // Note that the range of x is [0, width-1] and the range of y is [0, height-1]. + // Returns true if set was successful and false otherwise. + bool setPixelValue(int32_t x, int32_t y, + uint8_t yValue, uint8_t uValue, uint8_t vValue); + + // Uses memcpy to copy an entire row of data + static void fastCopyRectangle420Planar( + const Rect& srcRect, + int32_t destStartX, int32_t destStartY, + const YUVImage &srcImage, YUVImage &destImage); + + // Uses memcpy to copy an entire row of data + static void fastCopyRectangle420SemiPlanar( + const Rect& srcRect, + int32_t destStartX, int32_t destStartY, + const YUVImage &srcImage, YUVImage &destImage); + + // Tries to use memcpy to copy entire rows of data. + // Returns false if fast copy is not possible for the passed image formats. + static bool fastCopyRectangle( + const Rect& srcRect, + int32_t destStartX, int32_t destStartY, + const YUVImage &srcImage, YUVImage &destImage); + + // Convert the given YUV value to RGB. + void yuv2rgb(uint8_t yValue, uint8_t uValue, uint8_t vValue, + uint8_t *r, uint8_t *g, uint8_t *b) const; + + // Write the image to a human readable PPM file. + // Returns true if write was successful and false otherwise. + bool writeToPPM(const char *filename) const; + +private: + // YUV Format of the image. 
+ YUVFormat mYUVFormat; + + int32_t mWidth; + int32_t mHeight; + + // Pointer to the memory buffer. + uint8_t *mBuffer; + + // Boolean telling whether we own the memory buffer. + bool mOwnBuffer; + + // Pointer to start of the Y data plane. + uint8_t *mYdata; + + // Pointer to start of the U data plane. Note that in case of interleaved formats like + // YUV420 semiplanar, mUdata points to the start of the U data in the UV plane. + uint8_t *mUdata; + + // Pointer to start of the V data plane. Note that in case of interleaved formats like + // YUV420 semiplanar, mVdata points to the start of the V data in the UV plane. + uint8_t *mVdata; + + // Initialize the pointers mYdata, mUdata, mVdata to point to the right locations for + // the given format and geometry. + // Returns true if initialize was successful and false otherwise. + bool initializeYUVPointers(); + + // For the given pixel location, this returns the offset of the location of y, u and v + // data from the corresponding base pointers -- mYdata, mUdata, mVdata. + // Note that the range of x is [0, width-1] and the range of y is [0, height-1]. + // Returns true if getting offsets was successful and false otherwise. + bool getOffsets(int32_t x, int32_t y, + int32_t *yOffset, int32_t *uOffset, int32_t *vOffset) const; + + // Returns the offset increments incurred in going from one data row to the next data row + // for the YUV channels. Note that this corresponds to data rows and not pixel rows. + // E.g. depending on formats, U/V channels may have only one data row corresponding + // to two pixel rows. + bool getOffsetIncrementsPerDataRow( + int32_t *yDataOffsetIncrement, + int32_t *uDataOffsetIncrement, + int32_t *vDataOffsetIncrement) const; + + // Given the offset return the address of the corresponding channel's data. 
+ uint8_t* getYAddress(int32_t offset) const; + uint8_t* getUAddress(int32_t offset) const; + uint8_t* getVAddress(int32_t offset) const; + + // Given the pixel location, returns the address of the corresponding channel's data. + // Note that the range of x is [0, width-1] and the range of y is [0, height-1]. + bool getYUVAddresses(int32_t x, int32_t y, + uint8_t **yAddr, uint8_t **uAddr, uint8_t **vAddr) const; + + // Disallow implicit casting and copying. + YUVImage(const YUVImage &); + YUVImage &operator=(const YUVImage &); +}; + +} // namespace android + +#endif // YUV_IMAGE_H_ |