Diffstat (limited to 'include')
-rw-r--r--  include/android_runtime/AndroidRuntime.h | 27
-rw-r--r--  include/binder/Parcel.h | 3
-rw-r--r--  include/camera/Camera.h | 6
-rw-r--r--  include/camera/CameraHardwareInterface.h | 54
-rw-r--r--  include/camera/CameraParameters.h | 79
-rw-r--r--  include/camera/ICamera.h | 6
-rw-r--r--  include/drm/DrmInfoEvent.h | 6
-rw-r--r--  include/drm/DrmManagerClient.h | 21
-rw-r--r--  include/drm/drm_framework_common.h | 29
-rw-r--r--  include/gui/ISurfaceTexture.h | 11
-rw-r--r--  include/gui/SurfaceTexture.h | 77
-rw-r--r--  include/gui/SurfaceTextureClient.h | 29
-rw-r--r--  include/media/AudioParameter.h | 72
-rw-r--r--  include/media/AudioRecord.h | 15
-rw-r--r--  include/media/AudioSystem.h | 338
-rw-r--r--  include/media/AudioTrack.h | 7
-rw-r--r--  include/media/EffectApi.h | 6
-rw-r--r--  include/media/IAudioPolicyService.h | 35
-rw-r--r--  include/media/IMediaMetadataRetriever.h | 9
-rw-r--r--  include/media/IMediaPlayer.h | 3
-rw-r--r--  include/media/IMediaPlayerClient.h | 2
-rw-r--r--  include/media/IOMX.h | 1
-rw-r--r--  include/media/MediaMetadataRetrieverInterface.h | 6
-rw-r--r--  include/media/MediaPlayerInterface.h | 13
-rw-r--r--  include/media/MediaRecorderBase.h | 4
-rw-r--r--  include/media/MemoryLeakTrackUtil.h | 28
-rw-r--r--  include/media/mediametadataretriever.h | 12
-rw-r--r--  include/media/mediaplayer.h | 8
-rw-r--r--  include/media/mediarecorder.h | 80
-rw-r--r--  include/media/mediascanner.h | 5
-rw-r--r--  include/media/stagefright/AudioPlayer.h | 2
-rw-r--r--  include/media/stagefright/AudioSource.h | 6
-rw-r--r--  include/media/stagefright/CameraSource.h | 28
-rw-r--r--  include/media/stagefright/DataSource.h | 6
-rw-r--r--  include/media/stagefright/FileSource.h | 6
-rw-r--r--  include/media/stagefright/MPEG4Writer.h | 2
-rw-r--r--  include/media/stagefright/MediaDefs.h | 1
-rw-r--r--  include/media/stagefright/MediaErrors.h | 12
-rw-r--r--  include/private/media/AudioTrackShared.h | 8
-rw-r--r--  include/private/opengles/gl_context.h | 18
-rw-r--r--  include/surfaceflinger/IGraphicBufferAlloc.h | 10
-rw-r--r--  include/surfaceflinger/ISurfaceComposerClient.h | 1
-rw-r--r--  include/surfaceflinger/Surface.h | 10
-rw-r--r--  include/surfaceflinger/SurfaceComposerClient.h | 2
-rw-r--r--  include/tts/TtsEngine.h | 232
-rw-r--r--  include/ui/Input.h | 105
-rwxr-xr-x  include/ui/KeycodeLabels.h | 3
-rw-r--r--  include/ui/Region.h | 23
-rw-r--r--  include/ui/egl/android_natives.h | 32
-rw-r--r--  include/utils/BitSet.h | 10
-rw-r--r--  include/utils/GenerationCache.h | 255
-rw-r--r--  include/utils/ResourceTypes.h | 67
-rw-r--r--  include/utils/Timers.h | 10
-rw-r--r--  include/utils/Vector.h | 20
54 files changed, 1018 insertions(+), 843 deletions(-)
diff --git a/include/android_runtime/AndroidRuntime.h b/include/android_runtime/AndroidRuntime.h
index 22c9b72..b02a057 100644
--- a/include/android_runtime/AndroidRuntime.h
+++ b/include/android_runtime/AndroidRuntime.h
@@ -46,17 +46,13 @@ public:
const char* className, const JNINativeMethod* gMethods, int numMethods);
/**
- * Call a static Java function that takes no arguments and returns void.
- */
- status_t callStatic(const char* className, const char* methodName);
-
- /**
* Call a class's static main method with the given arguments,
*/
- status_t callMain(const char* className, int argc, const char* const argv[]);
+ status_t callMain(const char* className, jclass clazz, int argc,
+ const char* const argv[]);
/**
- * Find a class, with the input either of the form
+ * Find a class, with the input either of the form
* "package/class" or "package.class".
*/
static jclass findClass(JNIEnv* env, const char* className);
@@ -67,7 +63,14 @@ public:
void start(); // start in android.util.RuntimeInit
static AndroidRuntime* getRuntime();
-
+
+ /**
+ * This gets called after the VM has been created, but before we
+ * run any code. Override it to make any FindClass calls that need
+ * to use CLASSPATH.
+ */
+ virtual void onVmCreated(JNIEnv* env);
+
/**
* This gets called after the JavaVM has initialized. Override it
* with the system's native entry point.
@@ -98,6 +101,9 @@ public:
/** return a pointer to the JNIEnv pointer for this thread */
static JNIEnv* getJNIEnv();
+ /** return a new string corresponding to 'className' with all '.'s replaced by '/'s. */
+ static char* toSlashClassName(const char* className);
+
private:
static int startReg(JNIEnv* env);
void parseExtraOpts(char* extraOptsBuf);
@@ -112,7 +118,7 @@ private:
* Thread creation helpers.
*/
static int javaCreateThreadEtc(
- android_thread_func_t entryFunction,
+ android_thread_func_t entryFunction,
void* userData,
const char* threadName,
int32_t threadPriority,
@@ -121,9 +127,6 @@ private:
static int javaThreadShell(void* args);
};
-// Returns the Unix file descriptor for a ParcelFileDescriptor object
-extern int getParcelFileDescriptorFD(JNIEnv* env, jobject object);
-
extern CursorWindow * get_window_from_object(JNIEnv * env, jobject javaWindow);
}
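The new toSlashClassName() helper above is described only by its comment. A minimal standalone sketch of the documented contract (this is not the actual AOSP implementation, just an illustration): copy the string and replace every '.' with '/'.

    #include <stdlib.h>
    #include <string.h>

    // Illustrative only: returns a newly allocated string that the caller
    // must free(); "java.lang.String" becomes "java/lang/String".
    static char* toSlashClassNameSketch(const char* className) {
        char* result = strdup(className);
        for (char* p = result; p != NULL && *p != '\0'; ++p) {
            if (*p == '.') {
                *p = '/';
            }
        }
        return result;
    }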
diff --git a/include/binder/Parcel.h b/include/binder/Parcel.h
index 32c9a1d5..bfe13f0 100644
--- a/include/binder/Parcel.h
+++ b/include/binder/Parcel.h
@@ -53,7 +53,8 @@ public:
status_t setData(const uint8_t* buffer, size_t len);
- status_t appendFrom(Parcel *parcel, size_t start, size_t len);
+ status_t appendFrom(const Parcel *parcel,
+ size_t start, size_t len);
bool hasFileDescriptors() const;
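A hedged usage sketch of the revised appendFrom(): because the source parcel is now const, callers can copy out of a parcel they only hold by const reference. dataSize() is an existing Parcel accessor; copyWholeParcel() is a hypothetical helper for illustration.

    #include <binder/Parcel.h>

    using namespace android;

    // Copy the entire contents of 'src' onto the end of 'dst'.
    status_t copyWholeParcel(Parcel* dst, const Parcel& src) {
        return dst->appendFrom(&src, 0, src.dataSize());
    }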
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index f3c8f64..3c6dccc 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -219,12 +219,6 @@ public:
// send command to camera driver
status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
- // return the total number of available video buffers.
- int32_t getNumberOfVideoBuffers() const;
-
- // return the individual video buffer corresponding to the given index.
- sp<IMemory> getVideoBuffer(int32_t index) const;
-
// tell camera hal to store meta data or real YUV in video buffers.
status_t storeMetaDataInBuffers(bool enabled);
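With getNumberOfVideoBuffers()/getVideoBuffer() removed, the remaining recording-related control is storeMetaDataInBuffers(). A hedged sketch of how a recording client might enable it (enableMetaDataMode() is a hypothetical wrapper):

    #include <camera/Camera.h>

    using namespace android;

    // Ask the camera HAL to put metadata rather than full YUV frames into the
    // buffers it sends out via CAMERA_MSG_VIDEO_FRAME during recording.
    status_t enableMetaDataMode(const sp<Camera>& camera) {
        return camera->storeMetaDataInBuffers(true);
    }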
diff --git a/include/camera/CameraHardwareInterface.h b/include/camera/CameraHardwareInterface.h
index 86bd849..3f34120 100644
--- a/include/camera/CameraHardwareInterface.h
+++ b/include/camera/CameraHardwareInterface.h
@@ -28,16 +28,6 @@
namespace android {
-/**
- * The size of image for display.
- */
-typedef struct image_rect_struct
-{
- uint32_t width; /* Image width */
- uint32_t height; /* Image height */
-} image_rect_type;
-
-
typedef void (*notify_callback)(int32_t msgType,
int32_t ext1,
int32_t ext2,
@@ -90,9 +80,6 @@ public:
/** Set the ANativeWindow to which preview frames are sent */
virtual status_t setPreviewWindow(const sp<ANativeWindow>& buf) = 0;
- /** Return the IMemoryHeap for the raw image heap */
- virtual sp<IMemoryHeap> getRawHeap() const = 0;
-
/** Set the notification and data callbacks */
virtual void setCallbacks(notify_callback notify_cb,
data_callback data_cb,
@@ -145,47 +132,6 @@ public:
virtual bool previewEnabled() = 0;
/**
- * Retrieve the total number of available buffers from camera hal for passing
- * video frame data in a recording session. Must be called again if a new
- * recording session is started.
- *
- * This method should be called after startRecording(), since
- * the some camera hal may choose to allocate the video buffers only after
- * recording is started.
- *
- * Some camera hal may not implement this method, and 0 can be returned to
- * indicate that this feature is not available.
- *
- * @return the number of video buffers that camera hal makes available.
- * Zero (0) is returned to indicate that camera hal does not support
- * this feature.
- */
- virtual int32_t getNumberOfVideoBuffers() const { return 0; }
-
- /**
- * Retrieve the video buffer corresponding to the given index in a
- * recording session. Must be called again if a new recording session
- * is started.
- *
- * It allows a client to retrieve all video buffers that camera hal makes
- * available to passing video frame data by calling this method with all
- * valid index values. The valid index value ranges from 0 to n, where
- * n = getNumberOfVideoBuffers() - 1. With an index outside of the valid
- * range, 0 must be returned. This method should be called after
- * startRecording().
- *
- * The video buffers should NOT be modified/released by camera hal
- * until stopRecording() is called and all outstanding video buffers
- * previously sent out via CAMERA_MSG_VIDEO_FRAME have been released
- * via releaseVideoBuffer().
- *
- * @param index an index to retrieve the corresponding video buffer.
- *
- * @return the video buffer corresponding to the given index.
- */
- virtual sp<IMemory> getVideoBuffer(int32_t index) const { return 0; }
-
- /**
* Request the camera hal to store meta data or real YUV data in
* the video buffers send out via CAMERA_MSG_VIDEO_FRRAME for a
* recording session. If it is not called, the default camera
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index da2f049..db81721 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -247,6 +247,45 @@ public:
// Supported focus modes.
// Example value: "auto,macro,fixed". Read only.
static const char KEY_SUPPORTED_FOCUS_MODES[];
+ // The maximum number of focus areas supported. This is the maximum length
+ // of KEY_FOCUS_AREAS.
+ // Example value: "0" or "2". Read only.
+ static const char KEY_MAX_NUM_FOCUS_AREAS[];
+ // Current focus areas.
+ //
+ // Before accessing this parameter, apps should check
+ // KEY_MAX_NUM_FOCUS_AREAS first to know the maximum number of focus areas
+ // first. If the value is 0, focus area is not supported.
+ //
+ // Each focus area is a five-element int array. The first four elements are
+ // the rectangle of the area (left, top, right, bottom). The direction is
+ // relative to the sensor orientation, that is, what the sensor sees. The
+ // direction is not affected by the rotation or mirroring of
+ // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates range from -1000 to 1000.
+ // (-1000,-1000) is the upper left point. (1000, 1000) is the lower right
+ // point. The length and width of focus areas cannot be 0 or negative.
+ //
+ // The fifth element is the weight. Values for weight must range from 1 to
+ // 1000. The weight should be interpreted as a per-pixel weight - all
+ // pixels in the area have the specified weight. This means a small area
+ // with the same weight as a larger area will have less influence on the
+ // focusing than the larger area. Focus areas can partially overlap and the
+ // driver will add the weights in the overlap region.
+ //
+ // A special case of single focus area (0,0,0,0,0) means driver to decide
+ // the focus area. For example, the driver may use more signals to decide
+ // focus areas and change them dynamically. Apps can set (0,0,0,0,0) if they
+ // want the driver to decide focus areas.
+ //
+ // Focus areas are relative to the current field of view (KEY_ZOOM). No
+ // matter what the zoom level is, (-1000,-1000) represents the top of the
+ // currently visible camera frame. The focus area cannot be set to be
+ // outside the current field of view, even when using zoom.
+ //
+ // Focus area only has effect if the current focus mode is FOCUS_MODE_AUTO,
+ // FOCUS_MODE_MACRO, or FOCUS_MODE_CONTINOUS_VIDEO.
+ // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
+ static const char KEY_FOCUS_AREAS[];
// Focal length in millimeter.
// Example value: "4.31". Read only.
static const char KEY_FOCAL_LENGTH[];
@@ -270,6 +309,46 @@ public:
// 0.3333, EV is -2.
// Example value: "0.333333333" or "0.5". Read only.
static const char KEY_EXPOSURE_COMPENSATION_STEP[];
+ // The maximum number of metering areas supported. This is the maximum
+ // length of KEY_METERING_AREAS.
+ // Example value: "0" or "2". Read only.
+ static const char KEY_MAX_NUM_METERING_AREAS[];
+ // Current metering areas. Camera driver uses these areas to decide
+ // exposure.
+ //
+ // Before accessing this parameter, apps should check
+ // KEY_MAX_NUM_METERING_AREAS first to know the maximum number of metering
+ // areas first. If the value is 0, metering area is not supported.
+ //
+ // Each metering area is a rectangle with specified weight. The direction is
+ // relative to the sensor orientation, that is, what the sensor sees. The
+ // direction is not affected by the rotation or mirroring of
+ // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates of the rectangle range
+ // from -1000 to 1000. (-1000, -1000) is the upper left point. (1000, 1000)
+ // is the lower right point. The length and width of metering areas cannot
+ // be 0 or negative.
+ //
+ // The fifth element is the weight. Values for weight must range from 1 to
+ // 1000. The weight should be interpreted as a per-pixel weight - all
+ // pixels in the area have the specified weight. This means a small area
+ // with the same weight as a larger area will have less influence on the
+ // metering than the larger area. Metering areas can partially overlap and
+ // the driver will add the weights in the overlap region.
+ //
+ // A special case of all-zero single metering area means driver to decide
+ // the metering area. For example, the driver may use more signals to decide
+ // metering areas and change them dynamically. Apps can set all-zero if they
+ // want the driver to decide metering areas.
+ //
+ // Metering areas are relative to the current field of view (KEY_ZOOM).
+ // No matter what the zoom level is, (-1000,-1000) represents the top of the
+ // currently visible camera frame. The metering area cannot be set to be
+ // outside the current field of view, even when using zoom.
+ //
+ // No matter what metering areas are, the final exposure are compensated
+ // by KEY_EXPOSURE_COMPENSATION.
+ // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
+ static const char KEY_METERING_AREAS[];
// Current zoom value.
// Example value: "0" or "6". Read/write.
static const char KEY_ZOOM[];
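A hedged usage sketch for the new focus/metering keys, following the string layout documented in the comments above (configureAreas() is a hypothetical helper; the coordinates and weights come from the example values in the comments):

    #include <camera/CameraParameters.h>

    using namespace android;

    void configureAreas(CameraParameters& params) {
        // Two focus areas, weights 300 and 700, as in the example value above.
        params.set(CameraParameters::KEY_FOCUS_AREAS,
                   "(-10,-10,0,0,300),(0,0,10,10,700)");
        // A single all-zero metering area asks the driver to choose the area.
        params.set(CameraParameters::KEY_METERING_AREAS, "(0,0,0,0,0)");
    }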
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index 2344b3f..400d7f4 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -102,12 +102,6 @@ public:
// send command to camera driver
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) = 0;
- // return the total number of available video buffers
- virtual int32_t getNumberOfVideoBuffers() const = 0;
-
- // return the individual video buffer corresponding to the given index.
- virtual sp<IMemory> getVideoBuffer(int32_t index) const = 0;
-
// tell the camera hal to store meta data or real YUV data in video buffers.
virtual status_t storeMetaDataInBuffers(bool enabled) = 0;
};
diff --git a/include/drm/DrmInfoEvent.h b/include/drm/DrmInfoEvent.h
index add33d3..dfca228 100644
--- a/include/drm/DrmInfoEvent.h
+++ b/include/drm/DrmInfoEvent.h
@@ -77,7 +77,7 @@ public:
* @param[in] infoType Type of information
* @param[in] message Message description
*/
- DrmInfoEvent(int uniqueId, int infoType, const String8& message);
+ DrmInfoEvent(int uniqueId, int infoType, const String8 message);
/**
* Destructor for DrmInfoEvent
@@ -104,12 +104,12 @@ public:
*
* @return Message description
*/
- const String8& getMessage() const;
+ const String8 getMessage() const;
private:
int mUniqueId;
int mInfoType;
- const String8& mMessage;
+ const String8 mMessage;
};
};
diff --git a/include/drm/DrmManagerClient.h b/include/drm/DrmManagerClient.h
index 7a0bf4f..b8fe46d 100644
--- a/include/drm/DrmManagerClient.h
+++ b/include/drm/DrmManagerClient.h
@@ -69,7 +69,7 @@ public:
* @return
* Handle for the decryption session
*/
- DecryptHandle* openDecryptSession(int fd, off64_t offset, off64_t length);
+ sp<DecryptHandle> openDecryptSession(int fd, off64_t offset, off64_t length);
/**
* Open the decrypt session to decrypt the given protected content
@@ -78,7 +78,7 @@ public:
* @return
* Handle for the decryption session
*/
- DecryptHandle* openDecryptSession(const char* uri);
+ sp<DecryptHandle> openDecryptSession(const char* uri);
/**
* Close the decrypt session for the given handle
@@ -87,7 +87,7 @@ public:
* @return status_t
* Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure
*/
- status_t closeDecryptSession(DecryptHandle* decryptHandle);
+ status_t closeDecryptSession(sp<DecryptHandle> &decryptHandle);
/**
* Consumes the rights for a content.
@@ -101,7 +101,7 @@ public:
* Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure.
* In case license has been expired, DRM_ERROR_LICENSE_EXPIRED will be returned.
*/
- status_t consumeRights(DecryptHandle* decryptHandle, int action, bool reserve);
+ status_t consumeRights(sp<DecryptHandle> &decryptHandle, int action, bool reserve);
/**
* Informs the DRM engine about the playback actions performed on the DRM files.
@@ -113,7 +113,8 @@ public:
* @return status_t
* Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure
*/
- status_t setPlaybackStatus(DecryptHandle* decryptHandle, int playbackStatus, int64_t position);
+ status_t setPlaybackStatus(
+ sp<DecryptHandle> &decryptHandle, int playbackStatus, int64_t position);
/**
* Initialize decryption for the given unit of the protected content
@@ -125,7 +126,7 @@ public:
* Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure
*/
status_t initializeDecryptUnit(
- DecryptHandle* decryptHandle, int decryptUnitId, const DrmBuffer* headerInfo);
+ sp<DecryptHandle> &decryptHandle, int decryptUnitId, const DrmBuffer* headerInfo);
/**
* Decrypt the protected content buffers for the given unit
@@ -144,7 +145,7 @@ public:
* DRM_ERROR_DECRYPT for failure.
*/
status_t decrypt(
- DecryptHandle* decryptHandle, int decryptUnitId,
+ sp<DecryptHandle> &decryptHandle, int decryptUnitId,
const DrmBuffer* encBuffer, DrmBuffer** decBuffer, DrmBuffer* IV = NULL);
/**
@@ -155,7 +156,8 @@ public:
* @return status_t
* Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure
*/
- status_t finalizeDecryptUnit(DecryptHandle* decryptHandle, int decryptUnitId);
+ status_t finalizeDecryptUnit(
+ sp<DecryptHandle> &decryptHandle, int decryptUnitId);
/**
* Reads the specified number of bytes from an open DRM file.
@@ -167,7 +169,8 @@ public:
*
* @return Number of bytes read. Returns -1 for Failure.
*/
- ssize_t pread(DecryptHandle* decryptHandle, void* buffer, ssize_t numBytes, off64_t offset);
+ ssize_t pread(sp<DecryptHandle> &decryptHandle,
+ void* buffer, ssize_t numBytes, off64_t offset);
/**
* Validates whether an action on the DRM content is allowed or not.
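A hedged sketch of the updated calling convention: decrypt handles are now reference-counted, so a client holds an sp<DecryptHandle> and passes it by reference to pread() and closeDecryptSession() (readProtected() is a hypothetical helper):

    #include <drm/DrmManagerClient.h>

    using namespace android;

    ssize_t readProtected(DrmManagerClient& client, int fd, off64_t fileSize,
                          void* buf, ssize_t numBytes) {
        sp<DecryptHandle> handle = client.openDecryptSession(fd, 0, fileSize);
        if (handle == NULL) {
            return -1;
        }
        // The handle object is released when the last sp<> goes away, but the
        // decrypt session itself must still be closed explicitly.
        ssize_t n = client.pread(handle, buf, numBytes, 0 /* offset */);
        client.closeDecryptSession(handle);
        return n;
    }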
diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h
index 454fc99..3330ebc 100644
--- a/include/drm/drm_framework_common.h
+++ b/include/drm/drm_framework_common.h
@@ -19,6 +19,7 @@
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
#include <utils/String8.h>
#include <utils/Errors.h>
@@ -30,14 +31,17 @@ namespace android {
* Error code for DRM Frameowrk
*/
enum {
- DRM_ERROR_BASE = -2000,
-
- DRM_ERROR_UNKNOWN = DRM_ERROR_BASE,
- DRM_ERROR_LICENSE_EXPIRED = DRM_ERROR_BASE - 1,
- DRM_ERROR_SESSION_NOT_OPENED = DRM_ERROR_BASE - 2,
- DRM_ERROR_DECRYPT_UNIT_NOT_INITIALIZED = DRM_ERROR_BASE - 3,
- DRM_ERROR_DECRYPT = DRM_ERROR_BASE - 4,
- DRM_ERROR_CANNOT_HANDLE = DRM_ERROR_BASE - 5,
+ // The following constant values should be in sync with
+ // media/stagefright/MediaErrors.h
+ ERROR_BASE = -2000,
+
+ DRM_ERROR_UNKNOWN = ERROR_BASE,
+ DRM_ERROR_NO_LICENSE = ERROR_BASE - 1,
+ DRM_ERROR_LICENSE_EXPIRED = ERROR_BASE - 2,
+ DRM_ERROR_SESSION_NOT_OPENED = ERROR_BASE - 3,
+ DRM_ERROR_DECRYPT_UNIT_NOT_INITIALIZED = ERROR_BASE - 4,
+ DRM_ERROR_DECRYPT = ERROR_BASE - 5,
+ DRM_ERROR_CANNOT_HANDLE = ERROR_BASE - 6,
DRM_NO_ERROR = NO_ERROR
};
@@ -251,7 +255,7 @@ public:
/**
* Defines decryption handle
*/
-class DecryptHandle {
+class DecryptHandle : public RefBase {
public:
/**
* Decryption session Handle
@@ -307,8 +311,13 @@ public:
decryptId(INVALID_VALUE),
mimeType(""),
decryptApiType(INVALID_VALUE),
- status(INVALID_VALUE) {
+ status(INVALID_VALUE),
+ decryptInfo(NULL) {
+
+ }
+ ~DecryptHandle() {
+ delete decryptInfo; decryptInfo = NULL;
}
bool operator<(const DecryptHandle& handle) const {
diff --git a/include/gui/ISurfaceTexture.h b/include/gui/ISurfaceTexture.h
index 168310c..d2d3bb8 100644
--- a/include/gui/ISurfaceTexture.h
+++ b/include/gui/ISurfaceTexture.h
@@ -36,6 +36,8 @@ class ISurfaceTexture : public IInterface
public:
DECLARE_META_INTERFACE(SurfaceTexture);
+ enum { BUFFER_NEEDS_REALLOCATION = 1 };
+
// requestBuffer requests a new buffer for the given index. The server (i.e.
// the ISurfaceTexture implementation) assigns the newly created buffer to
// the given slot index, and the client is expected to mirror the
@@ -56,14 +58,19 @@ public:
// should call requestBuffer to assign a new buffer to that slot. The client
// is expected to either call cancelBuffer on the dequeued slot or to fill
// in the contents of its associated buffer contents and call queueBuffer.
+ // If dequeueBuffer return BUFFER_NEEDS_REALLOCATION, the client is
+ // expected to call requestBuffer immediately.
virtual status_t dequeueBuffer(int *slot) = 0;
// queueBuffer indicates that the client has finished filling in the
// contents of the buffer associated with slot and transfers ownership of
// that slot back to the server. It is not valid to call queueBuffer on a
// slot that is not owned by the client or one for which a buffer associated
- // via requestBuffer.
- virtual status_t queueBuffer(int slot) = 0;
+ // via requestBuffer. In addition, a timestamp must be provided by the
+ // client for this buffer. The timestamp is measured in nanoseconds, and
+ // must be monotonically increasing. Its other properties (zero point, etc)
+ // are client-dependent, and should be documented by the client.
+ virtual status_t queueBuffer(int slot, int64_t timestamp) = 0;
// cancelBuffer indicates that the client does not wish to fill in the
// buffer associated with slot and transfers ownership of the slot back to
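A hedged client-side sketch of the revised dequeue/queue protocol: a BUFFER_NEEDS_REALLOCATION return from dequeueBuffer() means the client should refresh its copy of the slot's buffer via requestBuffer() (whose signature is not shown in this diff), and queueBuffer() now carries a timestamp in nanoseconds. produceFrame() is a hypothetical helper; systemTime() comes from utils/Timers.h.

    #include <gui/ISurfaceTexture.h>
    #include <utils/Timers.h>   // systemTime()

    using namespace android;

    status_t produceFrame(const sp<ISurfaceTexture>& st) {
        int slot = -1;
        status_t err = st->dequeueBuffer(&slot);
        if (err < 0) {
            return err;
        }
        if (err == ISurfaceTexture::BUFFER_NEEDS_REALLOCATION) {
            // The client is expected to call requestBuffer() for 'slot' here to
            // refresh its mirrored GraphicBuffer (call omitted in this sketch).
        }
        // ... fill in the buffer associated with 'slot' ...
        return st->queueBuffer(slot, systemTime(SYSTEM_TIME_MONOTONIC));
    }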
diff --git a/include/gui/SurfaceTexture.h b/include/gui/SurfaceTexture.h
index 9bf38f7..96828c6 100644
--- a/include/gui/SurfaceTexture.h
+++ b/include/gui/SurfaceTexture.h
@@ -66,7 +66,12 @@ public:
// unmodified.
virtual status_t dequeueBuffer(int *buf);
- virtual status_t queueBuffer(int buf);
+ // queueBuffer returns a filled buffer to the SurfaceTexture. In addition, a
+ // timestamp must be provided for the buffer. The timestamp is in
+ // nanoseconds, and must be monotonically increasing. Its other semantics
+ // (zero point, etc) are client-dependent and should be documented by the
+ // client.
+ virtual status_t queueBuffer(int buf, int64_t timestamp);
virtual void cancelBuffer(int buf);
virtual status_t setCrop(const Rect& reg);
virtual status_t setTransform(uint32_t transform);
@@ -98,6 +103,14 @@ public:
// functions.
void getTransformMatrix(float mtx[16]);
+ // getTimestamp retrieves the timestamp associated with the texture image
+ // set by the most recent call to updateTexImage.
+ //
+ // The timestamp is in nanoseconds, and is monotonically increasing. Its
+ // other semantics (zero point, etc) are source-dependent and should be
+ // documented by the source.
+ int64_t getTimestamp();
+
// setFrameAvailableListener sets the listener object that will be notified
// when a new frame becomes available.
void setFrameAvailableListener(const sp<FrameAvailableListener>& l);
@@ -108,11 +121,34 @@ public:
// buffers before the client is done with them.
sp<IBinder> getAllocator();
-private:
+ // setDefaultBufferSize is used to set the size of buffers returned by
+ // requestBuffers when a with and height of zero is requested.
+ // A call to setDefaultBufferSize() may trigger requestBuffers() to
+ // be called from the client.
+ status_t setDefaultBufferSize(uint32_t w, uint32_t h);
+
+ // getCurrentBuffer returns the buffer associated with the current image.
+ sp<GraphicBuffer> getCurrentBuffer() const;
+
+ // getCurrentTextureTarget returns the texture target of the current
+ // texture as returned by updateTexImage().
+ GLenum getCurrentTextureTarget() const;
+
+ // getCurrentCrop returns the cropping rectangle of the current buffer
+ Rect getCurrentCrop() const;
+
+ // getCurrentTransform returns the transform of the current buffer
+ uint32_t getCurrentTransform() const;
+
+protected:
// freeAllBuffers frees the resources (both GraphicBuffer and EGLImage) for
// all slots.
void freeAllBuffers();
+ static bool isExternalFormat(uint32_t format);
+ static GLenum getTextureTarget(uint32_t format);
+
+private:
// createImage creates a new EGLImage from a GraphicBuffer.
EGLImageKHR createImage(EGLDisplay dpy,
@@ -145,6 +181,23 @@ private:
// for a slot when requestBuffer is called with that slot's index.
BufferSlot mSlots[NUM_BUFFER_SLOTS];
+ // mDefaultWidth holds the default width of allocated buffers. It is used
+ // in requestBuffers() if a width and height of zero is specified.
+ uint32_t mDefaultWidth;
+
+ // mDefaultHeight holds the default height of allocated buffers. It is used
+ // in requestBuffers() if a width and height of zero is specified.
+ uint32_t mDefaultHeight;
+
+ // mPixelFormat holds the pixel format of allocated buffers. It is used
+ // in requestBuffers() if a format of zero is specified.
+ uint32_t mPixelFormat;
+
+ // mUseDefaultSize indicates whether or not the default size should be used
+ // that is, if the last requestBuffer has been called with both width
+ // and height null.
+ bool mUseDefaultSize;
+
// mBufferCount is the number of buffer slots that the client and server
// must maintain. It defaults to MIN_BUFFER_SLOTS and can be changed by
// calling setBufferCount.
@@ -158,6 +211,10 @@ private:
// reset mCurrentTexture to INVALID_BUFFER_SLOT.
int mCurrentTexture;
+ // mCurrentTextureTarget is the GLES texture target to be used with the
+ // current texture.
+ GLenum mCurrentTextureTarget;
+
// mCurrentTextureBuf is the graphic buffer of the current texture. It's
// possible that this buffer is not associated with any buffer slot, so we
// must track it separately in order to properly use
@@ -172,6 +229,10 @@ private:
// gets set to mLastQueuedTransform each time updateTexImage is called.
uint32_t mCurrentTransform;
+ // mCurrentTimestamp is the timestamp for the current texture. It
+ // gets set to mLastQueuedTimestamp each time updateTexImage is called.
+ int64_t mCurrentTimestamp;
+
// mLastQueued is the buffer slot index of the most recently enqueued buffer.
// At construction time it is initialized to INVALID_BUFFER_SLOT, and is
// updated each time queueBuffer is called.
@@ -187,6 +248,10 @@ private:
// queueBuffer gets called.
uint32_t mLastQueuedTransform;
+ // mLastQueuedTimestamp is the timestamp for the buffer that was most
+ // recently queued. This gets set by queueBuffer.
+ int64_t mLastQueuedTimestamp;
+
// mNextCrop is the crop rectangle that will be used for the next buffer
// that gets queued. It is set by calling setCrop.
Rect mNextCrop;
@@ -204,12 +269,6 @@ private:
// allocate new GraphicBuffer objects.
sp<IGraphicBufferAlloc> mGraphicBufferAlloc;
- // mAllocdBuffers is mirror of the list of buffers that SurfaceFlinger is
- // referencing. This is kept so that gralloc implementations do not need to
- // properly handle the case where SurfaceFlinger no longer holds a reference
- // to a buffer, but other processes do.
- Vector<sp<GraphicBuffer> > mAllocdBuffers;
-
// mFrameAvailableListener is the listener object that will be called when a
// new frame becomes available. If it is not NULL it will be called from
// queueBuffer.
@@ -218,7 +277,7 @@ private:
// mMutex is the mutex used to prevent concurrent access to the member
// variables of SurfaceTexture objects. It must be locked whenever the
// member variables are accessed.
- Mutex mMutex;
+ mutable Mutex mMutex;
};
// ----------------------------------------------------------------------------
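A hedged consumer-side sketch: after updateTexImage() latches the most recently queued buffer, the new accessors expose its timestamp, crop, transform and texture target (consumeFrame() is a hypothetical helper):

    #include <gui/SurfaceTexture.h>

    using namespace android;

    void consumeFrame(const sp<SurfaceTexture>& texture) {
        texture->updateTexImage();                      // latch newest queued buffer

        float mtx[16];
        texture->getTransformMatrix(mtx);               // texture-coordinate transform

        int64_t timestampNs = texture->getTimestamp();  // set by the producer's queueBuffer()
        Rect crop = texture->getCurrentCrop();
        uint32_t transform = texture->getCurrentTransform();
        GLenum target = texture->getCurrentTextureTarget();
        (void)timestampNs; (void)crop; (void)transform; (void)target;
    }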
diff --git a/include/gui/SurfaceTextureClient.h b/include/gui/SurfaceTextureClient.h
index 7992105..fe9b049 100644
--- a/include/gui/SurfaceTextureClient.h
+++ b/include/gui/SurfaceTextureClient.h
@@ -27,6 +27,8 @@
namespace android {
+class Surface;
+
class SurfaceTextureClient
: public EGLNativeBase<ANativeWindow, SurfaceTextureClient, RefBase>
{
@@ -36,6 +38,7 @@ public:
sp<ISurfaceTexture> getISurfaceTexture() const;
private:
+ friend class Surface;
// can't be copied
SurfaceTextureClient& operator = (const SurfaceTextureClient& rhs);
@@ -63,6 +66,7 @@ private:
int dispatchSetBufferCount(va_list args);
int dispatchSetBuffersGeometry(va_list args);
int dispatchSetBuffersTransform(va_list args);
+ int dispatchSetBuffersTimestamp(va_list args);
int dispatchSetCrop(va_list args);
int dispatchSetUsage(va_list args);
@@ -71,11 +75,14 @@ private:
int setBufferCount(int bufferCount);
int setBuffersGeometry(int w, int h, int format);
int setBuffersTransform(int transform);
+ int setBuffersTimestamp(int64_t timestamp);
int setCrop(Rect const* rect);
int setUsage(uint32_t reqUsage);
void freeAllBuffers();
+ int getConnectedApi() const;
+
enum { MIN_UNDEQUEUED_BUFFERS = SurfaceTexture::MIN_UNDEQUEUED_BUFFERS };
enum { MIN_BUFFER_SLOTS = SurfaceTexture::MIN_BUFFER_SLOTS };
enum { NUM_BUFFER_SLOTS = SurfaceTexture::NUM_BUFFER_SLOTS };
@@ -114,10 +121,30 @@ private:
// at the next deuque operation. It is initialized to 0.
uint32_t mReqUsage;
+ // mTimestamp is the timestamp that will be used for the next buffer queue
+ // operation. It defaults to NATIVE_WINDOW_TIMESTAMP_AUTO, which means that
+ // a timestamp is auto-generated when queueBuffer is called.
+ int64_t mTimestamp;
+
+ // mConnectedApi holds the currently connected API to this surface
+ int mConnectedApi;
+
+ // mQueryWidth is the width returned by query(). It is set to width
+ // of the last dequeued buffer or to mReqWidth if no buffer was dequeued.
+ uint32_t mQueryWidth;
+
+ // mQueryHeight is the height returned by query(). It is set to height
+ // of the last dequeued buffer or to mReqHeight if no buffer was dequeued.
+ uint32_t mQueryHeight;
+
+ // mQueryFormat is the format returned by query(). It is set to the last
+ // dequeued format or to mReqFormat if no buffer was dequeued.
+ uint32_t mQueryFormat;
+
// mMutex is the mutex used to prevent concurrent access to the member
// variables of SurfaceTexture objects. It must be locked whenever the
// member variables are accessed.
- Mutex mMutex;
+ mutable Mutex mMutex;
};
}; // namespace android
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
new file mode 100644
index 0000000..79d5d82
--- /dev/null
+++ b/include/media/AudioParameter.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2008-2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPARAMETER_H_
+#define ANDROID_AUDIOPARAMETER_H_
+
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class AudioParameter {
+
+public:
+ AudioParameter() {}
+ AudioParameter(const String8& keyValuePairs);
+ virtual ~AudioParameter();
+
+ // reserved parameter keys for changing standard parameters with setParameters() function.
+ // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
+ // configuration changes and act accordingly.
+ // keyRouting: to change audio routing, value is an int in audio_devices_t
+ // keySamplingRate: to change sampling rate routing, value is an int
+ // keyFormat: to change audio format, value is an int in audio_format_t
+ // keyChannels: to change audio channel configuration, value is an int in audio_channels_t
+ // keyFrameCount: to change audio output frame count, value is an int
+ // keyInputSource: to change audio input source, value is an int in audio_source_t
+ // (defined in media/mediarecorder.h)
+ static const char *keyRouting;
+ static const char *keySamplingRate;
+ static const char *keyFormat;
+ static const char *keyChannels;
+ static const char *keyFrameCount;
+ static const char *keyInputSource;
+
+ String8 toString();
+
+ status_t add(const String8& key, const String8& value);
+ status_t addInt(const String8& key, const int value);
+ status_t addFloat(const String8& key, const float value);
+
+ status_t remove(const String8& key);
+
+ status_t get(const String8& key, String8& value);
+ status_t getInt(const String8& key, int& value);
+ status_t getFloat(const String8& key, float& value);
+ status_t getAt(size_t index, String8& key, String8& value);
+
+ size_t size() { return mParameters.size(); }
+
+private:
+ String8 mKeyValuePairs;
+ KeyedVector <String8, String8> mParameters;
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIOPARAMETER_H_*/
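A hedged usage sketch for the relocated AudioParameter helper: build a key/value string for a setParameters() call, then parse one back (the two functions below are hypothetical helpers; the exact key strings are defined in the implementation):

    #include <media/AudioParameter.h>

    using namespace android;

    String8 buildRoutingParams(int device) {
        AudioParameter param;
        param.addInt(String8(AudioParameter::keyRouting), device);
        param.addInt(String8(AudioParameter::keySamplingRate), 44100);
        return param.toString();    // key=value pairs joined into one String8
    }

    int parseRouting(const String8& keyValuePairs) {
        AudioParameter param(keyValuePairs);
        int device = 0;
        if (param.getInt(String8(AudioParameter::keyRouting), device) == NO_ERROR) {
            return device;
        }
        return 0;   // routing key not present
    }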
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 293764d..def3612 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -30,6 +30,7 @@
#include <binder/IMemory.h>
#include <utils/threads.h>
+#include <hardware/audio.h>
namespace android {
@@ -127,9 +128,9 @@ public:
*
* inputSource: Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT).
* sampleRate: Track sampling rate in Hz.
- * format: Audio format (e.g AudioSystem::PCM_16_BIT for signed
+ * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
- * channels: Channel mask: see AudioSystem::audio_channels.
+ * channels: Channel mask: see audio_channels_t.
* frameCount: Total size of track PCM buffer in frames. This defines the
* latency of the track.
* flags: A bitmask of acoustic values from enum record_flags. It enables
@@ -142,15 +143,15 @@ public:
*/
enum record_flags {
- RECORD_AGC_ENABLE = AudioSystem::AGC_ENABLE,
- RECORD_NS_ENABLE = AudioSystem::NS_ENABLE,
- RECORD_IIR_ENABLE = AudioSystem::TX_IIR_ENABLE
+ RECORD_AGC_ENABLE = AUDIO_IN_ACOUSTICS_AGC_ENABLE,
+ RECORD_NS_ENABLE = AUDIO_IN_ACOUSTICS_NS_ENABLE,
+ RECORD_IIR_ENABLE = AUDIO_IN_ACOUSTICS_TX_IIR_ENABLE,
};
AudioRecord(int inputSource,
uint32_t sampleRate = 0,
int format = 0,
- uint32_t channels = AudioSystem::CHANNEL_IN_MONO,
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -176,7 +177,7 @@ public:
status_t set(int inputSource = 0,
uint32_t sampleRate = 0,
int format = 0,
- uint32_t channels = AudioSystem::CHANNEL_IN_MONO,
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
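A hedged construction sketch using the signature shown above: the channel mask, format and acoustics flags now come from <hardware/audio.h> constants instead of AudioSystem enums (makeDefaultRecord() is a hypothetical helper; the 8 kHz rate is just an example):

    #include <media/AudioRecord.h>
    #include <media/mediarecorder.h>   // AUDIO_SOURCE_DEFAULT (audio_source_t)

    using namespace android;

    AudioRecord* makeDefaultRecord() {
        AudioRecord* record = new AudioRecord(
                AUDIO_SOURCE_DEFAULT,              // inputSource
                8000,                              // sampleRate in Hz
                AUDIO_FORMAT_PCM_16_BIT,           // format
                AUDIO_CHANNEL_IN_MONO,             // was AudioSystem::CHANNEL_IN_MONO
                0,                                 // frameCount: use the default
                AudioRecord::RECORD_AGC_ENABLE | AudioRecord::RECORD_NS_ENABLE);
        if (record->initCheck() != NO_ERROR) {
            delete record;
            return NULL;
        }
        return record;
    }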
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 2dc4beb..eb61a87 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -21,10 +21,15 @@
#include <utils/threads.h>
#include <media/IAudioFlinger.h>
+#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
+
+/* XXX: Should be include by all the users instead */
+#include <media/AudioParameter.h>
+
namespace android {
typedef void (*audio_error_callback)(status_t err);
-typedef int audio_io_handle_t;
class IAudioPolicyService;
class String8;
@@ -33,151 +38,6 @@ class AudioSystem
{
public:
- enum stream_type {
- DEFAULT =-1,
- VOICE_CALL = 0,
- SYSTEM = 1,
- RING = 2,
- MUSIC = 3,
- ALARM = 4,
- NOTIFICATION = 5,
- BLUETOOTH_SCO = 6,
- ENFORCED_AUDIBLE = 7, // Sounds that cannot be muted by user and must be routed to speaker
- DTMF = 8,
- TTS = 9,
- NUM_STREAM_TYPES
- };
-
- // Audio sub formats (see AudioSystem::audio_format).
- enum pcm_sub_format {
- PCM_SUB_16_BIT = 0x1, // must be 1 for backward compatibility
- PCM_SUB_8_BIT = 0x2, // must be 2 for backward compatibility
- };
-
- // MP3 sub format field definition : can use 11 LSBs in the same way as MP3 frame header to specify
- // bit rate, stereo mode, version...
- enum mp3_sub_format {
- //TODO
- };
-
- // AMR NB/WB sub format field definition: specify frame block interleaving, bandwidth efficient or octet aligned,
- // encoding mode for recording...
- enum amr_sub_format {
- //TODO
- };
-
- // AAC sub format field definition: specify profile or bitrate for recording...
- enum aac_sub_format {
- //TODO
- };
-
- // VORBIS sub format field definition: specify quality for recording...
- enum vorbis_sub_format {
- //TODO
- };
-
- // Audio format consists in a main format field (upper 8 bits) and a sub format field (lower 24 bits).
- // The main format indicates the main codec type. The sub format field indicates options and parameters
- // for each format. The sub format is mainly used for record to indicate for instance the requested bitrate
- // or profile. It can also be used for certain formats to give informations not present in the encoded
- // audio stream (e.g. octet alignement for AMR).
- enum audio_format {
- INVALID_FORMAT = -1,
- FORMAT_DEFAULT = 0,
- PCM = 0x00000000, // must be 0 for backward compatibility
- MP3 = 0x01000000,
- AMR_NB = 0x02000000,
- AMR_WB = 0x03000000,
- AAC = 0x04000000,
- HE_AAC_V1 = 0x05000000,
- HE_AAC_V2 = 0x06000000,
- VORBIS = 0x07000000,
- MAIN_FORMAT_MASK = 0xFF000000,
- SUB_FORMAT_MASK = 0x00FFFFFF,
- // Aliases
- PCM_16_BIT = (PCM|PCM_SUB_16_BIT),
- PCM_8_BIT = (PCM|PCM_SUB_8_BIT)
- };
-
-
- // Channel mask definitions must be kept in sync with JAVA values in /media/java/android/media/AudioFormat.java
- enum audio_channels {
- // output channels
- CHANNEL_OUT_FRONT_LEFT = 0x4,
- CHANNEL_OUT_FRONT_RIGHT = 0x8,
- CHANNEL_OUT_FRONT_CENTER = 0x10,
- CHANNEL_OUT_LOW_FREQUENCY = 0x20,
- CHANNEL_OUT_BACK_LEFT = 0x40,
- CHANNEL_OUT_BACK_RIGHT = 0x80,
- CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x100,
- CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x200,
- CHANNEL_OUT_BACK_CENTER = 0x400,
- CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT,
- CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT),
- CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
- CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER),
- CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
- CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
- CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER),
- CHANNEL_OUT_ALL = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
- CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER | CHANNEL_OUT_BACK_CENTER),
-
- // input channels
- CHANNEL_IN_LEFT = 0x4,
- CHANNEL_IN_RIGHT = 0x8,
- CHANNEL_IN_FRONT = 0x10,
- CHANNEL_IN_BACK = 0x20,
- CHANNEL_IN_LEFT_PROCESSED = 0x40,
- CHANNEL_IN_RIGHT_PROCESSED = 0x80,
- CHANNEL_IN_FRONT_PROCESSED = 0x100,
- CHANNEL_IN_BACK_PROCESSED = 0x200,
- CHANNEL_IN_PRESSURE = 0x400,
- CHANNEL_IN_X_AXIS = 0x800,
- CHANNEL_IN_Y_AXIS = 0x1000,
- CHANNEL_IN_Z_AXIS = 0x2000,
- CHANNEL_IN_VOICE_UPLINK = 0x4000,
- CHANNEL_IN_VOICE_DNLINK = 0x8000,
- CHANNEL_IN_MONO = CHANNEL_IN_FRONT,
- CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT),
- CHANNEL_IN_ALL = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT | CHANNEL_IN_FRONT | CHANNEL_IN_BACK|
- CHANNEL_IN_LEFT_PROCESSED | CHANNEL_IN_RIGHT_PROCESSED | CHANNEL_IN_FRONT_PROCESSED | CHANNEL_IN_BACK_PROCESSED|
- CHANNEL_IN_PRESSURE | CHANNEL_IN_X_AXIS | CHANNEL_IN_Y_AXIS | CHANNEL_IN_Z_AXIS |
- CHANNEL_IN_VOICE_UPLINK | CHANNEL_IN_VOICE_DNLINK)
- };
-
- enum audio_mode {
- MODE_INVALID = -2,
- MODE_CURRENT = -1,
- MODE_NORMAL = 0,
- MODE_RINGTONE,
- MODE_IN_CALL,
- MODE_IN_COMMUNICATION,
- NUM_MODES // not a valid entry, denotes end-of-list
- };
-
- enum audio_in_acoustics {
- AGC_ENABLE = 0x0001,
- AGC_DISABLE = 0,
- NS_ENABLE = 0x0002,
- NS_DISABLE = 0,
- TX_IIR_ENABLE = 0x0004,
- TX_DISABLE = 0
- };
-
- // special audio session values
- enum audio_sessions {
- SESSION_OUTPUT_STAGE = -1, // session for effects attached to a particular output stream
- // (value must be less than 0)
- SESSION_OUTPUT_MIX = 0, // session for effects applied to output mix. These effects can
- // be moved by audio policy manager to another output stream
- // (value must be 0)
- };
-
/* These are static methods to control the system-wide AudioFlinger
* only privileged processes can have access to them
*/
@@ -189,6 +49,7 @@ public:
// set/get master volume
static status_t setMasterVolume(float value);
static status_t getMasterVolume(float* volume);
+
// mute/unmute audio outputs
static status_t setMasterMute(bool mute);
static status_t getMasterMute(bool* mute);
@@ -201,7 +62,7 @@ public:
static status_t setStreamMute(int stream, bool mute);
static status_t getStreamMute(int stream, bool* mute);
- // set audio mode in audio hardware (see AudioSystem::audio_mode)
+ // set audio mode in audio hardware (see audio_mode_t)
static status_t setMode(int mode);
// returns true in *state if tracks are active on the specified stream or has been active
@@ -222,9 +83,9 @@ public:
static float linearToLog(int volume);
static int logToLinear(float volume);
- static status_t getOutputSamplingRate(int* samplingRate, int stream = DEFAULT);
- static status_t getOutputFrameCount(int* frameCount, int stream = DEFAULT);
- static status_t getOutputLatency(uint32_t* latency, int stream = DEFAULT);
+ static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputFrameCount(int* frameCount, int stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputLatency(uint32_t* latency, int stream = AUDIO_STREAM_DEFAULT);
static bool routedToA2dpOutput(int streamType);
@@ -234,7 +95,7 @@ public:
static status_t setVoiceVolume(float volume);
// return the number of audio frames written by AudioFlinger to audio HAL and
- // audio dsp to DAC since the output on which the specificed stream is playing
+ // audio dsp to DAC since the output on which the specified stream is playing
// has exited standby.
// returned status (from utils/Errors.h) can be:
// - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
@@ -242,93 +103,11 @@ public:
// - BAD_VALUE: invalid parameter
// NOTE: this feature is not supported on all hardware platforms and it is
// necessary to check returned status before using the returned values.
- static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream = DEFAULT);
+ static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream = AUDIO_STREAM_DEFAULT);
static unsigned int getInputFramesLost(audio_io_handle_t ioHandle);
static int newAudioSessionId();
- //
- // AudioPolicyService interface
- //
-
- enum audio_devices {
- // output devices
- DEVICE_OUT_EARPIECE = 0x1,
- DEVICE_OUT_SPEAKER = 0x2,
- DEVICE_OUT_WIRED_HEADSET = 0x4,
- DEVICE_OUT_WIRED_HEADPHONE = 0x8,
- DEVICE_OUT_BLUETOOTH_SCO = 0x10,
- DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20,
- DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40,
- DEVICE_OUT_BLUETOOTH_A2DP = 0x80,
- DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
- DEVICE_OUT_AUX_DIGITAL = 0x400,
- DEVICE_OUT_ANLG_DOCK_HEADSET = 0x800,
- DEVICE_OUT_DGTL_DOCK_HEADSET = 0x1000,
- DEVICE_OUT_DEFAULT = 0x8000,
- DEVICE_OUT_ALL = (DEVICE_OUT_EARPIECE | DEVICE_OUT_SPEAKER | DEVICE_OUT_WIRED_HEADSET |
- DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
- DEVICE_OUT_BLUETOOTH_SCO_CARKIT | DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL |
- DEVICE_OUT_ANLG_DOCK_HEADSET | DEVICE_OUT_DGTL_DOCK_HEADSET |
- DEVICE_OUT_DEFAULT),
- DEVICE_OUT_ALL_A2DP = (DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
-
- // input devices
- DEVICE_IN_COMMUNICATION = 0x10000,
- DEVICE_IN_AMBIENT = 0x20000,
- DEVICE_IN_BUILTIN_MIC = 0x40000,
- DEVICE_IN_BLUETOOTH_SCO_HEADSET = 0x80000,
- DEVICE_IN_WIRED_HEADSET = 0x100000,
- DEVICE_IN_AUX_DIGITAL = 0x200000,
- DEVICE_IN_VOICE_CALL = 0x400000,
- DEVICE_IN_BACK_MIC = 0x800000,
- DEVICE_IN_DEFAULT = 0x80000000,
-
- DEVICE_IN_ALL = (DEVICE_IN_COMMUNICATION | DEVICE_IN_AMBIENT | DEVICE_IN_BUILTIN_MIC |
- DEVICE_IN_BLUETOOTH_SCO_HEADSET | DEVICE_IN_WIRED_HEADSET | DEVICE_IN_AUX_DIGITAL |
- DEVICE_IN_VOICE_CALL | DEVICE_IN_BACK_MIC | DEVICE_IN_DEFAULT)
- };
-
- // device connection states used for setDeviceConnectionState()
- enum device_connection_state {
- DEVICE_STATE_UNAVAILABLE,
- DEVICE_STATE_AVAILABLE,
- NUM_DEVICE_STATES
- };
-
- // request to open a direct output with getOutput() (by opposition to sharing an output with other AudioTracks)
- enum output_flags {
- OUTPUT_FLAG_INDIRECT = 0x0,
- OUTPUT_FLAG_DIRECT = 0x1
- };
-
- // device categories used for setForceUse()
- enum forced_config {
- FORCE_NONE,
- FORCE_SPEAKER,
- FORCE_HEADPHONES,
- FORCE_BT_SCO,
- FORCE_BT_A2DP,
- FORCE_WIRED_ACCESSORY,
- FORCE_BT_CAR_DOCK,
- FORCE_BT_DESK_DOCK,
- FORCE_ANALOG_DOCK,
- FORCE_DIGITAL_DOCK,
- NUM_FORCE_CONFIG,
- FORCE_DEFAULT = FORCE_NONE
- };
-
- // usages used for setForceUse()
- enum force_use {
- FOR_COMMUNICATION,
- FOR_MEDIA,
- FOR_RECORD,
- FOR_DOCK,
- NUM_FORCE_USE
- };
// types of io configuration change events received with ioConfigChanged()
enum io_config_event {
@@ -359,40 +138,40 @@ public:
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
- static status_t setDeviceConnectionState(audio_devices device, device_connection_state state, const char *device_address);
- static device_connection_state getDeviceConnectionState(audio_devices device, const char *device_address);
+ static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address);
+ static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address);
static status_t setPhoneState(int state);
static status_t setRingerMode(uint32_t mode, uint32_t mask);
- static status_t setForceUse(force_use usage, forced_config config);
- static forced_config getForceUse(force_use usage);
- static audio_io_handle_t getOutput(stream_type stream,
+ static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+ static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+ static audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
- uint32_t format = FORMAT_DEFAULT,
- uint32_t channels = CHANNEL_OUT_STEREO,
- output_flags flags = OUTPUT_FLAG_INDIRECT);
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
+ uint32_t channels = AUDIO_CHANNEL_OUT_STEREO,
+ audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_INDIRECT);
static status_t startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0);
static status_t stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0);
static void releaseOutput(audio_io_handle_t output);
static audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
- uint32_t format = FORMAT_DEFAULT,
- uint32_t channels = CHANNEL_IN_MONO,
- audio_in_acoustics acoustics = (audio_in_acoustics)0);
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO,
+ audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0);
static status_t startInput(audio_io_handle_t input);
static status_t stopInput(audio_io_handle_t input);
static void releaseInput(audio_io_handle_t input);
- static status_t initStreamVolume(stream_type stream,
+ static status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax);
- static status_t setStreamVolumeIndex(stream_type stream, int index);
- static status_t getStreamVolumeIndex(stream_type stream, int *index);
+ static status_t setStreamVolumeIndex(audio_stream_type_t stream, int index);
+ static status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index);
- static uint32_t getStrategyForStream(stream_type stream);
- static uint32_t getDevicesForStream(stream_type stream);
+ static uint32_t getStrategyForStream(audio_stream_type_t stream);
+ static uint32_t getDevicesForStream(audio_stream_type_t stream);
static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
static status_t registerEffect(effect_descriptor_t *desc,
@@ -406,17 +185,6 @@ public:
// ----------------------------------------------------------------------------
- static uint32_t popCount(uint32_t u);
- static bool isOutputDevice(audio_devices device);
- static bool isInputDevice(audio_devices device);
- static bool isA2dpDevice(audio_devices device);
- static bool isBluetoothScoDevice(audio_devices device);
- static bool isLowVisibility(stream_type stream);
- static bool isOutputChannel(uint32_t channel);
- static bool isInputChannel(uint32_t channel);
- static bool isValidFormat(uint32_t format);
- static bool isLinearPCM(uint32_t format);
-
private:
class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
@@ -468,50 +236,6 @@ private:
static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
};
-class AudioParameter {
-
-public:
- AudioParameter() {}
- AudioParameter(const String8& keyValuePairs);
- virtual ~AudioParameter();
-
- // reserved parameter keys for changing standard parameters with setParameters() function.
- // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
- // configuration changes and act accordingly.
- // keyRouting: to change audio routing, value is an int in AudioSystem::audio_devices
- // keySamplingRate: to change sampling rate routing, value is an int
- // keyFormat: to change audio format, value is an int in AudioSystem::audio_format
- // keyChannels: to change audio channel configuration, value is an int in AudioSystem::audio_channels
- // keyFrameCount: to change audio output frame count, value is an int
- // keyInputSource: to change audio input source, value is an int in audio_source
- // (defined in media/mediarecorder.h)
- static const char *keyRouting;
- static const char *keySamplingRate;
- static const char *keyFormat;
- static const char *keyChannels;
- static const char *keyFrameCount;
- static const char *keyInputSource;
-
- String8 toString();
-
- status_t add(const String8& key, const String8& value);
- status_t addInt(const String8& key, const int value);
- status_t addFloat(const String8& key, const float value);
-
- status_t remove(const String8& key);
-
- status_t get(const String8& key, String8& value);
- status_t getInt(const String8& key, int& value);
- status_t getFloat(const String8& key, float& value);
- status_t getAt(size_t index, String8& key, String8& value);
-
- size_t size() { return mParameters.size(); }
-
-private:
- String8 mKeyValuePairs;
- KeyedVector <String8, String8> mParameters;
-};
-
}; // namespace android
#endif /*ANDROID_AUDIOSYSTEM_H_*/
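A hedged before/after sketch of the migration above: callers move from the removed AudioSystem:: enums to the audio_*_t constants pulled in from <hardware/audio.h> (queryMusicOutput() is a hypothetical helper; AUDIO_STREAM_MUSIC is assumed to be the replacement for AudioSystem::MUSIC):

    #include <media/AudioSystem.h>

    using namespace android;

    status_t queryMusicOutput(int* samplingRate, int* frameCount, uint32_t* latency) {
        // Previously: AudioSystem::getOutputSamplingRate(samplingRate, AudioSystem::MUSIC);
        status_t err = AudioSystem::getOutputSamplingRate(samplingRate, AUDIO_STREAM_MUSIC);
        if (err != NO_ERROR) {
            return err;
        }
        err = AudioSystem::getOutputFrameCount(frameCount, AUDIO_STREAM_MUSIC);
        if (err != NO_ERROR) {
            return err;
        }
        return AudioSystem::getOutputLatency(latency, AUDIO_STREAM_MUSIC);
    }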
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 3e346db..de928da 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -30,7 +30,6 @@
#include <binder/IMemory.h>
#include <utils/threads.h>
-
namespace android {
// ----------------------------------------------------------------------------
@@ -126,11 +125,11 @@ public:
* Parameters:
*
* streamType: Select the type of audio stream this track is attached to
- * (e.g. AudioSystem::MUSIC).
+ * (e.g. AUDIO_STREAM_MUSIC).
* sampleRate: Track sampling rate in Hz.
- * format: Audio format (e.g AudioSystem::PCM_16_BIT for signed
+ * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
- * channels: Channel mask: see AudioSystem::audio_channels.
+ * channels: Channel mask: see audio_channels_t.
* frameCount: Total size of track PCM buffer in frames. This defines the
* latency of the track.
* flags: Reserved for future use.
diff --git a/include/media/EffectApi.h b/include/media/EffectApi.h
index b97c22e..a5ad846 100644
--- a/include/media/EffectApi.h
+++ b/include/media/EffectApi.h
@@ -602,9 +602,9 @@ enum audio_device_e {
// Audio mode
enum audio_mode_e {
- AUDIO_MODE_NORMAL, // device idle
- AUDIO_MODE_RINGTONE, // device ringing
- AUDIO_MODE_IN_CALL // audio call connected (VoIP or telephony)
+ AUDIO_EFFECT_MODE_NORMAL, // device idle
+ AUDIO_EFFECT_MODE_RINGTONE, // device ringing
+ AUDIO_EFFECT_MODE_IN_CALL, // audio call connected (VoIP or telephony)
};
// Values for "accessMode" field of buffer_config_t:
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 720a562..09b2bfe 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -26,6 +26,7 @@
#include <binder/IInterface.h>
#include <media/AudioSystem.h>
+#include <hardware/audio_policy.h>
namespace android {
@@ -39,42 +40,42 @@ public:
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
- virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
- AudioSystem::device_connection_state state,
+ virtual status_t setDeviceConnectionState(audio_devices_t device,
+ audio_policy_dev_state_t state,
const char *device_address) = 0;
- virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
const char *device_address) = 0;
virtual status_t setPhoneState(int state) = 0;
virtual status_t setRingerMode(uint32_t mode, uint32_t mask) = 0;
- virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config) = 0;
- virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage) = 0;
- virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0;
+ virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
- uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT) = 0;
+ audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_INDIRECT) = 0;
virtual status_t startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0) = 0;
virtual status_t stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0) = 0;
virtual void releaseOutput(audio_io_handle_t output) = 0;
virtual audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
- uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0) = 0;
+ audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0) = 0;
virtual status_t startInput(audio_io_handle_t input) = 0;
virtual status_t stopInput(audio_io_handle_t input) = 0;
virtual void releaseInput(audio_io_handle_t input) = 0;
- virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ virtual status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax) = 0;
- virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index) = 0;
- virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index) = 0;
- virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream) = 0;
- virtual uint32_t getDevicesForStream(AudioSystem::stream_type stream) = 0;
+ virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index) = 0;
+ virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index) = 0;
+ virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
+ virtual uint32_t getDevicesForStream(audio_stream_type_t stream) = 0;
virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0;
virtual status_t registerEffect(effect_descriptor_t *desc,
audio_io_handle_t output,
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index 8e3cdbb..1c1c268 100644
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -18,10 +18,11 @@
#ifndef ANDROID_IMEDIAMETADATARETRIEVER_H
#define ANDROID_IMEDIAMETADATARETRIEVER_H
-#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <binder/IMemory.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
namespace android {
@@ -30,7 +31,11 @@ class IMediaMetadataRetriever: public IInterface
public:
DECLARE_META_INTERFACE(MediaMetadataRetriever);
virtual void disconnect() = 0;
- virtual status_t setDataSource(const char* srcUrl) = 0;
+
+ virtual status_t setDataSource(
+ const char *srcUrl,
+ const KeyedVector<String8, String8> *headers = NULL) = 0;
+
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
virtual sp<IMemory> getFrameAtTime(int64_t timeUs, int option) = 0;
virtual sp<IMemory> extractAlbumArt() = 0;
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 70519ef..d552b2e 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -24,7 +24,6 @@
namespace android {
class Parcel;
-class ISurface;
class Surface;
class ISurfaceTexture;
@@ -52,6 +51,8 @@ public:
virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
virtual status_t setAuxEffectSendLevel(float level) = 0;
virtual status_t attachAuxEffect(int effectId) = 0;
+ virtual status_t setParameter(int key, const Parcel& request) = 0;
+ virtual status_t getParameter(int key, Parcel* reply) = 0;
// Invoke a generic method on the player by using opaque parcels
// for the request and reply.
diff --git a/include/media/IMediaPlayerClient.h b/include/media/IMediaPlayerClient.h
index eee6c97..daec1c7 100644
--- a/include/media/IMediaPlayerClient.h
+++ b/include/media/IMediaPlayerClient.h
@@ -28,7 +28,7 @@ class IMediaPlayerClient: public IInterface
public:
DECLARE_META_INTERFACE(MediaPlayerClient);
- virtual void notify(int msg, int ext1, int ext2) = 0;
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 16a9342..3c65147 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -33,7 +33,6 @@ namespace android {
class IMemory;
class IOMXObserver;
class IOMXRenderer;
-class ISurface;
class Surface;
class IOMX : public IInterface {
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index 0449122..27b7e4d 100644
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -30,7 +30,11 @@ class MediaMetadataRetrieverBase : public RefBase
public:
MediaMetadataRetrieverBase() {}
virtual ~MediaMetadataRetrieverBase() {}
- virtual status_t setDataSource(const char *url) = 0;
+
+ virtual status_t setDataSource(
+ const char *url,
+ const KeyedVector<String8, String8> *headers = NULL) = 0;
+
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
virtual VideoFrame* getFrameAtTime(int64_t timeUs, int option) = 0;
virtual MediaAlbumArt* extractAlbumArt() = 0;
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 117d7eb..f0401cc 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -32,7 +32,6 @@
namespace android {
class Parcel;
-class ISurface;
class Surface;
class ISurfaceTexture;
@@ -56,7 +55,8 @@ enum player_type {
// callback mechanism for passing messages to MediaPlayer object
-typedef void (*notify_callback_f)(void* cookie, int msg, int ext1, int ext2);
+typedef void (*notify_callback_f)(void* cookie,
+ int msg, int ext1, int ext2, const Parcel *obj);
// abstract base class - use MediaPlayerInterface
class MediaPlayerBase : public RefBase
@@ -85,7 +85,7 @@ public:
// audio data.
virtual status_t open(
uint32_t sampleRate, int channelCount,
- int format=AudioSystem::PCM_16_BIT,
+ int format=AUDIO_FORMAT_PCM_16_BIT,
int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
AudioCallback cb = NULL,
void *cookie = NULL) = 0;
@@ -132,6 +132,8 @@ public:
virtual status_t reset() = 0;
virtual status_t setLooping(int loop) = 0;
virtual player_type playerType() = 0;
+ virtual status_t setParameter(int key, const Parcel &request) = 0;
+ virtual status_t getParameter(int key, Parcel *reply) = 0;
// Invoke a generic method on the player by using opaque parcels
// for the request and reply.
@@ -160,9 +162,10 @@ public:
mCookie = cookie; mNotify = notifyFunc;
}
- void sendEvent(int msg, int ext1=0, int ext2=0) {
+ void sendEvent(int msg, int ext1=0, int ext2=0,
+ const Parcel *obj=NULL) {
Mutex::Autolock autoLock(mNotifyLock);
- if (mNotify) mNotify(mCookie, msg, ext1, ext2);
+ if (mNotify) mNotify(mCookie, msg, ext1, ext2, obj);
}
private:
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index c42346e..5fe7722 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -20,6 +20,8 @@
#include <media/mediarecorder.h>
+#include <hardware/audio.h>
+
namespace android {
class Surface;
@@ -29,7 +31,7 @@ struct MediaRecorderBase {
virtual ~MediaRecorderBase() {}
virtual status_t init() = 0;
- virtual status_t setAudioSource(audio_source as) = 0;
+ virtual status_t setAudioSource(audio_source_t as) = 0;
virtual status_t setVideoSource(video_source vs) = 0;
virtual status_t setOutputFormat(output_format of) = 0;
virtual status_t setAudioEncoder(audio_encoder ae) = 0;
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
new file mode 100644
index 0000000..290b748
--- /dev/null
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -0,0 +1,28 @@
+
+/*
+ * Copyright 2011, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MEMORY_LEAK_TRACK_UTIL_H
+#define MEMORY_LEAK_TRACK_UTIL_H
+
+namespace android {
+/*
+ * Dump the memory addresses of the calling process to the given fd.
+ */
+extern void dumpMemoryAddresses(int fd);
+
+};
+
+#endif // MEMORY_LEAK_TRACK_UTIL_H
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
index e905006..3e343e0 100644
--- a/include/media/mediametadataretriever.h
+++ b/include/media/mediametadataretriever.h
@@ -47,6 +47,12 @@ enum {
METADATA_KEY_ALBUMARTIST = 13,
METADATA_KEY_DISC_NUMBER = 14,
METADATA_KEY_COMPILATION = 15,
+ METADATA_KEY_HAS_AUDIO = 16,
+ METADATA_KEY_HAS_VIDEO = 17,
+ METADATA_KEY_VIDEO_WIDTH = 18,
+ METADATA_KEY_VIDEO_HEIGHT = 19,
+ METADATA_KEY_BITRATE = 20,
+
// Add more here...
};
@@ -56,7 +62,11 @@ public:
MediaMetadataRetriever();
~MediaMetadataRetriever();
void disconnect();
- status_t setDataSource(const char* dataSourceUrl);
+
+ status_t setDataSource(
+ const char *dataSourceUrl,
+ const KeyedVector<String8, String8> *headers = NULL);
+
status_t setDataSource(int fd, int64_t offset, int64_t length);
sp<IMemory> getFrameAtTime(int64_t timeUs, int option);
sp<IMemory> extractAlbumArt();
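
For illustration only (not part of the patch): a minimal sketch of the header-aware setDataSource(). The header name/value and the one-second seek point are illustrative, not mandated by the API:

// Sketch only: pass optional HTTP request headers through the new parameter.
#include <media/mediametadataretriever.h>
#include <utils/KeyedVector.h>
#include <utils/String8.h>

using namespace android;

static sp<IMemory> thumbnailFor(const char* url) {
    KeyedVector<String8, String8> headers;
    headers.add(String8("User-Agent"), String8("example-agent/1.0"));  // illustrative header

    MediaMetadataRetriever retriever;
    if (retriever.setDataSource(url, &headers) != OK) {
        return NULL;
    }
    return retriever.getFrameAtTime(1000000 /* 1 s, in microseconds */, 0 /* default option */);
}
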
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 528eeb9..241626c 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -37,6 +37,7 @@ enum media_event_type {
MEDIA_BUFFERING_UPDATE = 3,
MEDIA_SEEK_COMPLETE = 4,
MEDIA_SET_VIDEO_SIZE = 5,
+ MEDIA_TIMED_TEXT = 99,
MEDIA_ERROR = 100,
MEDIA_INFO = 200,
};
@@ -129,7 +130,7 @@ enum media_player_states {
class MediaPlayerListener: virtual public RefBase
{
public:
- virtual void notify(int msg, int ext1, int ext2) = 0;
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) = 0;
};
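
With the Parcel-carrying notify(), events such as the new MEDIA_TIMED_TEXT can deliver a payload to the listener. A minimal sketch (not part of the patch); how the payload is laid out inside the Parcel is not specified by this header:

// Sketch only: a listener that accepts the optional Parcel payload.
#include <binder/Parcel.h>
#include <media/mediaplayer.h>

using namespace android;

class TimedTextListener : public MediaPlayerListener {
public:
    virtual void notify(int msg, int ext1, int ext2, const Parcel* obj) {
        if (msg == MEDIA_TIMED_TEXT && obj != NULL && obj->dataSize() > 0) {
            // hand the parcel to whatever renders subtitles; its layout is
            // defined by the player implementation, not by this header
        } else {
            // legacy events still use only msg/ext1/ext2; obj may be NULL
        }
    }
};
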
class MediaPlayer : public BnMediaPlayerClient,
@@ -166,7 +167,7 @@ public:
status_t setLooping(int loop);
bool isLooping();
status_t setVolume(float leftVolume, float rightVolume);
- void notify(int msg, int ext1, int ext2);
+ void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
static sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat);
static sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat);
status_t invoke(const Parcel& request, Parcel *reply);
@@ -176,6 +177,9 @@ public:
int getAudioSessionId();
status_t setAuxEffectSendLevel(float level);
status_t attachAuxEffect(int effectId);
+ status_t setParameter(int key, const Parcel& request);
+ status_t getParameter(int key, Parcel* reply);
+
private:
void clear_l();
status_t seekTo_l(int msec);
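
The new setParameter()/getParameter() pair mirrors the IMediaPlayer additions earlier in this diff; the key space is defined elsewhere. A minimal sketch with a purely hypothetical key and payload:

// Sketch only: KEY_PARAMETER_EXAMPLE and its int32 payload are made-up
// placeholders; real keys are defined outside this header.
#include <binder/Parcel.h>
#include <media/mediaplayer.h>

using namespace android;

static const int KEY_PARAMETER_EXAMPLE = 1000;    // hypothetical

static status_t tweak(const sp<MediaPlayer>& player) {
    Parcel request;
    request.writeInt32(1);                        // hypothetical payload
    status_t err = player->setParameter(KEY_PARAMETER_EXAMPLE, request);
    if (err != OK) {
        return err;
    }
    Parcel reply;
    return player->getParameter(KEY_PARAMETER_EXAMPLE, &reply);
}
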
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index a710546..18a3c6a 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -33,23 +33,6 @@ class ICamera;
typedef void (*media_completion_f)(status_t status, void *cookie);
-/* Do not change these values without updating their counterparts
- * in media/java/android/media/MediaRecorder.java!
- */
-enum audio_source {
- AUDIO_SOURCE_DEFAULT = 0,
- AUDIO_SOURCE_MIC = 1,
- AUDIO_SOURCE_VOICE_UPLINK = 2,
- AUDIO_SOURCE_VOICE_DOWNLINK = 3,
- AUDIO_SOURCE_VOICE_CALL = 4,
- AUDIO_SOURCE_CAMCORDER = 5,
- AUDIO_SOURCE_VOICE_RECOGNITION = 6,
- AUDIO_SOURCE_VOICE_COMMUNICATION = 7,
- AUDIO_SOURCE_MAX = AUDIO_SOURCE_VOICE_COMMUNICATION,
-
- AUDIO_SOURCE_LIST_END // must be last - used to validate audio source type
-};
-
enum video_source {
VIDEO_SOURCE_DEFAULT = 0,
VIDEO_SOURCE_CAMERA = 1,
@@ -104,35 +87,62 @@ enum video_encoder {
};
/*
- * The state machine of the media_recorder uses a set of different state names.
- * The mapping between the media_recorder and the pvauthorengine is shown below:
- *
- * mediarecorder pvauthorengine
- * ----------------------------------------------------------------
- * MEDIA_RECORDER_ERROR ERROR
- * MEDIA_RECORDER_IDLE IDLE
- * MEDIA_RECORDER_INITIALIZED OPENED
- * MEDIA_RECORDER_DATASOURCE_CONFIGURED
- * MEDIA_RECORDER_PREPARED INITIALIZED
- * MEDIA_RECORDER_RECORDING RECORDING
+ * The state machine of the media_recorder.
*/
enum media_recorder_states {
+ // Error state.
MEDIA_RECORDER_ERROR = 0,
+
+ // Recorder was just created.
MEDIA_RECORDER_IDLE = 1 << 0,
+
+ // Recorder has been initialized.
MEDIA_RECORDER_INITIALIZED = 1 << 1,
+
+ // Configuration of the recorder has been completed.
MEDIA_RECORDER_DATASOURCE_CONFIGURED = 1 << 2,
+
+ // Recorder is ready to start.
MEDIA_RECORDER_PREPARED = 1 << 3,
+
+ // Recording is in progress.
MEDIA_RECORDER_RECORDING = 1 << 4,
};
// The "msg" code passed to the listener in notify.
enum media_recorder_event_type {
+ MEDIA_RECORDER_EVENT_LIST_START = 1,
MEDIA_RECORDER_EVENT_ERROR = 1,
- MEDIA_RECORDER_EVENT_INFO = 2
+ MEDIA_RECORDER_EVENT_INFO = 2,
+ MEDIA_RECORDER_EVENT_LIST_END = 99,
+
+ // Track related event types
+ MEDIA_RECORDER_TRACK_EVENT_LIST_START = 100,
+ MEDIA_RECORDER_TRACK_EVENT_ERROR = 100,
+ MEDIA_RECORDER_TRACK_EVENT_INFO = 101,
+ MEDIA_RECORDER_TRACK_EVENT_LIST_END = 1000,
};
+/*
+ * The (part of) "what" code passed to the listener in notify.
+ * When the error or info type is track specific, the what has
+ * the following layout:
+ * the left-most 16-bit is meant for error or info type.
+ * the right-most 4-bit is meant for track id.
+ * the rest is reserved.
+ *
+ *  | track id | reserved |  error or info type  |
+ *  31         28         16                     0
+ *
+ */
enum media_recorder_error_type {
- MEDIA_RECORDER_ERROR_UNKNOWN = 1
+ MEDIA_RECORDER_ERROR_UNKNOWN = 1,
+
+ // Track related error type
+ MEDIA_RECORDER_TRACK_ERROR_LIST_START = 100,
+ MEDIA_RECORDER_TRACK_ERROR_GENERAL = 100,
+ MEDIA_RECORDER_ERROR_VIDEO_NO_SYNC_FRAME = 200,
+ MEDIA_RECORDER_TRACK_ERROR_LIST_END = 1000,
};
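
Given the layout above, a track-specific "what" packs the track id into the top 4 bits and the error or info type into the low 16 bits. A minimal sketch (not part of the patch); the helper names are illustrative, not part of the API:

// Sketch only: pack/unpack helpers matching the layout described above.
static inline int makeTrackWhat(int trackId, int type) {
    return ((trackId & 0x0f) << 28) | (type & 0xffff);
}

static inline int trackIdOf(int what) { return (what >> 28) & 0x0f; }
static inline int typeOf(int what)    { return what & 0xffff; }

// e.g. makeTrackWhat(2, MEDIA_RECORDER_TRACK_ERROR_GENERAL) produces a "what"
// whose low 16 bits are 100 and whose top 4 bits hold track id 2.
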
// The codes are distributed as follow:
@@ -141,11 +151,15 @@ enum media_recorder_error_type {
//
enum media_recorder_info_type {
MEDIA_RECORDER_INFO_UNKNOWN = 1,
+
MEDIA_RECORDER_INFO_MAX_DURATION_REACHED = 800,
MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED = 801,
- MEDIA_RECORDER_INFO_COMPLETION_STATUS = 802,
- MEDIA_RECORDER_INFO_PROGRESS_FRAME_STATUS = 803,
- MEDIA_RECORDER_INFO_PROGRESS_TIME_STATUS = 804,
+
+ // All track-related informational events start here
+ MEDIA_RECORDER_TRACK_INFO_LIST_START = 1000,
+ MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS = 1000,
+ MEDIA_RECORDER_TRACK_INFO_PROGRESS_IN_TIME = 1001,
+ MEDIA_RECORDER_TRACK_INFO_LIST_END = 2000,
};
// ----------------------------------------------------------------------------
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index df5be32..765c039 100644
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -55,7 +55,7 @@ private:
status_t doProcessDirectory(
char *path, int pathRemaining, MediaScannerClient &client,
- ExceptionCheck exceptionCheck, void *exceptionEnv);
+ bool noMedia, ExceptionCheck exceptionCheck, void *exceptionEnv);
MediaScanner(const MediaScanner &);
MediaScanner &operator=(const MediaScanner &);
@@ -72,10 +72,9 @@ public:
void endFile();
virtual bool scanFile(const char* path, long long lastModified,
- long long fileSize, bool isDirectory) = 0;
+ long long fileSize, bool isDirectory, bool noMedia) = 0;
virtual bool handleStringTag(const char* name, const char* value) = 0;
virtual bool setMimeType(const char* mimeType) = 0;
- virtual bool addNoMediaFolder(const char* path) = 0;
protected:
void convertValues(uint32_t encoding);
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index d12ee9c..0b79324 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -108,6 +108,8 @@ private:
void reset();
+ uint32_t getNumFramesPendingPlayout() const;
+
AudioPlayer(const AudioPlayer &);
AudioPlayer &operator=(const AudioPlayer &);
};
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 9e6f0e2..20a9e16 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -24,16 +24,18 @@
#include <media/stagefright/MediaBuffer.h>
#include <utils/List.h>
+#include <hardware/audio.h>
+
namespace android {
class AudioRecord;
struct AudioSource : public MediaSource, public MediaBufferObserver {
// Note that the "channels" parameter is _not_ the number of channels,
- // but a bitmask of AudioSystem::audio_channels constants.
+ // but a bitmask of audio_channels_t constants.
AudioSource(
int inputSource, uint32_t sampleRate,
- uint32_t channels = AudioSystem::CHANNEL_IN_MONO);
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO);
status_t initCheck() const;
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 4a39fbf..bb25bae 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -99,34 +99,6 @@ public:
virtual sp<MetaData> getFormat();
/**
- * Retrieve the total number of video buffers available from
- * this source.
- *
- * This method is useful if these video buffers are used
- * for passing video frame data to other media components,
- * such as OMX video encoders, in order to eliminate the
- * memcpy of the data.
- *
- * @return the total numbner of video buffers. Returns 0 to
- * indicate that this source does not make the video
- * buffer information availalble.
- */
- size_t getNumberOfVideoBuffers() const;
-
- /**
- * Retrieve the individual video buffer available from
- * this source.
- *
- * @param index the index corresponding to the video buffer.
- * Valid range of the index is [0, n], where n =
- * getNumberOfVideoBuffers() - 1.
- *
- * @return the video buffer corresponding to the given index.
- * If index is out of range, 0 should be returned.
- */
- sp<IMemory> getVideoBuffer(size_t index) const;
-
- /**
* Tell whether this camera source stores meta data or real YUV
* frame data in video buffers.
*
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index f95e56a..6b6fcdf 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -75,15 +75,17 @@ public:
static void RegisterDefaultSniffers();
// for DRM
- virtual DecryptHandle* DrmInitialization() {
+ virtual sp<DecryptHandle> DrmInitialization() {
return NULL;
}
- virtual void getDrmInfo(DecryptHandle **handle, DrmManagerClient **client) {};
+ virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {};
virtual String8 getUri() {
return String8();
}
+ virtual String8 getMIMEType() const;
+
protected:
virtual ~DataSource() {}
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index 51a4343..6cf86dc 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -38,9 +38,9 @@ public:
virtual status_t getSize(off64_t *size);
- virtual DecryptHandle* DrmInitialization();
+ virtual sp<DecryptHandle> DrmInitialization();
- virtual void getDrmInfo(DecryptHandle **handle, DrmManagerClient **client);
+ virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
protected:
virtual ~FileSource();
@@ -52,7 +52,7 @@ private:
Mutex mLock;
/*for DRM*/
- DecryptHandle *mDecryptHandle;
+ sp<DecryptHandle> mDecryptHandle;
DrmManagerClient *mDrmManagerClient;
int64_t mDrmBufOffset;
int64_t mDrmBufSize;
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 5c5229d..15f86ea 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -157,7 +157,7 @@ private:
bool use32BitFileOffset() const;
bool exceedsFileDurationLimit();
bool isFileStreamable() const;
- void trackProgressStatus(const Track* track, int64_t timeUs, status_t err = OK);
+ void trackProgressStatus(size_t trackId, int64_t timeUs, status_t err = OK);
void writeCompositionMatrix(int32_t degrees);
MPEG4Writer(const MPEG4Writer &);
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 66dfff6..6a21627 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -45,6 +45,7 @@ extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 1a6d548..7cc993c 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -41,7 +41,17 @@ enum {
INFO_FORMAT_CHANGED = MEDIA_ERROR_BASE - 12,
INFO_DISCONTINUITY = MEDIA_ERROR_BASE - 13,
- ERROR_NO_LICENSE = MEDIA_ERROR_BASE - 14,
+ // The following constant values should be in sync with
+ // drm/drm_framework_common.h
+ DRM_ERROR_BASE = -2000,
+
+ ERROR_DRM_UNKNOWN = DRM_ERROR_BASE,
+ ERROR_DRM_NO_LICENSE = DRM_ERROR_BASE - 1,
+ ERROR_DRM_LICENSE_EXPIRED = DRM_ERROR_BASE - 2,
+ ERROR_DRM_SESSION_NOT_OPENED = DRM_ERROR_BASE - 3,
+ ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED = DRM_ERROR_BASE - 4,
+ ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5,
+ ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6,
// Heartbeat Error Codes
HEARTBEAT_ERROR_BASE = -3000,
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 4610135..1827c3e 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -83,13 +83,12 @@ struct audio_track_cblk_t
uint8_t frameSize;
uint8_t channelCount;
- uint16_t flags;
-
uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
- uint16_t waitTimeMs; // Cumulated wait time
+ uint16_t waitTimeMs; // Cumulated wait time
uint16_t sendLevel;
- uint16_t reserved;
+ volatile int32_t flags;
+
// Cache line boundary (32 bytes)
audio_track_cblk_t();
uint32_t stepUser(uint32_t frameCount);
@@ -98,6 +97,7 @@ struct audio_track_cblk_t
uint32_t framesAvailable();
uint32_t framesAvailable_l();
uint32_t framesReady();
+ bool tryLock();
};
diff --git a/include/private/opengles/gl_context.h b/include/private/opengles/gl_context.h
index c7db9a6..6b1fa77 100644
--- a/include/private/opengles/gl_context.h
+++ b/include/private/opengles/gl_context.h
@@ -26,14 +26,11 @@
#endif
#include <private/pixelflinger/ggl_context.h>
-#include <hardware/copybit.h>
#include <hardware/gralloc.h>
#include <GLES/gl.h>
#include <GLES/glext.h>
-struct android_native_buffer_t;
-
namespace android {
@@ -604,14 +601,6 @@ struct prims_t {
void (*renderTriangle)(GL, vertex_t*, vertex_t*, vertex_t*);
};
-struct copybits_context_t {
- // A handle to the blit engine, if it exists, else NULL.
- copybit_device_t* blitEngine;
- int32_t minScale;
- int32_t maxScale;
- android_native_buffer_t* drawSurfaceBuffer;
-};
-
struct ogles_context_t {
context_t rasterizer;
array_machine_t arrays __attribute__((aligned(32)));
@@ -636,13 +625,6 @@ struct ogles_context_t {
EGLSurfaceManager* surfaceManager;
EGLBufferObjectManager* bufferObjectManager;
- // copybits is only used if LIBAGL_USE_GRALLOC_COPYBITS is
- // defined, but it is always present because ogles_context_t is a public
- // struct that is used by clients of libagl. We want the size and offsets
- // to stay the same, whether or not LIBAGL_USE_GRALLOC_COPYBITS is defined.
-
- copybits_context_t copybits;
-
GLenum error;
static inline ogles_context_t* get() {
diff --git a/include/surfaceflinger/IGraphicBufferAlloc.h b/include/surfaceflinger/IGraphicBufferAlloc.h
index d996af7..01e4bd9 100644
--- a/include/surfaceflinger/IGraphicBufferAlloc.h
+++ b/include/surfaceflinger/IGraphicBufferAlloc.h
@@ -32,18 +32,10 @@ class IGraphicBufferAlloc : public IInterface
public:
DECLARE_META_INTERFACE(GraphicBufferAlloc);
- /* Create a new GraphicBuffer for the client to use. The server will
- * maintain a reference to the newly created GraphicBuffer until
- * freeAllGraphicBuffers is called.
+ /* Create a new GraphicBuffer for the client to use.
*/
virtual sp<GraphicBuffer> createGraphicBuffer(uint32_t w, uint32_t h,
PixelFormat format, uint32_t usage) = 0;
-
- /* Free all but one of the GraphicBuffer objects that the server is
- * currently referencing. If bufIndex is not a valid index of the buffers
- * the server is referencing, then all buffers are freed.
- */
- virtual void freeAllGraphicBuffersExcept(int bufIndex) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/surfaceflinger/ISurfaceComposerClient.h b/include/surfaceflinger/ISurfaceComposerClient.h
index a1e9e04..46b1bb7 100644
--- a/include/surfaceflinger/ISurfaceComposerClient.h
+++ b/include/surfaceflinger/ISurfaceComposerClient.h
@@ -64,7 +64,6 @@ public:
* Requires ACCESS_SURFACE_FLINGER permission
*/
virtual sp<ISurface> createSurface( surface_data_t* data,
- int pid,
const String8& name,
DisplayID display,
uint32_t w,
diff --git a/include/surfaceflinger/Surface.h b/include/surfaceflinger/Surface.h
index 9e0b5bb..3923e61 100644
--- a/include/surfaceflinger/Surface.h
+++ b/include/surfaceflinger/Surface.h
@@ -102,10 +102,6 @@ private:
friend class Test;
// videoEditor preview classes
friend class VideoEditorPreviewController;
-
- const sp<ISurface>& getISurface() const { return mSurface; }
-
-
friend class Surface;
SurfaceControl(
@@ -169,6 +165,7 @@ public:
// setSwapRectangle() is intended to be used by GL ES clients
void setSwapRectangle(const Rect& r);
+ sp<IBinder> asBinder() const;
private:
/*
@@ -226,7 +223,8 @@ private:
int dispatch_set_buffer_count(va_list args);
int dispatch_set_buffers_geometry(va_list args);
int dispatch_set_buffers_transform(va_list args);
-
+ int dispatch_set_buffers_timestamp(va_list args);
+
void setUsage(uint32_t reqUsage);
int connect(int api);
int disconnect(int api);
@@ -234,13 +232,13 @@ private:
int setBufferCount(int bufferCount);
int setBuffersGeometry(int w, int h, int format);
int setBuffersTransform(int transform);
+ int setBuffersTimestamp(int64_t timestamp);
/*
* private stuff...
*/
void init();
status_t validate(bool inCancelBuffer = false) const;
- sp<ISurface> getISurface() const;
// When the buffer pool is a fixed size we want to make sure SurfaceFlinger
// won't stall clients, so we require an extra buffer.
diff --git a/include/surfaceflinger/SurfaceComposerClient.h b/include/surfaceflinger/SurfaceComposerClient.h
index 25b2ebf..c61a5bf 100644
--- a/include/surfaceflinger/SurfaceComposerClient.h
+++ b/include/surfaceflinger/SurfaceComposerClient.h
@@ -79,7 +79,6 @@ public:
//! Create a surface
sp<SurfaceControl> createSurface(
- int pid, // pid of the process the surface is for
const String8& name,// name of the surface
DisplayID display, // Display to create this surface on
uint32_t w, // width in pixel
@@ -89,7 +88,6 @@ public:
);
sp<SurfaceControl> createSurface(
- int pid, // pid of the process the surface is for
DisplayID display, // Display to create this surface on
uint32_t w, // width in pixel
uint32_t h, // height in pixel
diff --git a/include/tts/TtsEngine.h b/include/tts/TtsEngine.h
deleted file mode 100644
index 998e353..0000000
--- a/include/tts/TtsEngine.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (C) 2009 Google Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <media/AudioSystem.h>
-
-// This header defines the interface used by the Android platform
-// to access Text-To-Speech functionality in shared libraries that implement
-// speech synthesis and the management of resources associated with the
-// synthesis.
-// An example of the implementation of this interface can be found in
-// FIXME: add path+name to implementation of default TTS engine
-// Libraries implementing this interface are used in:
-// frameworks/base/tts/jni/android_tts_SpeechSynthesis.cpp
-
-namespace android {
-
-#define ANDROID_TTS_ENGINE_PROPERTY_CONFIG "engineConfig"
-#define ANDROID_TTS_ENGINE_PROPERTY_PITCH "pitch"
-#define ANDROID_TTS_ENGINE_PROPERTY_RATE "rate"
-#define ANDROID_TTS_ENGINE_PROPERTY_VOLUME "volume"
-
-
-enum tts_synth_status {
- TTS_SYNTH_DONE = 0,
- TTS_SYNTH_PENDING = 1
-};
-
-enum tts_callback_status {
- TTS_CALLBACK_HALT = 0,
- TTS_CALLBACK_CONTINUE = 1
-};
-
-// The callback is used by the implementation of this interface to notify its
-// client, the Android TTS service, that the last requested synthesis has been
-// completed. // TODO reword
-// The callback for synthesis completed takes:
-// @param [inout] void *& - The userdata pointer set in the original
-// synth call
-// @param [in] uint32_t - Track sampling rate in Hz
-// @param [in] uint32_t - The audio format
-// @param [in] int - The number of channels
-// @param [inout] int8_t *& - A buffer of audio data only valid during the
-// execution of the callback
-// @param [inout] size_t & - The size of the buffer
-// @param [in] tts_synth_status - indicate whether the synthesis is done, or
-// if more data is to be synthesized.
-// @return TTS_CALLBACK_HALT to indicate the synthesis must stop,
-// TTS_CALLBACK_CONTINUE to indicate the synthesis must continue if
-// there is more data to produce.
-typedef tts_callback_status (synthDoneCB_t)(void *&, uint32_t,
- uint32_t, int, int8_t *&, size_t&, tts_synth_status);
-
-class TtsEngine;
-extern "C" TtsEngine* getTtsEngine();
-
-enum tts_result {
- TTS_SUCCESS = 0,
- TTS_FAILURE = -1,
- TTS_FEATURE_UNSUPPORTED = -2,
- TTS_VALUE_INVALID = -3,
- TTS_PROPERTY_UNSUPPORTED = -4,
- TTS_PROPERTY_SIZE_TOO_SMALL = -5,
- TTS_MISSING_RESOURCES = -6
-};
-
-enum tts_support_result {
- TTS_LANG_COUNTRY_VAR_AVAILABLE = 2,
- TTS_LANG_COUNTRY_AVAILABLE = 1,
- TTS_LANG_AVAILABLE = 0,
- TTS_LANG_MISSING_DATA = -1,
- TTS_LANG_NOT_SUPPORTED = -2
-};
-
-class TtsEngine
-{
-public:
- virtual ~TtsEngine() {}
-
- // Initialize the TTS engine and returns whether initialization succeeded.
- // @param synthDoneCBPtr synthesis callback function pointer
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result init(synthDoneCB_t synthDoneCBPtr, const char *engineConfig);
-
- // Shut down the TTS engine and releases all associated resources.
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result shutdown();
-
- // Interrupt synthesis and flushes any synthesized data that hasn't been
- // output yet. This will block until callbacks underway are completed.
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result stop();
-
- // Returns the level of support for the language, country and variant.
- // @return TTS_LANG_COUNTRY_VAR_AVAILABLE if the language, country and variant are supported,
- // and the corresponding resources are correctly installed
- // TTS_LANG_COUNTRY_AVAILABLE if the language and country are supported and the
- // corresponding resources are correctly installed, but there is no match for
- // the specified variant
- // TTS_LANG_AVAILABLE if the language is supported and the
- // corresponding resources are correctly installed, but there is no match for
- // the specified country and variant
- // TTS_LANG_MISSING_DATA if the required resources to provide any level of support
- // for the language are not correctly installed
- // TTS_LANG_NOT_SUPPORTED if the language is not supported by the TTS engine.
- virtual tts_support_result isLanguageAvailable(const char *lang, const char *country,
- const char *variant);
-
- // Load the resources associated with the specified language. The loaded
- // language will only be used once a call to setLanguage() with the same
- // language value is issued. Language and country values are coded according to the ISO three
- // letter codes for languages and countries, as can be retrieved from a java.util.Locale
- // instance. The variant value is encoded as the variant string retrieved from a
- // java.util.Locale instance built with that variant data.
- // @param lang pointer to the ISO three letter code for the language
- // @param country pointer to the ISO three letter code for the country
- // @param variant pointer to the variant code
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result loadLanguage(const char *lang, const char *country, const char *variant);
-
- // Load the resources associated with the specified language, country and Locale variant.
- // The loaded language will only be used once a call to setLanguageFromLocale() with the same
- // language value is issued. Language and country values are coded according to the ISO three
- // letter codes for languages and countries, as can be retrieved from a java.util.Locale
- // instance. The variant value is encoded as the variant string retrieved from a
- // java.util.Locale instance built with that variant data.
- // @param lang pointer to the ISO three letter code for the language
- // @param country pointer to the ISO three letter code for the country
- // @param variant pointer to the variant code
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result setLanguage(const char *lang, const char *country, const char *variant);
-
- // Retrieve the currently set language, country and variant, or empty strings if none of
- // parameters have been set. Language and country are represented by their 3-letter ISO code
- // @param[out] pointer to the retrieved 3-letter code language value
- // @param[out] pointer to the retrieved 3-letter code country value
- // @param[out] pointer to the retrieved variant value
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result getLanguage(char *language, char *country, char *variant);
-
- // Notifies the engine what audio parameters should be used for the synthesis.
- // This is meant to be used as a hint, the engine implementation will set the output values
- // to those of the synthesis format, based on a given hint.
- // @param[inout] encoding in: the desired audio sample format
- // out: the format used by the TTS engine
- // @param[inout] rate in: the desired audio sample rate
- // out: the sample rate used by the TTS engine
- // @param[inout] channels in: the desired number of audio channels
- // out: the number of channels used by the TTS engine
- // @return TTS_SUCCESS, or TTS_FAILURE
- virtual tts_result setAudioFormat(AudioSystem::audio_format& encoding, uint32_t& rate,
- int& channels);
-
- // Set a property for the the TTS engine
- // "size" is the maximum size of "value" for properties "property"
- // @param property pointer to the property name
- // @param value pointer to the property value
- // @param size maximum size required to store this type of property
- // @return TTS_PROPERTY_UNSUPPORTED, or TTS_SUCCESS, or TTS_FAILURE,
- // or TTS_VALUE_INVALID
- virtual tts_result setProperty(const char *property, const char *value,
- const size_t size);
-
- // Retrieve a property from the TTS engine
- // @param property pointer to the property name
- // @param[out] value pointer to the retrieved language value
- // @param[inout] iosize in: stores the size available to store the
- // property value.
- // out: stores the size required to hold the language
- // value if getLanguage() returned
- // TTS_PROPERTY_SIZE_TOO_SMALL, unchanged otherwise
- // @return TTS_PROPERTY_UNSUPPORTED, or TTS_SUCCESS,
- // or TTS_PROPERTY_SIZE_TOO_SMALL
- virtual tts_result getProperty(const char *property, char *value,
- size_t *iosize);
-
- // Synthesize the text.
- // As the synthesis is performed, the engine invokes the callback to notify
- // the TTS framework that it has filled the given buffer, and indicates how
- // many bytes it wrote. The callback is called repeatedly until the engine
- // has generated all the audio data corresponding to the text.
- // Note about the format of the input: the text parameter may use the
- // following elements
- // and their respective attributes as defined in the SSML 1.0 specification:
- // * lang
- // * say-as:
- // o interpret-as
- // * phoneme
- // * voice:
- // o gender,
- // o age,
- // o variant,
- // o name
- // * emphasis
- // * break:
- // o strength,
- // o time
- // * prosody:
- // o pitch,
- // o contour,
- // o range,
- // o rate,
- // o duration,
- // o volume
- // * mark
- // Differences between this text format and SSML are:
- // * full SSML documents are not supported
- // * namespaces are not supported
- // Text is coded in UTF-8.
- // @param text the UTF-8 text to synthesize
- // @param userdata pointer to be returned when the call is invoked
- // @param buffer the location where the synthesized data must be written
- // @param bufferSize the number of bytes that can be written in buffer
- // @return TTS_SUCCESS or TTS_FAILURE
- virtual tts_result synthesizeText(const char *text, int8_t *buffer,
- size_t bufferSize, void *userdata);
-
-};
-
-} // namespace android
-
diff --git a/include/ui/Input.h b/include/ui/Input.h
index d9d77c4..9b92c73 100644
--- a/include/ui/Input.h
+++ b/include/ui/Input.h
@@ -27,6 +27,7 @@
#include <utils/Timers.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
+#include <utils/BitSet.h>
#ifdef HAVE_ANDROID_OS
class SkMatrix;
@@ -36,10 +37,16 @@ class SkMatrix;
* Additional private constants not defined in ndk/ui/input.h.
*/
enum {
- /*
- * Private control to determine when an app is tracking a key sequence.
- */
- AKEY_EVENT_FLAG_START_TRACKING = 0x40000000
+ /* Private control to determine when an app is tracking a key sequence. */
+ AKEY_EVENT_FLAG_START_TRACKING = 0x40000000,
+
+ /* Key event is inconsistent with previously sent key events. */
+ AKEY_EVENT_FLAG_TAINTED = 0x80000000,
+};
+
+enum {
+ /* Motion event is inconsistent with previously sent motion events. */
+ AMOTION_EVENT_FLAG_TAINTED = 0x80000000,
};
enum {
@@ -127,6 +134,12 @@ enum {
// input device or an application with system-wide event injection permission.
POLICY_FLAG_TRUSTED = 0x02000000,
+ // Indicates that the input event has passed through an input filter.
+ POLICY_FLAG_FILTERED = 0x04000000,
+
+ // Disables automatic key repeating behavior.
+ POLICY_FLAG_DISABLE_KEY_REPEAT = 0x08000000,
+
/* These flags are set by the input reader policy as it intercepts each event. */
// Indicates that the screen was off when the event was received and the event
@@ -208,6 +221,13 @@ struct PointerCoords {
status_t writeToParcel(Parcel* parcel) const;
#endif
+ bool operator==(const PointerCoords& other) const;
+ inline bool operator!=(const PointerCoords& other) const {
+ return !(*this == other);
+ }
+
+ void copyFrom(const PointerCoords& other);
+
private:
void tooManyAxes(int axis);
};
@@ -303,10 +323,19 @@ public:
inline int32_t getAction() const { return mAction; }
+ inline int32_t getActionMasked() const { return mAction & AMOTION_EVENT_ACTION_MASK; }
+
+ inline int32_t getActionIndex() const {
+ return (mAction & AMOTION_EVENT_ACTION_POINTER_INDEX_MASK)
+ >> AMOTION_EVENT_ACTION_POINTER_INDEX_SHIFT;
+ }
+
inline void setAction(int32_t action) { mAction = action; }
inline int32_t getFlags() const { return mFlags; }
+ inline void setFlags(int32_t flags) { mFlags = flags; }
+
inline int32_t getEdgeFlags() const { return mEdgeFlags; }
inline void setEdgeFlags(int32_t edgeFlags) { mEdgeFlags = edgeFlags; }
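
The new accessors split the packed action word: getActionMasked() drops the pointer-index bits, and getActionIndex() recovers which pointer an ACTION_POINTER_DOWN/UP refers to. A minimal sketch (not part of the patch), assuming the NDK AMOTION_EVENT_ACTION_* constants:

// Sketch only: decoding a multi-touch action with the new accessors.
static void onMotion(const android::MotionEvent& event) {
    switch (event.getActionMasked()) {
    case AMOTION_EVENT_ACTION_POINTER_DOWN: {
        int32_t index = event.getActionIndex();    // which pointer went down
        int32_t id = event.getPointerId(index);
        // begin tracking pointer 'id' ...
        (void) id;
        break;
    }
    case AMOTION_EVENT_ACTION_MOVE:
        // all pointers moved; walk event.getPointerCount() entries
        break;
    default:
        break;
    }
}
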
@@ -450,6 +479,8 @@ public:
AMOTION_EVENT_AXIS_ORIENTATION, pointerIndex, historicalIndex);
}
+ ssize_t findPointerIndex(int32_t pointerId) const;
+
void initialize(
int32_t deviceId,
int32_t source,
@@ -543,6 +574,72 @@ private:
};
/*
+ * Calculates the velocity of pointer movements over time.
+ */
+class VelocityTracker {
+public:
+ struct Position {
+ float x, y;
+ };
+
+ VelocityTracker();
+
+ // Resets the velocity tracker state.
+ void clear();
+
+ // Resets the velocity tracker state for specific pointers.
+ // Call this method when some pointers have changed and may be reusing
+ // an id that was assigned to a different pointer earlier.
+ void clearPointers(BitSet32 idBits);
+
+ // Adds movement information for a set of pointers.
+ // The idBits bitfield specifies the pointer ids of the pointers whose positions
+ // are included in the movement.
+ // The positions array contains position information for each pointer in order by
+ // increasing id. Its size should be equal to the number of one bits in idBits.
+ void addMovement(nsecs_t eventTime, BitSet32 idBits, const Position* positions);
+
+ // Adds movement information for all pointers in a MotionEvent, including historical samples.
+ void addMovement(const MotionEvent* event);
+
+ // Gets the velocity of the specified pointer id in position units per second.
+ // Returns false and sets the velocity components to zero if there is no movement
+ // information for the pointer.
+ bool getVelocity(uint32_t id, float* outVx, float* outVy) const;
+
+ // Gets the active pointer id, or -1 if none.
+ inline int32_t getActivePointerId() const { return mActivePointerId; }
+
+ // Gets a bitset containing all pointer ids from the most recent movement.
+ inline BitSet32 getCurrentPointerIdBits() const { return mMovements[mIndex].idBits; }
+
+private:
+ // Number of samples to keep.
+ static const uint32_t HISTORY_SIZE = 10;
+
+ // Oldest sample to consider when calculating the velocity.
+ static const nsecs_t MAX_AGE = 200 * 1000000; // 200 ms
+
+ // When the total duration of the window of samples being averaged is less
+ // than the window size, the resulting velocity is scaled to reduce the impact
+ // of overestimation in short traces.
+ static const nsecs_t MIN_WINDOW = 100 * 1000000; // 100 ms
+
+ // The minimum duration between samples when estimating velocity.
+ static const nsecs_t MIN_DURATION = 10 * 1000000; // 10 ms
+
+ struct Movement {
+ nsecs_t eventTime;
+ BitSet32 idBits;
+ Position positions[MAX_POINTERS];
+ };
+
+ uint32_t mIndex;
+ Movement mMovements[HISTORY_SIZE];
+ int32_t mActivePointerId;
+};
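
A minimal sketch (not part of the patch) of driving the tracker from MotionEvents and reading a fling velocity back out:

// Sketch only: feed the tracker during a gesture, query it on ACTION_UP.
static bool flingVelocity(android::VelocityTracker& tracker,
        const android::MotionEvent* event, float* vx, float* vy) {
    switch (event->getActionMasked()) {
    case AMOTION_EVENT_ACTION_DOWN:
        tracker.clear();                     // a new gesture begins
        tracker.addMovement(event);
        return false;
    case AMOTION_EVENT_ACTION_MOVE:
        tracker.addMovement(event);          // also ingests historical samples
        return false;
    case AMOTION_EVENT_ACTION_UP:
        tracker.addMovement(event);
        return tracker.getVelocity(event->getPointerId(0), vx, vy);
    default:
        return false;
    }
}
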
+
+/*
* Describes the characteristics and capabilities of an input device.
*/
class InputDeviceInfo {
diff --git a/include/ui/KeycodeLabels.h b/include/ui/KeycodeLabels.h
index b912e9b..8383957 100755
--- a/include/ui/KeycodeLabels.h
+++ b/include/ui/KeycodeLabels.h
@@ -228,6 +228,9 @@ static const KeycodeLabel KEYCODES[] = {
{ "BUTTON_14", 201 },
{ "BUTTON_15", 202 },
{ "BUTTON_16", 203 },
+ { "LANGUAGE_SWITCH", 204 },
+ { "MANNER_MODE", 205 },
+ { "3D_MODE", 206 },
// NOTE: If you add a new keycode here you must also add it to several other files.
// Refer to frameworks/base/core/java/android/view/KeyEvent.java for the full list.
diff --git a/include/ui/Region.h b/include/ui/Region.h
index 925fd06..6c9a620 100644
--- a/include/ui/Region.h
+++ b/include/ui/Region.h
@@ -24,8 +24,6 @@
#include <ui/Rect.h>
-#include <hardware/copybit.h>
-
namespace android {
// ---------------------------------------------------------------------------
@@ -183,27 +181,6 @@ Region& Region::operator -= (const Region& rhs) {
Region& Region::operator += (const Point& pt) {
return translateSelf(pt.x, pt.y);
}
-
-// ---------------------------------------------------------------------------
-
-struct region_iterator : public copybit_region_t {
- region_iterator(const Region& region)
- : b(region.begin()), e(region.end()) {
- this->next = iterate;
- }
-private:
- static int iterate(copybit_region_t const * self, copybit_rect_t* rect) {
- region_iterator const* me = static_cast<region_iterator const*>(self);
- if (me->b != me->e) {
- *reinterpret_cast<Rect*>(rect) = *me->b++;
- return 1;
- }
- return 0;
- }
- mutable Region::const_iterator b;
- Region::const_iterator const e;
-};
-
// ---------------------------------------------------------------------------
}; // namespace android
diff --git a/include/ui/egl/android_natives.h b/include/ui/egl/android_natives.h
index 0fc1ddf..0a6e4fb 100644
--- a/include/ui/egl/android_natives.h
+++ b/include/ui/egl/android_natives.h
@@ -57,7 +57,7 @@ typedef struct android_native_base_t
{
/* a magic value defined by the actual EGL native type */
int magic;
-
+
/* the sizeof() of the actual EGL native type */
int version;
@@ -129,6 +129,7 @@ enum {
NATIVE_WINDOW_SET_BUFFER_COUNT,
NATIVE_WINDOW_SET_BUFFERS_GEOMETRY,
NATIVE_WINDOW_SET_BUFFERS_TRANSFORM,
+ NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP,
};
/* parameter for NATIVE_WINDOW_[DIS]CONNECT */
@@ -157,7 +158,15 @@ enum {
NATIVE_WINDOW_SURFACE_TEXTURE_CLIENT, // SurfaceTextureClient
};
-struct ANativeWindow
+/* parameter for NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
+ *
+ * Special timestamp value to indicate that timestamps should be auto-generated
+ * by the native window when queueBuffer is called. This is equal to INT64_MIN,
+ * defined directly to avoid problems with C99/C++ inclusion of stdint.h.
+ */
+const int64_t NATIVE_WINDOW_TIMESTAMP_AUTO = (-9223372036854775807LL-1);
+
+struct ANativeWindow
{
#ifdef __cplusplus
ANativeWindow()
@@ -262,7 +271,8 @@ struct ANativeWindow
* NATIVE_WINDOW_SET_BUFFER_COUNT
* NATIVE_WINDOW_SET_BUFFERS_GEOMETRY
* NATIVE_WINDOW_SET_BUFFERS_TRANSFORM
- *
+ * NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
+ *
*/
int (*perform)(struct ANativeWindow* window,
@@ -389,6 +399,22 @@ static inline int native_window_set_buffers_transform(
transform);
}
+/*
+ * native_window_set_buffers_timestamp(..., int64_t timestamp)
+ * All buffers queued after this call will be associated with the timestamp
+ * parameter specified. If the timestamp is set to NATIVE_WINDOW_TIMESTAMP_AUTO
+ * (the default), timestamps will be generated automatically when queueBuffer is
+ * called. The timestamp is measured in nanoseconds, and must be monotonically
+ * increasing.
+ */
+static inline int native_window_set_buffers_timestamp(
+ ANativeWindow* window,
+ int64_t timestamp)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP,
+ timestamp);
+}
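
A minimal sketch (not part of the patch) of the two ways to use the new call:

// Sketch only: explicit, monotonically increasing timestamps per queued
// buffer, or the special value to return to automatic timestamping.
static void setFrameTime(ANativeWindow* window, int64_t frameTimeNs) {
    native_window_set_buffers_timestamp(window, frameTimeNs);
    // ... dequeue, fill, and queue the buffer; it carries frameTimeNs ...
}

static void useAutoTimestamps(ANativeWindow* window) {
    native_window_set_buffers_timestamp(window, NATIVE_WINDOW_TIMESTAMP_AUTO);
}
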
+
// ---------------------------------------------------------------------------
/* FIXME: this is legacy for pixmaps */
diff --git a/include/utils/BitSet.h b/include/utils/BitSet.h
index f5dbcd9..de748b5 100644
--- a/include/utils/BitSet.h
+++ b/include/utils/BitSet.h
@@ -61,6 +61,16 @@ struct BitSet32 {
// Result is undefined if all bits are marked.
inline uint32_t firstUnmarkedBit() const { return __builtin_clz(~ value); }
+ // Finds the last marked bit in the set.
+ // Result is undefined if all bits are unmarked.
+ inline uint32_t lastMarkedBit() const { return 31 - __builtin_ctz(value); }
+
+ // Gets the index of the specified bit in the set, which is the number of
+ // marked bits that appear before the specified bit.
+ inline uint32_t getIndexOfBit(uint32_t n) const {
+ return __builtin_popcount(value & ~(0xffffffffUL >> n));
+ }
+
inline bool operator== (const BitSet32& other) const { return value == other.value; }
inline bool operator!= (const BitSet32& other) const { return value != other.value; }
};
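
A small worked example of the new helpers (not part of the patch). In BitSet32, bit n maps to the mask (0x80000000 >> n), so bit 0 is the most significant bit, and getIndexOfBit(n) counts the marked bits that precede n; the markBit()/count() calls below are assumed to be the pre-existing helpers of this header:

// Sketch only: worked example of lastMarkedBit() and getIndexOfBit().
static void bitSetExample() {
    android::BitSet32 bits;
    bits.markBit(2);
    bits.markBit(5);
    bits.markBit(9);

    uint32_t last  = bits.lastMarkedBit();   // 9: highest-numbered marked bit
    uint32_t index = bits.getIndexOfBit(5);  // 1: only bit 2 is marked before bit 5
    uint32_t total = bits.count();           // 3 marked bits overall
    (void) last; (void) index; (void) total;
}
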
diff --git a/include/utils/GenerationCache.h b/include/utils/GenerationCache.h
new file mode 100644
index 0000000..bb9ddd6
--- /dev/null
+++ b/include/utils/GenerationCache.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_UTILS_GENERATION_CACHE_H
+#define ANDROID_UTILS_GENERATION_CACHE_H
+
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+/**
+ * GenerationCache callback used when an item is removed
+ */
+template<typename EntryKey, typename EntryValue>
+class OnEntryRemoved {
+public:
+ virtual ~OnEntryRemoved() { };
+ virtual void operator()(EntryKey& key, EntryValue& value) = 0;
+}; // class OnEntryRemoved
+
+template<typename EntryKey, typename EntryValue>
+struct Entry: public LightRefBase<Entry<EntryKey, EntryValue> > {
+ Entry() { }
+ Entry(const Entry<EntryKey, EntryValue>& e):
+ key(e.key), value(e.value), parent(e.parent), child(e.child) { }
+ Entry(sp<Entry<EntryKey, EntryValue> > e):
+ key(e->key), value(e->value), parent(e->parent), child(e->child) { }
+
+ EntryKey key;
+ EntryValue value;
+
+ sp<Entry<EntryKey, EntryValue> > parent;
+ sp<Entry<EntryKey, EntryValue> > child;
+}; // struct Entry
+
+/**
+ * An LRU-type cache
+ */
+template<typename K, typename V>
+class GenerationCache {
+public:
+ GenerationCache(uint32_t maxCapacity);
+ virtual ~GenerationCache();
+
+ enum Capacity {
+ kUnlimitedCapacity,
+ };
+
+ void setOnEntryRemovedListener(OnEntryRemoved<K, V>* listener);
+
+ void clear();
+
+ bool contains(K key) const;
+ V get(K key);
+ K getKeyAt(uint32_t index) const;
+ bool put(K key, V value);
+ V remove(K key);
+ V removeOldest();
+ V getValueAt(uint32_t index) const;
+
+ uint32_t size() const;
+
+ void addToCache(sp<Entry<K, V> > entry, K key, V value);
+ void attachToCache(sp<Entry<K, V> > entry);
+ void detachFromCache(sp<Entry<K, V> > entry);
+
+ V removeAt(ssize_t index);
+
+private:
+ KeyedVector<K, sp<Entry<K, V> > > mCache;
+ uint32_t mMaxCapacity;
+
+ OnEntryRemoved<K, V>* mListener;
+
+ sp<Entry<K, V> > mOldest;
+ sp<Entry<K, V> > mYoungest;
+}; // class GenerationCache
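
A minimal sketch (not part of the patch) of using the cache. The key/value types are illustrative; values should be comparable to NULL, since get()/remove() return NULL on a miss:

// Sketch only: basic GenerationCache use with an eviction listener.
using namespace android;

class LogRemoval : public OnEntryRemoved<int, const char*> {
public:
    virtual void operator()(int& key, const char*& value) {
        // release whatever 'value' owns for 'key'
        (void) key; (void) value;
    }
};

static void cacheExample() {
    GenerationCache<int, const char*> cache(2 /* max capacity */);
    LogRemoval listener;
    cache.setOnEntryRemovedListener(&listener);

    cache.put(1, "one");
    cache.put(2, "two");
    cache.put(3, "three");            // evicts the oldest entry (key 1); listener fires

    const char* hit  = cache.get(2);  // "two"; key 2 becomes the youngest
    const char* miss = cache.get(1);  // NULL
    (void) hit; (void) miss;
}
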
+
+template<typename K, typename V>
+GenerationCache<K, V>::GenerationCache(uint32_t maxCapacity): mMaxCapacity(maxCapacity),
+ mListener(NULL) {
+};
+
+template<typename K, typename V>
+GenerationCache<K, V>::~GenerationCache() {
+ clear();
+};
+
+template<typename K, typename V>
+uint32_t GenerationCache<K, V>::size() const {
+ return mCache.size();
+}
+
+/**
+ * Should be set by the user of the Cache so that the callback is called whenever an item is
+ * removed from the cache
+ */
+template<typename K, typename V>
+void GenerationCache<K, V>::setOnEntryRemovedListener(OnEntryRemoved<K, V>* listener) {
+ mListener = listener;
+}
+
+template<typename K, typename V>
+void GenerationCache<K, V>::clear() {
+ if (mListener) {
+ for (uint32_t i = 0; i < mCache.size(); i++) {
+ sp<Entry<K, V> > entry = mCache.valueAt(i);
+ if (mListener) {
+ (*mListener)(entry->key, entry->value);
+ }
+ }
+ }
+ mCache.clear();
+ mYoungest.clear();
+ mOldest.clear();
+}
+
+template<typename K, typename V>
+bool GenerationCache<K, V>::contains(K key) const {
+ return mCache.indexOfKey(key) >= 0;
+}
+
+template<typename K, typename V>
+K GenerationCache<K, V>::getKeyAt(uint32_t index) const {
+ return mCache.keyAt(index);
+}
+
+template<typename K, typename V>
+V GenerationCache<K, V>::getValueAt(uint32_t index) const {
+ return mCache.valueAt(index)->value;
+}
+
+template<typename K, typename V>
+V GenerationCache<K, V>::get(K key) {
+ ssize_t index = mCache.indexOfKey(key);
+ if (index >= 0) {
+ sp<Entry<K, V> > entry = mCache.valueAt(index);
+ if (entry.get()) {
+ detachFromCache(entry);
+ attachToCache(entry);
+ return entry->value;
+ }
+ }
+
+ return NULL;
+}
+
+template<typename K, typename V>
+bool GenerationCache<K, V>::put(K key, V value) {
+ if (mMaxCapacity != kUnlimitedCapacity && mCache.size() >= mMaxCapacity) {
+ removeOldest();
+ }
+
+ ssize_t index = mCache.indexOfKey(key);
+ if (index < 0) {
+ sp<Entry<K, V> > entry = new Entry<K, V>;
+ addToCache(entry, key, value);
+ return true;
+ }
+
+ return false;
+}
+
+template<typename K, typename V>
+void GenerationCache<K, V>::addToCache(sp<Entry<K, V> > entry, K key, V value) {
+ entry->key = key;
+ entry->value = value;
+ mCache.add(key, entry);
+ attachToCache(entry);
+}
+
+template<typename K, typename V>
+V GenerationCache<K, V>::remove(K key) {
+ ssize_t index = mCache.indexOfKey(key);
+ if (index >= 0) {
+ return removeAt(index);
+ }
+
+ return NULL;
+}
+
+template<typename K, typename V>
+V GenerationCache<K, V>::removeAt(ssize_t index) {
+ sp<Entry<K, V> > entry = mCache.valueAt(index);
+ if (mListener) {
+ (*mListener)(entry->key, entry->value);
+ }
+ mCache.removeItemsAt(index, 1);
+ detachFromCache(entry);
+
+ return entry->value;
+}
+
+template<typename K, typename V>
+V GenerationCache<K, V>::removeOldest() {
+ if (mOldest.get()) {
+ ssize_t index = mCache.indexOfKey(mOldest->key);
+ if (index >= 0) {
+ return removeAt(index);
+ }
+ }
+
+ return NULL;
+}
+
+template<typename K, typename V>
+void GenerationCache<K, V>::attachToCache(sp<Entry<K, V> > entry) {
+ if (!mYoungest.get()) {
+ mYoungest = mOldest = entry;
+ } else {
+ entry->parent = mYoungest;
+ mYoungest->child = entry;
+ mYoungest = entry;
+ }
+}
+
+template<typename K, typename V>
+void GenerationCache<K, V>::detachFromCache(sp<Entry<K, V> > entry) {
+ if (entry->parent.get()) {
+ entry->parent->child = entry->child;
+ }
+
+ if (entry->child.get()) {
+ entry->child->parent = entry->parent;
+ }
+
+ if (mOldest == entry) {
+ mOldest = entry->child;
+ }
+
+ if (mYoungest == entry) {
+ mYoungest = entry->parent;
+ }
+
+ entry->parent.clear();
+ entry->child.clear();
+}
+
+}; // namespace android
+
+#endif // ANDROID_UTILS_GENERATION_CACHE_H
diff --git a/include/utils/ResourceTypes.h b/include/utils/ResourceTypes.h
index 24e72e9..173412e 100644
--- a/include/utils/ResourceTypes.h
+++ b/include/utils/ResourceTypes.h
@@ -972,6 +972,14 @@ struct ResTable_config
uint32_t screenConfig;
};
+ union {
+ struct {
+ uint16_t screenWidthDp;
+ uint16_t screenHeightDp;
+ };
+ uint32_t screenSizeDp;
+ };
+
inline void copyFromDeviceNoSwap(const ResTable_config& o) {
const size_t size = dtohl(o.size);
if (size >= sizeof(ResTable_config)) {
@@ -992,6 +1000,8 @@ struct ResTable_config
screenHeight = dtohs(screenHeight);
sdkVersion = dtohs(sdkVersion);
minorVersion = dtohs(minorVersion);
+ screenWidthDp = dtohs(screenWidthDp);
+ screenHeightDp = dtohs(screenHeightDp);
}
inline void swapHtoD() {
@@ -1003,6 +1013,8 @@ struct ResTable_config
screenHeight = htods(screenHeight);
sdkVersion = htods(sdkVersion);
minorVersion = htods(minorVersion);
+ screenWidthDp = htods(screenWidthDp);
+ screenHeightDp = htods(screenHeightDp);
}
inline int compare(const ResTable_config& o) const {
@@ -1021,6 +1033,8 @@ struct ResTable_config
diff = (int32_t)(screenLayout - o.screenLayout);
if (diff != 0) return diff;
diff = (int32_t)(uiMode - o.uiMode);
+ if (diff != 0) return diff;
+ diff = (int32_t)(screenSizeDp - o.screenSizeDp);
return (int)diff;
}
@@ -1061,6 +1075,7 @@ struct ResTable_config
if (version != o.version) diffs |= CONFIG_VERSION;
if (screenLayout != o.screenLayout) diffs |= CONFIG_SCREEN_LAYOUT;
if (uiMode != o.uiMode) diffs |= CONFIG_UI_MODE;
+ if (screenSizeDp != o.screenSizeDp) diffs |= CONFIG_SCREEN_SIZE;
return diffs;
}
@@ -1105,6 +1120,18 @@ struct ResTable_config
}
}
+ if (screenSizeDp || o.screenSizeDp) {
+ if (screenWidthDp != o.screenWidthDp) {
+ if (!screenWidthDp) return false;
+ if (!o.screenWidthDp) return true;
+ }
+
+ if (screenHeightDp != o.screenHeightDp) {
+ if (!screenHeightDp) return false;
+ if (!o.screenHeightDp) return true;
+ }
+ }
+
if (orientation != o.orientation) {
if (!orientation) return false;
if (!o.orientation) return true;
@@ -1243,6 +1270,30 @@ struct ResTable_config
}
}
+ if (screenSizeDp || o.screenSizeDp) {
+ // Better is based on the sum of the difference between both
+ // width and height from the requested dimensions. We are
+ // assuming the invalid configs (with smaller dimens) have
+ // already been filtered. Note that if a particular dimension
+ // is unspecified, we will end up with a large value (the
+ // difference between 0 and the requested dimension), which is
+ // good since we will prefer a config that has specified a
+ // dimension value.
+ int myDelta = 0, otherDelta = 0;
+ if (requested->screenWidthDp) {
+ myDelta += requested->screenWidthDp - screenWidthDp;
+ otherDelta += requested->screenWidthDp - o.screenWidthDp;
+ }
+ if (requested->screenHeightDp) {
+ myDelta += requested->screenHeightDp - screenHeightDp;
+ otherDelta += requested->screenHeightDp - o.screenHeightDp;
+ }
+ //LOGI("Comparing this %dx%d to other %dx%d in %dx%d: myDelta=%d otherDelta=%d",
+ // screenWidthDp, screenHeightDp, o.screenWidthDp, o.screenHeightDp,
+ // requested->screenWidthDp, requested->screenHeightDp, myDelta, otherDelta);
+ return (myDelta <= otherDelta);
+ }
+
if ((orientation != o.orientation) && requested->orientation) {
return (orientation);
}
@@ -1426,6 +1477,18 @@ struct ResTable_config
return false;
}
}
+ if (screenSizeDp != 0) {
+ if (settings.screenWidthDp != 0 && screenWidthDp != 0
+ && screenWidthDp > settings.screenWidthDp) {
+ //LOGI("Filtering out width %d in requested %d", screenWidthDp, settings.screenWidthDp);
+ return false;
+ }
+ if (settings.screenHeightDp != 0 && screenHeightDp != 0
+ && screenHeightDp > settings.screenHeightDp) {
+ //LOGI("Filtering out height %d in requested %d", screenHeightDp, settings.screenHeightDp);
+ return false;
+ }
+ }
if (screenType != 0) {
if (settings.orientation != 0 && orientation != 0
&& orientation != settings.orientation) {
@@ -1505,13 +1568,13 @@ struct ResTable_config
String8 toString() const {
char buf[200];
sprintf(buf, "imsi=%d/%d lang=%c%c reg=%c%c orient=%d touch=%d dens=%d "
- "kbd=%d nav=%d input=%d scrnW=%d scrnH=%d sz=%d long=%d "
+ "kbd=%d nav=%d input=%d ssz=%dx%d %ddp x %ddp sz=%d long=%d "
"ui=%d night=%d vers=%d.%d",
mcc, mnc,
language[0] ? language[0] : '-', language[1] ? language[1] : '-',
country[0] ? country[0] : '-', country[1] ? country[1] : '-',
orientation, touchscreen, density, keyboard, navigation, inputFlags,
- screenWidth, screenHeight,
+ screenWidth, screenHeight, screenWidthDp, screenHeightDp,
screenLayout&MASK_SCREENSIZE, screenLayout&MASK_SCREENLONG,
uiMode&MASK_UI_MODE_TYPE, uiMode&MASK_UI_MODE_NIGHT,
sdkVersion, minorVersion);
diff --git a/include/utils/Timers.h b/include/utils/Timers.h
index 9a9e07c..8b4d322 100644
--- a/include/utils/Timers.h
+++ b/include/utils/Timers.h
@@ -88,6 +88,16 @@ nsecs_t systemTime(int clock = SYSTEM_TIME_MONOTONIC);
nsecs_t systemTime(int clock);
#endif // def __cplusplus
+/**
+ * Returns the number of milliseconds to wait between the reference time and the timeout time.
+ * If the timeout is in the past relative to the reference time, returns 0.
+ * If the timeout is more than INT_MAX milliseconds in the future relative to the reference time,
+ * such as when timeoutTime == LLONG_MAX, returns -1 to indicate an infinite timeout delay.
+ * Otherwise, returns the difference between the reference time and timeout time
+ * rounded up to the next millisecond.
+ */
+int toMillisecondTimeoutDelay(nsecs_t referenceTime, nsecs_t timeoutTime);
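
A minimal sketch (not part of the patch) of the intended use: turning an absolute deadline into a poll() timeout.

/* Sketch only: convert an absolute deadline into a poll() timeout.
 * A deadline of LLONG_MAX yields -1, i.e. block indefinitely. */
#include <poll.h>

static int waitUntil(struct pollfd* fds, int count, nsecs_t deadline) {
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    int timeoutMillis = toMillisecondTimeoutDelay(now, deadline); /* 0 if already due */
    return poll(fds, count, timeoutMillis);
}
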
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/include/utils/Vector.h b/include/utils/Vector.h
index 6fd307f..f1e87e6 100644
--- a/include/utils/Vector.h
+++ b/include/utils/Vector.h
@@ -165,6 +165,26 @@ public:
// for debugging only
inline size_t getItemSize() const { return itemSize(); }
+
+ /*
+ * these inlines add some level of compatibility with STL. eventually
+ * we should probably turn things around.
+ */
+ typedef TYPE* iterator;
+ typedef TYPE const* const_iterator;
+
+ inline iterator begin() { return editArray(); }
+ inline iterator end() { return editArray() + size(); }
+ inline const_iterator begin() const { return array(); }
+ inline const_iterator end() const { return array() + size(); }
+ inline void reserve(size_t n) { setCapacity(n); }
+ inline bool empty() const{ return isEmpty(); }
+ inline void push_back(const TYPE& item) { insertAt(item, size()); }
+ inline void push_front(const TYPE& item) { insertAt(item, 0); }
+ inline iterator erase(iterator pos) {
+ return begin() + removeItemsAt(pos-array());
+ }
+
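
A minimal sketch (not part of the patch) of the new STL-flavoured surface on Vector<TYPE>:

// Sketch only: iterating and appending through the STL-compatible inlines.
#include <utils/Vector.h>

static int sumAll() {
    android::Vector<int> values;
    values.reserve(8);                 // forwards to setCapacity()
    values.push_back(1);               // insertAt(item, size())
    values.push_back(2);
    values.push_front(0);              // insertAt(item, 0)

    int sum = 0;
    for (android::Vector<int>::const_iterator it = values.begin(); it != values.end(); ++it) {
        sum += *it;
    }
    return sum;                        // 0 + 1 + 2 = 3
}
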
protected:
virtual void do_construct(void* storage, size_t num) const;
virtual void do_destroy(void* storage, size_t num) const;