path: root/include
author    The Android Open Source Project <initial-contribution@android.com>  2013-11-22 10:35:20 -0800
committer The Android Open Source Project <initial-contribution@android.com>  2013-11-22 10:35:20 -0800
commit    5bf2560ce9b70bee077e0c264ac06648f0f63acc (patch)
tree      f17ecec5321e8d583d045135f29f14f3c7418e71 /include
parent    eb76f318e9daf91dbf195bcb74852b3bd736a32a (diff)
parent    b2059ff384eee8ffb70a7ec8fc5570405201c734 (diff)
Merge commit 'b2059ff384eee8ffb70a7ec8fc5570405201c734' into HEAD
Diffstat (limited to 'include')
-rw-r--r--  include/camera/Camera.h  20
-rw-r--r--  include/camera/CameraBase.h  7
-rw-r--r--  include/camera/CameraMetadata.h  32
-rw-r--r--  include/camera/ICamera.h  18
-rw-r--r--  include/camera/ICameraClient.h  3
-rw-r--r--  include/camera/ICameraService.h  31
-rw-r--r--  include/camera/ICameraServiceListener.h  3
-rw-r--r--  include/camera/IProCameraCallbacks.h  5
-rw-r--r--  include/camera/IProCameraUser.h  3
-rw-r--r--  include/camera/ProCamera.h  9
-rw-r--r--  include/camera/camera2/CaptureRequest.h  42
-rw-r--r--  include/camera/camera2/ICameraDeviceCallbacks.h  75
-rw-r--r--  include/camera/camera2/ICameraDeviceUser.h  84
-rw-r--r--  include/cpustats/CentralTendencyStatistics.h  75
-rw-r--r--  include/cpustats/README.txt  6
-rw-r--r--  include/cpustats/ThreadCpuUsage.h  140
-rw-r--r--  include/media/AudioBufferProvider.h  15
-rw-r--r--  include/media/AudioRecord.h  289
-rw-r--r--  include/media/AudioSystem.h  36
-rw-r--r--  include/media/AudioTimestamp.h  33
-rw-r--r--  include/media/AudioTrack.h  395
-rw-r--r--  include/media/EffectsFactoryApi.h  24
-rw-r--r--  include/media/ExtendedAudioBufferProvider.h  8
-rw-r--r--  include/media/IAudioFlinger.h  21
-rw-r--r--  include/media/IAudioPolicyService.h  6
-rw-r--r--  include/media/IAudioRecord.h  6
-rw-r--r--  include/media/IAudioTrack.h  11
-rw-r--r--  include/media/IDrm.h  2
-rw-r--r--  include/media/IHDCP.h  26
-rw-r--r--  include/media/IMediaPlayerService.h  8
-rw-r--r--  include/media/IOMX.h  19
-rw-r--r--  include/media/IRemoteDisplayClient.h  2
-rw-r--r--  include/media/JetPlayer.h  2
-rw-r--r--  include/media/MediaPlayerInterface.h  19
-rw-r--r--  include/media/SoundPool.h  7
-rw-r--r--  include/media/ToneGenerator.h  4
-rw-r--r--  include/media/Visualizer.h  11
-rw-r--r--  include/media/mediaplayer.h  14
-rw-r--r--  include/media/nbaio/AudioStreamOutSink.h  2
-rw-r--r--  include/media/nbaio/MonoPipe.h  10
-rw-r--r--  include/media/nbaio/MonoPipeReader.h  2
-rw-r--r--  include/media/nbaio/NBAIO.h  10
-rw-r--r--  include/media/nbaio/NBLog.h  10
-rw-r--r--  include/media/nbaio/SourceAudioBufferProvider.h  3
-rw-r--r--  include/media/stagefright/ACodec.h  16
-rw-r--r--  include/media/stagefright/AudioPlayer.h  29
-rw-r--r--  include/media/stagefright/AudioSource.h  2
-rw-r--r--  include/media/stagefright/MediaCodecList.h  3
-rw-r--r--  include/media/stagefright/MediaDefs.h  3
-rw-r--r--  include/media/stagefright/MediaErrors.h  5
-rw-r--r--  include/media/stagefright/MediaMuxer.h  10
-rw-r--r--  include/media/stagefright/OMXCodec.h  5
-rw-r--r--  include/media/stagefright/SurfaceMediaSource.h  10
-rw-r--r--  include/media/stagefright/Utils.h  11
-rw-r--r--  include/media/stagefright/foundation/ALooperRoster.h  1
-rw-r--r--  include/media/stagefright/foundation/ANetworkSession.h  135
-rw-r--r--  include/media/stagefright/foundation/ParsedMessage.h  60
-rw-r--r--  include/private/media/AudioTrackShared.h  426
58 files changed, 1855 insertions, 409 deletions
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 37626a4..79682b8 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -51,8 +51,14 @@ struct CameraTraits<Camera>
typedef CameraListener TCamListener;
typedef ICamera TCamUser;
typedef ICameraClient TCamCallbacks;
+ typedef status_t (ICameraService::*TCamConnectService)(const sp<ICameraClient>&,
+ int, const String16&, int,
+ /*out*/
+ sp<ICamera>&);
+ static TCamConnectService fnConnectService;
};
+
class Camera :
public CameraBase<Camera>,
public BnCameraClient
@@ -75,9 +81,9 @@ public:
status_t unlock();
// pass the buffered IGraphicBufferProducer to the camera service
- status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer);
+ status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer);
- // start preview mode, must call setPreviewDisplay first
+ // start preview mode, must call setPreviewTarget first
status_t startPreview();
// stop preview mode
@@ -86,7 +92,7 @@ public:
// get preview state
bool previewEnabled();
- // start recording mode, must call setPreviewDisplay first
+ // start recording mode, must call setPreviewTarget first
status_t startRecording();
// stop recording mode
@@ -121,7 +127,15 @@ public:
void setListener(const sp<CameraListener>& listener);
void setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener);
+
+ // Configure preview callbacks to the app. Only one of the older
+ // callbacks or the callback surface can be active at the same time;
+ // enabling one will disable the other if active. The flags can be
+ // disabled by passing CAMERA_FRAME_CALLBACK_FLAG_NOOP, and the
+ // callback target by passing a NULL interface.
void setPreviewCallbackFlags(int preview_callback_flag);
+ status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer);
sp<ICameraRecordingProxy> getRecordingProxy();
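
The comment block above spells out the reworked preview plumbing: setPreviewTarget() replaces setPreviewTexture(), and the legacy callback flags and the new callback surface are mutually exclusive. A minimal client-side sketch of wiring this up, assuming an already-connected Camera and two IGraphicBufferProducer handles (all helper names here are hypothetical; the CAMERA_FRAME_CALLBACK_FLAG_* constants come from <system/camera.h>):

    #include <camera/Camera.h>
    #include <system/camera.h>

    using namespace android;

    static status_t configurePreview(const sp<Camera>& camera,
                                     const sp<IGraphicBufferProducer>& previewProducer,
                                     const sp<IGraphicBufferProducer>& callbackProducer) {
        // Route preview frames to a buffer queue (replaces setPreviewTexture()).
        status_t err = camera->setPreviewTarget(previewProducer);
        if (err != NO_ERROR) return err;

        // Either enable the legacy per-frame callbacks into the app ...
        camera->setPreviewCallbackFlags(CAMERA_FRAME_CALLBACK_FLAG_CAMERA);

        // ... or deliver callback frames into a second buffer queue instead;
        // enabling this disables the flags above. Passing NULL disables it again,
        // just as CAMERA_FRAME_CALLBACK_FLAG_NOOP disables the flags.
        err = camera->setPreviewCallbackTarget(callbackProducer);
        if (err != NO_ERROR) return err;

        // startPreview() requires a preview target to have been set first.
        return camera->startPreview();
    }
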
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index 9b08c0f..1b93157 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -54,9 +54,10 @@ template <typename TCam, typename TCamTraits = CameraTraits<TCam> >
class CameraBase : public IBinder::DeathRecipient
{
public:
- typedef typename TCamTraits::TCamListener TCamListener;
- typedef typename TCamTraits::TCamUser TCamUser;
- typedef typename TCamTraits::TCamCallbacks TCamCallbacks;
+ typedef typename TCamTraits::TCamListener TCamListener;
+ typedef typename TCamTraits::TCamUser TCamUser;
+ typedef typename TCamTraits::TCamCallbacks TCamCallbacks;
+ typedef typename TCamTraits::TCamConnectService TCamConnectService;
static sp<TCam> connect(int cameraId,
const String16& clientPackageName,
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index 8eeb2e7..fe2bd19 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -22,6 +22,7 @@
#include <utils/Vector.h>
namespace android {
+class Parcel;
/**
* A convenience wrapper around the C-based camera_metadata_t library.
@@ -159,6 +160,12 @@ class CameraMetadata {
status_t erase(uint32_t tag);
/**
+ * Swap the underlying camera metadata between this and the other
+ * metadata object.
+ */
+ void swap(CameraMetadata &other);
+
+ /**
* Dump contents into FD for debugging. The verbosity levels are
* 0: Tag entry information only, no data values
* 1: Level 0 plus at most 16 data values per entry
@@ -169,6 +176,31 @@ class CameraMetadata {
*/
void dump(int fd, int verbosity = 1, int indentation = 0) const;
+ /**
+ * Serialization over Binder
+ */
+
+ // Metadata object is unchanged when reading from parcel fails.
+ status_t readFromParcel(Parcel *parcel);
+ status_t writeToParcel(Parcel *parcel) const;
+
+ /**
+ * Caller becomes the owner of the new metadata
+ * 'const Parcel' doesn't prevent us from calling the read functions,
+ * which is interesting since it changes the internal state
+ *
+ * NULL can be returned when no metadata was sent, OR if there was an issue
+ * unpacking the serialized data (i.e. bad parcel or invalid structure).
+ */
+ static status_t readFromParcel(const Parcel &parcel,
+ camera_metadata_t** out);
+ /**
+ * Caller retains ownership of metadata
+ * - Write 2 (int32 + blob) args in the current position
+ */
+ static status_t writeToParcel(Parcel &parcel,
+ const camera_metadata_t* metadata);
+
private:
camera_metadata_t *mBuffer;
bool mLocked;
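
The new readFromParcel()/writeToParcel() pair makes CameraMetadata directly marshallable over Binder, while the static overloads operate on raw camera_metadata_t for callers that manage ownership themselves. A minimal round-trip sketch of the instance methods (illustrative only):

    #include <binder/Parcel.h>
    #include <camera/CameraMetadata.h>

    using namespace android;

    static status_t roundTrip(const CameraMetadata& in, CameraMetadata* out) {
        Parcel parcel;
        // Writes two args (int32 size + blob) at the current parcel position.
        status_t err = in.writeToParcel(&parcel);
        if (err != OK) return err;

        parcel.setDataPosition(0);
        // On failure the destination metadata object is left unchanged.
        return out->readFromParcel(&parcel);
    }
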
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index 2236c1f..b025735 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -32,6 +32,9 @@ class Surface;
class ICamera: public IInterface
{
+ /**
+ * Keep up-to-date with ICamera.aidl in frameworks/base
+ */
public:
DECLARE_META_INTERFACE(Camera);
@@ -47,14 +50,21 @@ public:
virtual status_t unlock() = 0;
// pass the buffered IGraphicBufferProducer to the camera service
- virtual status_t setPreviewTexture(
+ virtual status_t setPreviewTarget(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
// set the preview callback flag to affect how the received frames from
- // preview are handled.
+ // preview are handled. Enabling preview callback flags disables any active
+ // preview callback surface set by setPreviewCallbackTarget().
virtual void setPreviewCallbackFlag(int flag) = 0;
-
- // start preview mode, must call setPreviewDisplay first
+ // set a buffer interface to use for client-received preview frames instead
+ // of preview callback buffers. Passing a valid interface here disables any
+ // active preview callbacks set by setPreviewCallbackFlag(). Passing NULL
+ // disables the use of the callback target.
+ virtual status_t setPreviewCallbackTarget(
+ const sp<IGraphicBufferProducer>& callbackProducer) = 0;
+
+ // start preview mode, must call setPreviewTarget first
virtual status_t startPreview() = 0;
// stop preview mode
diff --git a/include/camera/ICameraClient.h b/include/camera/ICameraClient.h
index b30aa7a..1584dba 100644
--- a/include/camera/ICameraClient.h
+++ b/include/camera/ICameraClient.h
@@ -28,6 +28,9 @@ namespace android {
class ICameraClient: public IInterface
{
+ /**
+ * Keep up-to-date with ICameraClient.aidl in frameworks/base
+ */
public:
DECLARE_META_INTERFACE(CameraClient);
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index aaf6eb3..f342122 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -28,17 +28,25 @@ class ICameraClient;
class IProCameraUser;
class IProCameraCallbacks;
class ICameraServiceListener;
+class ICameraDeviceUser;
+class ICameraDeviceCallbacks;
+class CameraMetadata;
class ICameraService : public IInterface
{
public:
+ /**
+ * Keep up-to-date with ICameraService.aidl in frameworks/base
+ */
enum {
GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
GET_CAMERA_INFO,
CONNECT,
CONNECT_PRO,
+ CONNECT_DEVICE,
ADD_LISTENER,
REMOVE_LISTENER,
+ GET_CAMERA_CHARACTERISTICS,
};
enum {
@@ -52,6 +60,9 @@ public:
virtual status_t getCameraInfo(int cameraId,
struct CameraInfo* cameraInfo) = 0;
+ virtual status_t getCameraCharacteristics(int cameraId,
+ CameraMetadata* cameraInfo) = 0;
+
// Returns 'OK' if operation succeeded
// - Errors: ALREADY_EXISTS if the listener was already added
virtual status_t addListener(const sp<ICameraServiceListener>& listener)
@@ -65,15 +76,27 @@ public:
* clientUid == USE_CALLING_UID, then the calling UID is used instead. Only
* trusted callers can set a clientUid other than USE_CALLING_UID.
*/
- virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient,
+ virtual status_t connect(const sp<ICameraClient>& cameraClient,
+ int cameraId,
+ const String16& clientPackageName,
+ int clientUid,
+ /*out*/
+ sp<ICamera>& device) = 0;
+
+ virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
int cameraId,
const String16& clientPackageName,
- int clientUid) = 0;
+ int clientUid,
+ /*out*/
+ sp<IProCameraUser>& device) = 0;
- virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb,
+ virtual status_t connectDevice(
+ const sp<ICameraDeviceCallbacks>& cameraCb,
int cameraId,
const String16& clientPackageName,
- int clientUid) = 0;
+ int clientUid,
+ /*out*/
+ sp<ICameraDeviceUser>& device) = 0;
};
// ----------------------------------------------------------------------------
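
All of the connect calls now return a status_t and hand the connected interface back through the trailing /*out*/ reference, so a failure reason is no longer conflated with a NULL binder. A sketch of the new convention for connectDevice() (package name illustrative; ICameraService::USE_CALLING_UID is assumed to be the existing "use the caller's UID" constant):

    static status_t openCameraDevice(const sp<ICameraService>& service,
                                     const sp<ICameraDeviceCallbacks>& callbacks,
                                     int cameraId,
                                     /*out*/ sp<ICameraDeviceUser>& device) {
        // Status is the return value; the interface comes back through 'device'.
        status_t err = service->connectDevice(callbacks, cameraId,
                                              String16("com.example.app"),
                                              ICameraService::USE_CALLING_UID,
                                              /*out*/ device);
        if (err != OK || device == NULL) {
            // err carries the actual failure (e.g. PERMISSION_DENIED) rather than
            // the caller having to infer it from a NULL sp<> as before.
            return err != OK ? err : NO_INIT;
        }
        return OK;
    }
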
diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h
index f2a11c2..0a0e43a 100644
--- a/include/camera/ICameraServiceListener.h
+++ b/include/camera/ICameraServiceListener.h
@@ -26,6 +26,9 @@ namespace android {
class ICameraServiceListener : public IInterface
{
+ /**
+ * Keep up-to-date with ICameraServiceListener.aidl in frameworks/base
+ */
public:
/**
diff --git a/include/camera/IProCameraCallbacks.h b/include/camera/IProCameraCallbacks.h
index 563ec17..e8abb89 100644
--- a/include/camera/IProCameraCallbacks.h
+++ b/include/camera/IProCameraCallbacks.h
@@ -30,6 +30,9 @@ namespace android {
class IProCameraCallbacks : public IInterface
{
+ /**
+ * Keep up-to-date with IProCameraCallbacks.aidl in frameworks/base
+ */
public:
DECLARE_META_INTERFACE(ProCameraCallbacks);
@@ -48,7 +51,7 @@ public:
/** Missing by design: implementation is client-side in ProCamera.cpp **/
// virtual void onBufferReceived(int streamId,
// const CpuConsumer::LockedBufer& buf);
- virtual void onResultReceived(int32_t frameId,
+ virtual void onResultReceived(int32_t requestId,
camera_metadata* result) = 0;
};
diff --git a/include/camera/IProCameraUser.h b/include/camera/IProCameraUser.h
index 45b818c..2ccc4d2 100644
--- a/include/camera/IProCameraUser.h
+++ b/include/camera/IProCameraUser.h
@@ -34,6 +34,9 @@ class Surface;
class IProCameraUser: public IInterface
{
+ /**
+ * Keep up-to-date with IProCameraUser.aidl in frameworks/base
+ */
public:
DECLARE_META_INTERFACE(ProCameraUser);
diff --git a/include/camera/ProCamera.h b/include/camera/ProCamera.h
index 3d1652f..83a3028 100644
--- a/include/camera/ProCamera.h
+++ b/include/camera/ProCamera.h
@@ -25,6 +25,7 @@
#include <camera/IProCameraUser.h>
#include <camera/Camera.h>
#include <camera/CameraMetadata.h>
+#include <camera/ICameraService.h>
#include <gui/CpuConsumer.h>
#include <gui/Surface.h>
@@ -87,8 +88,14 @@ struct CameraTraits<ProCamera>
typedef ProCameraListener TCamListener;
typedef IProCameraUser TCamUser;
typedef IProCameraCallbacks TCamCallbacks;
+ typedef status_t (ICameraService::*TCamConnectService)(const sp<IProCameraCallbacks>&,
+ int, const String16&, int,
+ /*out*/
+ sp<IProCameraUser>&);
+ static TCamConnectService fnConnectService;
};
+
class ProCamera :
public CameraBase<ProCamera>,
public BnProCameraCallbacks
@@ -245,7 +252,7 @@ protected:
virtual void onLockStatusChanged(
IProCameraCallbacks::LockStatus newLockStatus);
- virtual void onResultReceived(int32_t frameId,
+ virtual void onResultReceived(int32_t requestId,
camera_metadata* result);
private:
ProCamera(int cameraId);
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
new file mode 100644
index 0000000..e56d61f
--- /dev/null
+++ b/include/camera/camera2/CaptureRequest.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CAPTUREREQUEST_H
+#define ANDROID_HARDWARE_PHOTOGRAPHY_CAPTUREREQUEST_H
+
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+#include <camera/CameraMetadata.h>
+
+namespace android {
+
+class Surface;
+
+struct CaptureRequest : public virtual RefBase {
+public:
+
+ CameraMetadata mMetadata;
+ Vector<sp<Surface> > mSurfaceList;
+
+ /**
+ * Keep impl up-to-date with CaptureRequest.java in frameworks/base
+ */
+ status_t readFromParcel(Parcel* parcel);
+ status_t writeToParcel(Parcel* parcel) const;
+};
+}; // namespace android
+
+#endif
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
new file mode 100644
index 0000000..8dac4f2
--- /dev/null
+++ b/include/camera/camera2/ICameraDeviceCallbacks.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
+#define ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <binder/IMemory.h>
+#include <utils/Timers.h>
+#include <system/camera.h>
+
+namespace android {
+class CameraMetadata;
+
+class ICameraDeviceCallbacks : public IInterface
+{
+ /**
+ * Keep up-to-date with ICameraDeviceCallbacks.aidl in frameworks/base
+ */
+public:
+ DECLARE_META_INTERFACE(CameraDeviceCallbacks);
+
+ /**
+ * Error codes for CAMERA_MSG_ERROR
+ */
+ enum CameraErrorCode {
+ ERROR_CAMERA_DISCONNECTED = 0,
+ ERROR_CAMERA_DEVICE = 1,
+ ERROR_CAMERA_SERVICE = 2
+ };
+
+ // One way
+ virtual void onDeviceError(CameraErrorCode errorCode) = 0;
+
+ // One way
+ virtual void onDeviceIdle() = 0;
+
+ // One way
+ virtual void onCaptureStarted(int32_t requestId,
+ int64_t timestamp) = 0;
+
+ // One way
+ virtual void onResultReceived(int32_t requestId,
+ const CameraMetadata& result) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnCameraDeviceCallbacks : public BnInterface<ICameraDeviceCallbacks>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif
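
A client receives these one-way notifications by implementing BnCameraDeviceCallbacks and passing the binder to ICameraService::connectDevice(). A minimal sketch (class name and method bodies are illustrative):

    #define LOG_TAG "CameraDeviceClient"
    #include <utils/Log.h>
    #include <camera/CameraMetadata.h>
    #include <camera/camera2/ICameraDeviceCallbacks.h>

    using namespace android;

    class MyDeviceCallbacks : public BnCameraDeviceCallbacks {
    public:
        virtual void onDeviceError(CameraErrorCode errorCode) {
            ALOGE("camera device error %d", errorCode);
        }
        virtual void onDeviceIdle() {
            // All submitted work has drained; safe to reconfigure streams.
        }
        virtual void onCaptureStarted(int32_t requestId, int64_t timestamp) {
            ALOGV("capture for request %d started at %lld",
                    requestId, (long long)timestamp);
        }
        virtual void onResultReceived(int32_t requestId, const CameraMetadata& result) {
            // 'result' is only guaranteed valid during this call; copy it if
            // it is needed later.
        }
    };
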
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
new file mode 100644
index 0000000..f71f302
--- /dev/null
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
+#define ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
+
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+struct camera_metadata;
+
+namespace android {
+
+class ICameraDeviceUserClient;
+class IGraphicBufferProducer;
+class Surface;
+class CaptureRequest;
+class CameraMetadata;
+
+class ICameraDeviceUser : public IInterface
+{
+ /**
+ * Keep up-to-date with ICameraDeviceUser.aidl in frameworks/base
+ */
+public:
+ DECLARE_META_INTERFACE(CameraDeviceUser);
+
+ virtual void disconnect() = 0;
+
+ /**
+ * Request Handling
+ **/
+
+ virtual int submitRequest(sp<CaptureRequest> request,
+ bool streaming = false) = 0;
+ virtual status_t cancelRequest(int requestId) = 0;
+
+ virtual status_t deleteStream(int streamId) = 0;
+ virtual status_t createStream(
+ int width, int height, int format,
+ const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+ // Create a request object from a template.
+ virtual status_t createDefaultRequest(int templateId,
+ /*out*/
+ CameraMetadata* request) = 0;
+ // Get static camera metadata
+ virtual status_t getCameraInfo(/*out*/
+ CameraMetadata* info) = 0;
+
+ // Wait until all the submitted requests have finished processing
+ virtual status_t waitUntilIdle() = 0;
+
+ // Flush all pending and in-progress work as quickly as possible.
+ virtual status_t flush() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnCameraDeviceUser: public BnInterface<ICameraDeviceUser>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif
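
Together with CaptureRequest above, the interface implies the basic capture flow: register an output stream, fill in defaults from a template, submit the request (optionally as a streaming/repeating request), and wait for or flush outstanding work. A hedged sketch, assuming a device obtained via ICameraService::connectDevice(); the template id and pixel format constants are assumptions taken from the camera2 HAL headers:

    #include <camera/camera2/CaptureRequest.h>
    #include <camera/camera2/ICameraDeviceUser.h>
    #include <gui/Surface.h>
    #include <hardware/camera2.h>     // assumed: CAMERA2_TEMPLATE_STILL_CAPTURE
    #include <system/graphics.h>      // assumed: HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED

    using namespace android;

    static status_t captureOnce(const sp<ICameraDeviceUser>& device,
                                const sp<Surface>& surface,
                                const sp<IGraphicBufferProducer>& producer) {
        // Register an output stream backed by the producer (negative result = error).
        status_t res = device->createStream(/*width*/ 1920, /*height*/ 1080,
                                            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
                                            producer);
        if (res < 0) return res;

        // Fill a request with the default settings for a still capture.
        sp<CaptureRequest> request = new CaptureRequest();
        res = device->createDefaultRequest(CAMERA2_TEMPLATE_STILL_CAPTURE,
                                           /*out*/ &request->mMetadata);
        if (res != OK) return res;

        request->mSurfaceList.push(surface);

        // Non-streaming submit; the return value is a request id or a negative error.
        int requestId = device->submitRequest(request, /*streaming*/ false);
        if (requestId < 0) return requestId;

        // Block until everything submitted so far has been processed.
        return device->waitUntilIdle();
    }
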
diff --git a/include/cpustats/CentralTendencyStatistics.h b/include/cpustats/CentralTendencyStatistics.h
new file mode 100644
index 0000000..21b6981
--- /dev/null
+++ b/include/cpustats/CentralTendencyStatistics.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _CENTRAL_TENDENCY_STATISTICS_H
+#define _CENTRAL_TENDENCY_STATISTICS_H
+
+#include <math.h>
+
+// Not multithread safe
+class CentralTendencyStatistics {
+
+public:
+
+ CentralTendencyStatistics() :
+ mMean(NAN), mMedian(NAN), mMinimum(INFINITY), mMaximum(-INFINITY), mN(0), mM2(0),
+ mVariance(NAN), mVarianceKnownForN(0), mStddev(NAN), mStddevKnownForN(0) { }
+
+ ~CentralTendencyStatistics() { }
+
+ // add x to the set of samples
+ void sample(double x);
+
+ // return the arithmetic mean of all samples so far
+ double mean() const { return mMean; }
+
+ // return the minimum of all samples so far
+ double minimum() const { return mMinimum; }
+
+ // return the maximum of all samples so far
+ double maximum() const { return mMaximum; }
+
+ // return the variance of all samples so far
+ double variance() const;
+
+ // return the standard deviation of all samples so far
+ double stddev() const;
+
+ // return the number of samples added so far
+ unsigned n() const { return mN; }
+
+ // reset the set of samples to be empty
+ void reset();
+
+private:
+ double mMean;
+ double mMedian;
+ double mMinimum;
+ double mMaximum;
+ unsigned mN; // number of samples so far
+ double mM2;
+
+ // cached variance, and n at time of caching
+ mutable double mVariance;
+ mutable unsigned mVarianceKnownForN;
+
+ // cached standard deviation, and n at time of caching
+ mutable double mStddev;
+ mutable unsigned mStddevKnownForN;
+
+};
+
+#endif // _CENTRAL_TENDENCY_STATISTICS_H
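
The mMean/mM2 pair is the state of the standard online (Welford/Knuth) mean-and-variance update, which is presumably what sample() implements. A sketch of that update (illustrative, not the actual .cpp):

    // Numerically stable single-pass update of mean, M2, and extrema.
    void CentralTendencyStatistics::sample(double x)
    {
        if (x < mMinimum)
            mMinimum = x;
        if (x > mMaximum)
            mMaximum = x;
        if (mN == 0)
            mMean = 0;          // replace the initial NAN before the first update
        ++mN;
        double delta = x - mMean;
        mMean += delta / mN;
        mM2 += delta * (x - mMean);
    }

    // variance() can then return mM2 / (mN - 1) for mN > 1 (caching the result via
    // mVariance/mVarianceKnownForN), and stddev() is sqrt(variance()).
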
diff --git a/include/cpustats/README.txt b/include/cpustats/README.txt
new file mode 100644
index 0000000..14439f0
--- /dev/null
+++ b/include/cpustats/README.txt
@@ -0,0 +1,6 @@
+This is a static library of CPU usage statistics, originally written
+for audio but most are not actually specific to audio.
+
+Requirements to be here:
+ * should be related to CPU usage statistics
+ * should be portable to host; avoid Android OS dependencies without a conditional
diff --git a/include/cpustats/ThreadCpuUsage.h b/include/cpustats/ThreadCpuUsage.h
new file mode 100644
index 0000000..9756844
--- /dev/null
+++ b/include/cpustats/ThreadCpuUsage.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _THREAD_CPU_USAGE_H
+#define _THREAD_CPU_USAGE_H
+
+#include <fcntl.h>
+#include <pthread.h>
+
+namespace android {
+
+// Track CPU usage for the current thread.
+// Units are in per-thread CPU ns, as reported by
+// clock_gettime(CLOCK_THREAD_CPUTIME_ID). Simple usage: for cyclic
+// threads where you want to measure the execution time of the whole
+// cycle, just call sampleAndEnable() at the start of each cycle.
+// For acyclic threads, or for cyclic threads where you want to measure/track
+// only part of each cycle, call enable(), disable(), and/or setEnabled()
+// to demarcate the region(s) of interest, and then call sample() periodically.
+// This class is not thread-safe for concurrent calls from multiple threads;
+// the methods of this class may only be called by the current thread
+// which constructed the object.
+
+class ThreadCpuUsage
+{
+
+public:
+ ThreadCpuUsage() :
+ mIsEnabled(false),
+ mWasEverEnabled(false),
+ mAccumulator(0),
+ // mPreviousTs
+ // mMonotonicTs
+ mMonotonicKnown(false)
+ {
+ (void) pthread_once(&sOnceControl, &init);
+ for (int i = 0; i < sKernelMax; ++i) {
+ mCurrentkHz[i] = (uint32_t) ~0; // unknown
+ }
+ }
+
+ ~ThreadCpuUsage() { }
+
+ // Return whether currently tracking CPU usage by current thread
+ bool isEnabled() const { return mIsEnabled; }
+
+ // Enable tracking of CPU usage by current thread;
+ // any CPU used from this point forward will be tracked.
+ // Returns the previous enabled status.
+ bool enable() { return setEnabled(true); }
+
+ // Disable tracking of CPU usage by current thread;
+ // any CPU used from this point forward will be ignored.
+ // Returns the previous enabled status.
+ bool disable() { return setEnabled(false); }
+
+ // Set the enabled status and return the previous enabled status.
+ // This method is intended to be used for safe nested enable/disabling.
+ bool setEnabled(bool isEnabled);
+
+ // Add a sample point, and also enable tracking if needed.
+ // If tracking has never been enabled, then this call enables tracking but
+ // does _not_ add a sample -- it is not possible to add a sample the
+ // first time because there is no previous point to subtract from.
+ // Otherwise, if tracking is enabled,
+ // then adds a sample for tracked CPU ns since the previous
+ // sample, or since the first call to sampleAndEnable(), enable(), or
+ // setEnabled(true). If there was a previous sample but tracking is
+ // now disabled, then adds a sample for the tracked CPU ns accumulated
+ // up until the most recent disable(), resets this accumulator, and then
+ // enables tracking. Calling this method rather than enable() followed
+ // by sample() avoids a race condition for the first sample.
+ // Returns true if the sample 'ns' is valid, or false if invalid.
+ // Note that 'ns' is an output parameter passed by reference.
+ // The caller does not need to initialize this variable.
+ // The units are CPU nanoseconds consumed by current thread.
+ bool sampleAndEnable(double& ns);
+
+ // Add a sample point, but do not
+ // change the tracking enabled status. If tracking has either never been
+ // enabled, or has never been enabled since the last sample, then log a warning
+ // and don't add sample. Otherwise, adds a sample for tracked CPU ns since
+ // the previous sample or since the first call to sampleAndEnable(),
+ // enable(), or setEnabled(true) if no previous sample.
+ // Returns true if the sample is valid, or false if invalid.
+ // Note that 'ns' is an output parameter passed by reference.
+ // The caller does not need to initialize this variable.
+ // The units are CPU nanoseconds consumed by current thread.
+ bool sample(double& ns);
+
+ // Return the elapsed delta wall clock ns since initial enable or reset,
+ // as reported by clock_gettime(CLOCK_MONOTONIC).
+ long long elapsed() const;
+
+ // Reset elapsed wall clock. Has no effect on tracking or accumulator.
+ void resetElapsed();
+
+ // Return current clock frequency for specified CPU, in kHz.
+ // You can get your CPU number using sched_getcpu(2). Note that, unless CPU affinity
+ // has been configured appropriately, the CPU number can change.
+ // Also note that, unless the CPU governor has been configured appropriately,
+ // the CPU frequency can change. And even if the CPU frequency is locked down
+ // to a particular value, the frequency might still be adjusted
+ // to prevent thermal overload. Therefore you should poll for your thread's
+ // current CPU number and clock frequency periodically.
+ uint32_t getCpukHz(int cpuNum);
+
+private:
+ bool mIsEnabled; // whether tracking is currently enabled
+ bool mWasEverEnabled; // whether tracking was ever enabled
+ long long mAccumulator; // accumulated thread CPU time since last sample, in ns
+ struct timespec mPreviousTs; // most recent thread CPU time, valid only if mIsEnabled is true
+ struct timespec mMonotonicTs; // most recent monotonic time
+ bool mMonotonicKnown; // whether mMonotonicTs has been set
+
+ static const int MAX_CPU = 8;
+ static int sScalingFds[MAX_CPU];// file descriptor per CPU for reading scaling_cur_freq
+ uint32_t mCurrentkHz[MAX_CPU]; // current CPU frequency in kHz, not static to avoid a race
+ static pthread_once_t sOnceControl;
+ static int sKernelMax; // like MAX_CPU, but determined at runtime == cpu/kernel_max + 1
+ static void init(); // called once at first ThreadCpuUsage construction
+ static pthread_mutex_t sMutex; // protects sScalingFds[] after initialization
+};
+
+} // namespace android
+
+#endif // _THREAD_CPU_USAGE_H
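
For the cyclic-thread case the class comment describes, a typical pattern is one sampleAndEnable() call per cycle, feeding the per-cycle CPU time into CentralTendencyStatistics. A minimal sketch (the per-cycle work is a hypothetical placeholder):

    #include <cpustats/CentralTendencyStatistics.h>
    #include <cpustats/ThreadCpuUsage.h>

    using namespace android;

    void doOneCycle();   // hypothetical per-cycle work done by this thread

    void measureCycles()
    {
        ThreadCpuUsage tcu;                 // only valid for the constructing thread
        CentralTendencyStatistics stats;
        for (int i = 0; i < 1000; ++i) {
            double ns;
            // First call enables tracking without producing a sample; subsequent
            // calls report thread CPU ns consumed since the previous call.
            if (tcu.sampleAndEnable(ns)) {
                stats.sample(ns);
            }
            doOneCycle();
        }
        // stats.mean(), stats.stddev(), stats.minimum(), stats.maximum() now
        // summarize per-cycle CPU cost; tcu.elapsed() gives wall-clock ns.
    }
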
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index 43e4de7..ef392f0 100644
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -26,6 +26,8 @@ class AudioBufferProvider
{
public:
+ // FIXME merge with AudioTrackShared::Buffer, AudioTrack::Buffer, and AudioRecord::Buffer
+ // and rename getNextBuffer() to obtainBuffer()
struct Buffer {
Buffer() : raw(NULL), frameCount(0) { }
union {
@@ -44,6 +46,19 @@ public:
// pts is the local time when the next sample yielded by getNextBuffer
// will be rendered.
// Pass kInvalidPTS if the PTS is unknown or not applicable.
+ // On entry:
+ // buffer != NULL
+ // buffer->raw unused
+ // buffer->frameCount maximum number of desired frames
+ // On successful return:
+ // status NO_ERROR
+ // buffer->raw non-NULL pointer to buffer->frameCount contiguous available frames
+ // buffer->frameCount number of contiguous available frames at buffer->raw,
+ // 0 < buffer->frameCount <= entry value
+ // On error return:
+ // status != NO_ERROR
+ // buffer->raw NULL
+ // buffer->frameCount 0
virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
virtual void releaseBuffer(Buffer* buffer) = 0;
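
The entry/exit contract just documented implies the usual consumer loop: request up to N frames, consume the (possibly shorter) contiguous run that comes back, release it, and repeat. A sketch of such a consumer (processFrames() is a hypothetical callee):

    #include <media/AudioBufferProvider.h>

    using namespace android;

    void processFrames(const void* data, size_t frameCount);   // hypothetical consumer

    status_t drain(AudioBufferProvider* provider, size_t framesNeeded, int64_t pts)
    {
        while (framesNeeded > 0) {
            AudioBufferProvider::Buffer buffer;
            buffer.frameCount = framesNeeded;       // maximum number of desired frames
            status_t status = provider->getNextBuffer(&buffer, pts);
            if (status != NO_ERROR) {
                return status;                      // buffer.raw == NULL, frameCount == 0
            }
            // 0 < buffer.frameCount <= framesNeeded contiguous frames at buffer.raw
            processFrames(buffer.raw, buffer.frameCount);
            framesNeeded -= buffer.frameCount;
            provider->releaseBuffer(&buffer);
        }
        return NO_ERROR;
    }
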
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 38c6548..052064d 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -14,31 +14,27 @@
* limitations under the License.
*/
-#ifndef AUDIORECORD_H_
-#define AUDIORECORD_H_
+#ifndef ANDROID_AUDIORECORD_H
+#define ANDROID_AUDIORECORD_H
-#include <binder/IMemory.h>
#include <cutils/sched_policy.h>
#include <media/AudioSystem.h>
#include <media/IAudioRecord.h>
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
#include <utils/threads.h>
namespace android {
+// ----------------------------------------------------------------------------
+
class audio_track_cblk_t;
class AudioRecordClientProxy;
// ----------------------------------------------------------------------------
-class AudioRecord : virtual public RefBase
+class AudioRecord : public RefBase
{
public:
- static const int DEFAULT_SAMPLE_RATE = 8000;
-
/* Events used by AudioRecord callback function (callback_t).
* Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
*/
@@ -49,6 +45,8 @@ public:
// (See setMarkerPosition()).
EVENT_NEW_POS = 3, // Record head is at a new position
// (See setPositionUpdatePeriod()).
+ EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and
+ // voluntary invalidation by mediaserver, or mediaserver crash.
};
/* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -58,11 +56,17 @@ public:
class Buffer
{
public:
+ // FIXME use m prefix
size_t frameCount; // number of sample frames corresponding to size;
// on input it is the number of frames available,
// on output is the number of frames actually drained
// (currently ignored, but will become the primary field in future)
+
+ size_t size; // input/output in bytes == frameCount * frameSize
+ // FIXME this is redundant with respect to frameCount,
+ // and TRANSFER_OBTAIN mode is broken for 8-bit data
+ // since we don't define the frame format
- size_t size; // total size in bytes == frameCount * frameSize
union {
void* raw;
short* i16; // signed 16-bit
@@ -84,6 +88,7 @@ public:
* - EVENT_OVERRUN: unused.
* - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
* - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
+ * - EVENT_NEW_IAUDIORECORD: unused.
*/
typedef void (*callback_t)(int event, void* user, void *info);
@@ -101,94 +106,112 @@ public:
audio_format_t format,
audio_channel_mask_t channelMask);
+ /* How data is transferred from AudioRecord
+ */
+ enum transfer_type {
+ TRANSFER_DEFAULT, // not specified explicitly; determine from other parameters
+ TRANSFER_CALLBACK, // callback EVENT_MORE_DATA
+ TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+ TRANSFER_SYNC, // synchronous read()
+ };
+
/* Constructs an uninitialized AudioRecord. No connection with
- * AudioFlinger takes place.
+ * AudioFlinger takes place. Use set() after this.
*/
AudioRecord();
/* Creates an AudioRecord object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
- * Unspecified values are set to the audio hardware's current
- * values.
+ * Unspecified values are set to appropriate default values.
*
* Parameters:
*
- * inputSource: Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT).
- * sampleRate: Track sampling rate in Hz.
+ * inputSource: Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
+ * sampleRate: Data sink sampling rate in Hz.
* format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
- * channelMask: Channel mask.
+ * channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true.
* frameCount: Minimum size of track PCM buffer in frames. This defines the
* application's contribution to the
* latency of the track. The actual size selected by the AudioRecord could
* be larger if the requested size is not compatible with current audio HAL
* latency. Zero means to use a default value.
* cbf: Callback function. If not null, this function is called periodically
- * to consume new PCM data.
+ * to consume new PCM data and inform of marker, position updates, etc.
* user: Context for use by the callback receiver.
* notificationFrames: The callback function is called each time notificationFrames PCM
* frames are ready in record track output buffer.
* sessionId: Not yet supported.
+ * transferType: How data is transferred from AudioRecord.
+ * flags: See comments on audio_input_flags_t in <system/audio.h>
+ * threadCanCallJava: Not present in parameter list, and so is fixed at false.
*/
AudioRecord(audio_source_t inputSource,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
callback_t cbf = NULL,
void* user = NULL,
int notificationFrames = 0,
- int sessionId = 0);
-
+ int sessionId = 0,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE);
/* Terminates the AudioRecord and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioRecord.
*/
- ~AudioRecord();
-
+protected:
+ virtual ~AudioRecord();
+public:
- /* Initialize an uninitialized AudioRecord.
+ /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
+ * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful initialization
- * - INVALID_OPERATION: AudioRecord is already intitialized or record device is already in use
+ * - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
* - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
* - PERMISSION_DENIED: recording is not allowed for the requesting process
+ *
+ * Parameters not listed in the AudioRecord constructors above:
+ *
+ * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
*/
- status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
+ status_t set(audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
callback_t cbf = NULL,
void* user = NULL,
int notificationFrames = 0,
bool threadCanCallJava = false,
- int sessionId = 0);
-
+ int sessionId = 0,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE);
/* Result of constructing the AudioRecord. This must be checked
* before using any AudioRecord API (except for set()), because using
* an uninitialized AudioRecord produces undefined results.
* See set() method above for possible return codes.
*/
- status_t initCheck() const;
+ status_t initCheck() const { return mStatus; }
/* Returns this track's estimated latency in milliseconds.
* This includes the latency due to AudioRecord buffer size,
* and audio hardware driver.
*/
- uint32_t latency() const;
+ uint32_t latency() const { return mLatency; }
/* getters, see constructor and set() */
- audio_format_t format() const;
- uint32_t channelCount() const;
- size_t frameCount() const;
- size_t frameSize() const { return mFrameSize; }
- audio_source_t inputSource() const;
-
+ audio_format_t format() const { return mFormat; }
+ uint32_t channelCount() const { return mChannelCount; }
+ size_t frameCount() const { return mFrameCount; }
+ size_t frameSize() const { return mFrameSize; }
+ audio_source_t inputSource() const { return mInputSource; }
/* After it's created the track is not active. Call start() to
* make it active. If set, the callback will start being called.
@@ -198,26 +221,29 @@ public:
status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
int triggerSession = 0);
- /* Stop a track. If set, the callback will cease being called and
- * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
- * and will drain buffers until the pool is exhausted.
+ /* Stop a track. If set, the callback will cease being called. Note that obtainBuffer() still
+ * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
*/
void stop();
bool stopped() const;
- /* Get sample rate for this record track in Hz.
+ /* Return the sink sample rate for this record track in Hz.
+ * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
*/
- uint32_t getSampleRate() const;
+ uint32_t getSampleRate() const { return mSampleRate; }
/* Sets marker position. When record reaches the number of frames specified,
* a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
* with marker == 0 cancels marker notification callback.
+ * To set a marker at a position which would compute as 0,
+ * a workaround is to set the marker at a nearby position such as ~0 or 1.
* If the AudioRecord has been opened with no callback function associated,
* the operation will fail.
*
* Parameters:
*
- * marker: marker position expressed in frames.
+ * marker: marker position expressed in wrapping (overflow) frame units,
+ * like the return value of getPosition().
*
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation
@@ -226,13 +252,13 @@ public:
status_t setMarkerPosition(uint32_t marker);
status_t getMarkerPosition(uint32_t *marker) const;
-
/* Sets position update period. Every time the number of frames specified has been recorded,
* a callback with event type EVENT_NEW_POS is called.
* Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
* callback.
* If the AudioRecord has been opened with no callback function associated,
* the operation will fail.
+ * Extremely small values may be rounded up to a value the implementation can support.
*
* Parameters:
*
@@ -245,13 +271,13 @@ public:
status_t setPositionUpdatePeriod(uint32_t updatePeriod);
status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const;
-
- /* Gets record head position. The position is the total number of frames
- * recorded since record start.
+ /* Return the total number of frames recorded since recording started.
+ * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+ * It is reset to zero by stop().
*
* Parameters:
*
- * position: Address where to return record head position within AudioRecord buffer.
+ * position: Address where to return record head position.
*
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation
@@ -276,38 +302,74 @@ public:
*
* Returned value:
* AudioRecord session ID.
+ *
+ * No lock needed because session ID doesn't change after first set().
*/
- int getSessionId() const;
-
- /* Obtains a buffer of "frameCount" frames. The buffer must be
- * drained entirely, and then released with releaseBuffer().
- * If the track is stopped, obtainBuffer() returns
- * STOPPED instead of NO_ERROR as long as there are buffers available,
- * at which point NO_MORE_BUFFERS is returned.
+ int getSessionId() const { return mSessionId; }
+
+ /* Obtains a buffer of up to "audioBuffer->frameCount" full frames.
+ * After draining these frames of data, the caller should release them with releaseBuffer().
+ * If the track buffer is not empty, obtainBuffer() returns as many contiguous
+ * full frames as are available immediately.
+ * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
+ * regardless of the value of waitCount.
+ * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
+ * maximum timeout based on waitCount; see chart below.
* Buffers will be returned until the pool
* is exhausted, at which point obtainBuffer() will either block
- * or return WOULD_BLOCK depending on the value of the "blocking"
+ * or return WOULD_BLOCK depending on the value of the "waitCount"
* parameter.
*
+ * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
+ * which should use read() or callback EVENT_MORE_DATA instead.
+ *
* Interpretation of waitCount:
* +n limits wait time to n * WAIT_PERIOD_MS,
* -1 causes an (almost) infinite wait time,
* 0 non-blocking.
+ *
+ * Buffer fields
+ * On entry:
+ * frameCount number of frames requested
+ * After error return:
+ * frameCount 0
+ * size 0
+ * raw undefined
+ * After successful return:
+ * frameCount actual number of frames available, <= number requested
+ * size actual number of bytes available
+ * raw pointer to the buffer
*/
- enum {
- NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value
- STOPPED = 1
- };
+ /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
+ status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+ __attribute__((__deprecated__));
- status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+private:
+ /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
+ * additional non-contiguous frames that are available immediately.
+ * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+ * in case the requested amount of frames is in two or more non-contiguous regions.
+ * FIXME requested and elapsed are both relative times. Consider changing to absolute time.
+ */
+ status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+ struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
- /* Release an emptied buffer of "frameCount" frames for AudioFlinger to re-fill. */
+ /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */
+ // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
void releaseBuffer(Buffer* audioBuffer);
-
/* As a convenience we provide a read() interface to the audio buffer.
- * This is implemented on top of obtainBuffer/releaseBuffer.
+ * Input parameter 'size' is in byte units.
+ * This is implemented on top of obtainBuffer/releaseBuffer. For best
+ * performance use callbacks. Returns actual number of bytes read >= 0,
+ * or one of the following negative status codes:
+ * INVALID_OPERATION AudioRecord is configured for streaming mode
+ * BAD_VALUE size is invalid
+ * WOULD_BLOCK when obtainBuffer() returns same, or
+ * AudioRecord was stopped during the read
+ * or any other error code returned by IAudioRecord::start() or restoreRecord_l().
*/
ssize_t read(void* buffer, size_t size);
@@ -338,66 +400,113 @@ private:
void resume(); // allow thread to execute, if not requested to exit
private:
+ void pauseInternal(nsecs_t ns = 0LL);
+ // like pause(), but only used internally within thread
+
friend class AudioRecord;
virtual bool threadLoop();
- AudioRecord& mReceiver;
+ AudioRecord& mReceiver;
virtual ~AudioRecordThread();
Mutex mMyLock; // Thread::mLock is private
Condition mMyCond; // Thread::mThreadExitedCondition is private
- bool mPaused; // whether thread is currently paused
+ bool mPaused; // whether thread is requested to pause at next loop entry
+ bool mPausedInt; // whether thread internally requests pause
+ nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
};
// body of AudioRecordThread::threadLoop()
- bool processAudioBuffer(const sp<AudioRecordThread>& thread);
+ // returns the maximum amount of time before we would like to run again, where:
+ // 0 immediately
+ // > 0 no later than this many nanoseconds from now
+ // NS_WHENEVER still active but no particular deadline
+ // NS_INACTIVE inactive so don't run again until re-started
+ // NS_NEVER never again
+ static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+ nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread);
+
+ // caller must hold lock on mLock for all _l methods
+ status_t openRecord_l(size_t epoch);
- status_t openRecord_l(uint32_t sampleRate,
- audio_format_t format,
- size_t frameCount,
- audio_io_handle_t input);
- audio_io_handle_t getInput_l();
- status_t restoreRecord_l(audio_track_cblk_t*& cblk);
+ // FIXME enum is faster than strcmp() for parameter 'from'
+ status_t restoreRecord_l(const char *from);
sp<AudioRecordThread> mAudioRecordThread;
mutable Mutex mLock;
- bool mActive; // protected by mLock
+ // Current client state: false = stopped, true = active. Protected by mLock. If more states
+ // are added, consider changing this to enum State { ... } mState as in AudioTrack.
+ bool mActive;
// for client callback handler
callback_t mCbf; // callback handler for events, or NULL
void* mUserData;
// for notification APIs
- uint32_t mNotificationFrames;
- uint32_t mRemainingFrames;
- uint32_t mMarkerPosition; // in frames
+ uint32_t mNotificationFramesReq; // requested number of frames between each
+ // notification callback
+ uint32_t mNotificationFramesAct; // actual number of frames between each
+ // notification callback
+ bool mRefreshRemaining; // processAudioBuffer() should refresh next 2
+
+ // These are private to processAudioBuffer(), and are not protected by a lock
+ uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
+ bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
+ int mObservedSequence; // last observed value of mSequence
+
+ uint32_t mMarkerPosition; // in wrapping (overflow) frame units
bool mMarkerReached;
uint32_t mNewPosition; // in frames
- uint32_t mUpdatePeriod; // in ms
+ uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
+
+ status_t mStatus;
// constant after constructor or set()
uint32_t mSampleRate;
size_t mFrameCount;
audio_format_t mFormat;
- uint8_t mChannelCount;
+ uint32_t mChannelCount;
size_t mFrameSize; // app-level frame size == AudioFlinger frame size
audio_source_t mInputSource;
- status_t mStatus;
- uint32_t mLatency;
+ uint32_t mLatency; // in ms
audio_channel_mask_t mChannelMask;
- audio_io_handle_t mInput; // returned by AudioSystem::getInput()
+ audio_input_flags_t mFlags;
int mSessionId;
+ transfer_type mTransfer;
+
+ audio_io_handle_t mInput; // returned by AudioSystem::getInput()
// may be changed if IAudioRecord object is re-created
sp<IAudioRecord> mAudioRecord;
sp<IMemory> mCblkMemory;
- audio_track_cblk_t* mCblk;
- void* mBuffers; // starting address of buffers in shared memory
+ audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
- int mPreviousPriority; // before start()
+ int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
- AudioRecordClientProxy* mProxy;
+ bool mAwaitBoost; // thread should wait for priority boost before running
+
+ // The proxy should only be referenced while a lock is held because the proxy isn't
+ // multi-thread safe.
+ // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+ // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+ // them around in case they are replaced during the obtainBuffer().
+ sp<AudioRecordClientProxy> mProxy;
+
+ bool mInOverrun; // whether recorder is currently in overrun state
+
+private:
+ class DeathNotifier : public IBinder::DeathRecipient {
+ public:
+ DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { }
+ protected:
+ virtual void binderDied(const wp<IBinder>& who);
+ private:
+ const wp<AudioRecord> mAudioRecord;
+ };
+
+ sp<DeathNotifier> mDeathNotifier;
+ uint32_t mSequence; // incremented for each new IAudioRecord attempt
};
}; // namespace android
-#endif /*AUDIORECORD_H_*/
+#endif // ANDROID_AUDIORECORD_H
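
With the explicit transfer_type added above, a simple blocking-read client maps onto TRANSFER_SYNC. The constructor arguments below follow the reworked signature; sample rate, format, and buffer size are illustrative:

    #include <media/AudioRecord.h>

    using namespace android;

    void recordSome()
    {
        sp<AudioRecord> record = new AudioRecord(
                AUDIO_SOURCE_MIC,
                48000,                          // sink sample rate in Hz
                AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_IN_MONO,
                0,                              // frameCount: 0 selects a default
                NULL, NULL, 0,                  // no callback, default notifications
                0,                              // sessionId: allocate a new session
                AudioRecord::TRANSFER_SYNC);    // data pulled with read()

        if (record->initCheck() != NO_ERROR || record->start() != NO_ERROR) {
            return;
        }
        int16_t pcm[1024];
        // read() is layered on obtainBuffer()/releaseBuffer(); 'size' is in bytes and
        // the return value is the number of bytes actually read, or a negative error.
        ssize_t bytesRead = record->read(pcm, sizeof(pcm));
        (void) bytesRead;
        record->stop();
    }
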
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index b11c812..225ef76 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -17,20 +17,18 @@
#ifndef ANDROID_AUDIOSYSTEM_H_
#define ANDROID_AUDIOSYSTEM_H_
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <media/IAudioFlinger.h>
-
+#include <hardware/audio_effect.h>
+#include <media/IAudioFlingerClient.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-
-/* XXX: Should be include by all the users instead */
-#include <media/AudioParameter.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
namespace android {
typedef void (*audio_error_callback)(status_t err);
+class IAudioFlinger;
class IAudioPolicyService;
class String8;
@@ -128,8 +126,10 @@ public:
// - BAD_VALUE: invalid parameter
// NOTE: this feature is not supported on all hardware platforms and it is
// necessary to check returned status before using the returned values.
- static status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
- audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getRenderPosition(audio_io_handle_t output,
+ size_t *halFrames,
+ size_t *dspFrames,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
// return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
static size_t getInputFramesLost(audio_io_handle_t ioHandle);
@@ -155,11 +155,11 @@ public:
class OutputDescriptor {
public:
OutputDescriptor()
- : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channels(0), frameCount(0), latency(0) {}
+ : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0) {}
uint32_t samplingRate;
- int32_t format;
- int32_t channels;
+ audio_format_t format;
+ audio_channel_mask_t channelMask;
size_t frameCount;
uint32_t latency;
};
@@ -197,7 +197,8 @@ public:
uint32_t samplingRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL);
static status_t startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
int session = 0);
@@ -245,6 +246,15 @@ public:
static uint32_t getPrimaryOutputSamplingRate();
static size_t getPrimaryOutputFrameCount();
+ static status_t setLowRamDevice(bool isLowRamDevice);
+
+ // Check if hw offload is possible for given format, stream type, sample rate,
+ // bit rate, duration, video and streaming or offload property is enabled
+ static bool isOffloadSupported(const audio_offload_info_t& info);
+
+ // check presence of audio flinger service.
+ // returns NO_ERROR if binding to service succeeds, DEAD_OBJECT otherwise
+ static status_t checkAudioFlinger();
// ----------------------------------------------------------------------------
private:
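
The new isOffloadSupported() and checkAudioFlinger() entry points let a player decide up front whether a compressed stream can go to an offloaded output. A hedged sketch; the audio_offload_info_t field names are assumed from <system/audio.h>, and a real caller should also initialize the struct's version/size fields with the proper initializer:

    #include <media/AudioSystem.h>
    #include <system/audio.h>

    using namespace android;

    bool canOffload(uint32_t sampleRate, audio_format_t format,
                    uint32_t bitRate, int64_t durationUs)
    {
        // Fail fast if mediaserver/AudioFlinger is not up yet.
        if (AudioSystem::checkAudioFlinger() != NO_ERROR) {
            return false;
        }
        audio_offload_info_t info = {};     // assumed field names; see <system/audio.h>
        info.sample_rate = sampleRate;
        info.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        info.format = format;
        info.stream_type = AUDIO_STREAM_MUSIC;
        info.bit_rate = bitRate;
        info.duration_us = durationUs;
        info.has_video = false;
        info.is_streaming = false;
        return AudioSystem::isOffloadSupported(info);
    }
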
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
new file mode 100644
index 0000000..c29c7e5
--- /dev/null
+++ b/include/media/AudioTimestamp.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_TIMESTAMP_H
+#define ANDROID_AUDIO_TIMESTAMP_H
+
+#include <time.h>
+
+class AudioTimestamp {
+public:
+ AudioTimestamp() : mPosition(0) {
+ mTime.tv_sec = 0;
+ mTime.tv_nsec = 0;
+ }
+ // FIXME change type to match android.media.AudioTrack
+ uint32_t mPosition; // a frame position in AudioTrack::getPosition() units
+ struct timespec mTime; // corresponding CLOCK_MONOTONIC when frame is expected to present
+};
+
+#endif // ANDROID_AUDIO_TIMESTAMP_H
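
A timestamp pairs a frame position with the CLOCK_MONOTONIC time at which that frame is expected to be presented, which lets a client translate any other frame position into an expected presentation time. An illustrative helper (not part of the header):

    #include <stdint.h>
    #include <media/AudioTimestamp.h>

    // Estimate when the most recently written frame will be presented, given a
    // timestamp (e.g. delivered with EVENT_NEW_TIMESTAMP) and total frames written.
    int64_t estimatePresentationTimeNs(const AudioTimestamp& ts,
                                       uint32_t framesWritten,
                                       uint32_t sampleRate)
    {
        int64_t tsNs = (int64_t)ts.mTime.tv_sec * 1000000000LL + ts.mTime.tv_nsec;
        // Both counters wrap in uint32_t, so the subtraction stays valid across wrap.
        uint32_t pendingFrames = framesWritten - ts.mPosition;
        return tsNs + (int64_t)pendingFrames * 1000000000LL / sampleRate;
    }
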
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 64f82bb..f2f9c22 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -17,18 +17,10 @@
#ifndef ANDROID_AUDIOTRACK_H
#define ANDROID_AUDIOTRACK_H
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <media/IAudioFlinger.h>
-#include <media/IAudioTrack.h>
-#include <media/AudioSystem.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
#include <cutils/sched_policy.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
+#include <media/IAudioTrack.h>
#include <utils/threads.h>
namespace android {
@@ -37,10 +29,11 @@ namespace android {
class audio_track_cblk_t;
class AudioTrackClientProxy;
+class StaticAudioTrackClientProxy;
// ----------------------------------------------------------------------------
-class AudioTrack : virtual public RefBase
+class AudioTrack : public RefBase
{
public:
enum channel_index {
@@ -49,7 +42,7 @@ public:
RIGHT = 1
};
- /* Events used by AudioTrack callback function (audio_track_cblk_t).
+ /* Events used by AudioTrack callback function (callback_t).
* Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
*/
enum event_type {
@@ -64,7 +57,15 @@ public:
// (See setMarkerPosition()).
EVENT_NEW_POS = 4, // Playback head is at a new position
// (See setPositionUpdatePeriod()).
- EVENT_BUFFER_END = 5 // Playback head is at the end of the buffer.
+ EVENT_BUFFER_END = 5, // Playback head is at the end of the buffer.
+ // Not currently used by android.media.AudioTrack.
+ EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and
+ // voluntary invalidation by mediaserver, or mediaserver crash.
+ EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
+ // back (after stop is called)
+ EVENT_NEW_TIMESTAMP = 8, // Delivered periodically and when there's a significant change
+ // in the mapping from frame position to presentation time.
+ // See AudioTimestamp for the information included with event.
};
/* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -74,19 +75,25 @@ public:
class Buffer
{
public:
+ // FIXME use m prefix
size_t frameCount; // number of sample frames corresponding to size;
// on input it is the number of frames desired,
// on output is the number of frames actually filled
+ // (currently ignored, but will become the primary field in the future)
+
+ size_t size; // input/output in bytes == frameCount * frameSize
+ // on output is the number of bytes actually filled
+ // FIXME this is redundant with respect to frameCount,
+ // and TRANSFER_OBTAIN mode is broken for 8-bit data
+ // since we don't define the frame format
- size_t size; // input/output in byte units
union {
void* raw;
- short* i16; // signed 16-bit
- int8_t* i8; // unsigned 8-bit, offset by 0x80
+ short* i16; // signed 16-bit
+ int8_t* i8; // unsigned 8-bit, offset by 0x80
};
};
-
/* As a convenience, if a callback is supplied, a handler thread
* is automatically created with the appropriate priority. This thread
* invokes the callback when a new buffer becomes available or various conditions occur.
@@ -100,9 +107,12 @@ public:
* written.
* - EVENT_UNDERRUN: unused.
* - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
- * - EVENT_MARKER: pointer to an uint32_t containing the marker position in frames.
- * - EVENT_NEW_POS: pointer to an uint32_t containing the new position in frames.
+ * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
+ * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
* - EVENT_BUFFER_END: unused.
+ * - EVENT_NEW_IAUDIOTRACK: unused.
+ * - EVENT_STREAM_END: unused.
+ * - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
*/
typedef void (*callback_t)(int event, void* user, void *info);
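A sketch of a client callback covering the new events; what 'info' points to for each event follows the list above, and the handler body is illustrative only:

    using namespace android;

    static void audioTrackCallback(int event, void* user, void* info)
    {
        switch (event) {
        case AudioTrack::EVENT_MORE_DATA: {
            AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
            // fill buffer->raw with up to buffer->size bytes of audio data,
            // then set buffer->size to the number of bytes actually provided
            buffer->size = 0;
            break;
        }
        case AudioTrack::EVENT_NEW_TIMESTAMP: {
            const AudioTimestamp* ts = static_cast<const AudioTimestamp*>(info);
            // record ts->mPosition / ts->mTime for A/V sync
            (void) ts;
            break;
        }
        case AudioTrack::EVENT_NEW_IAUDIOTRACK:  // track was re-created; no payload
        case AudioTrack::EVENT_STREAM_END:       // all queued data played out; no payload
        default:
            break;
        }
        (void) user;
    }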
@@ -112,11 +122,22 @@ public:
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation
* - NO_INIT: audio server or audio hardware not initialized
+ * - BAD_VALUE: unsupported configuration
*/
- static status_t getMinFrameCount(size_t* frameCount,
- audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
- uint32_t sampleRate = 0);
+ static status_t getMinFrameCount(size_t* frameCount,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate);
+
+ /* How data is transferred to AudioTrack
+ */
+ enum transfer_type {
+ TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters
+ TRANSFER_CALLBACK, // callback EVENT_MORE_DATA
+ TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+ TRANSFER_SYNC, // synchronous write()
+ TRANSFER_SHARED, // shared memory
+ };
/* Constructs an uninitialized AudioTrack. No connection with
* AudioFlinger takes place. Use set() after this.
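With the default arguments removed in the hunk above, getMinFrameCount() callers now pass the stream type and sample rate explicitly and must handle BAD_VALUE; a small sketch with a hypothetical helper name:

    // Query the minimum frame count for a 44.1 kHz music track.
    static android::status_t queryMinFrames(size_t* minFrames)
    {
        // Returns BAD_VALUE for an unsupported configuration,
        // NO_INIT if the audio server is not initialized.
        return android::AudioTrack::getMinFrameCount(minFrames,
                                                     AUDIO_STREAM_MUSIC,
                                                     44100 /* Hz */);
    }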
@@ -128,13 +149,13 @@ public:
* Unspecified values are set to appropriate default values.
* With this constructor, the track is configured for streaming mode.
* Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA.
- * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is deprecated.
+ * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed.
*
* Parameters:
*
* streamType: Select the type of audio stream this track is attached to
* (e.g. AUDIO_STREAM_MUSIC).
- * sampleRate: Track sampling rate in Hz.
+ * sampleRate: Data source sampling rate in Hz.
* format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
* channelMask: Channel mask.
@@ -149,21 +170,24 @@ public:
* user: Context for use by the callback receiver.
* notificationFrames: The callback function is called each time notificationFrames PCM
* frames have been consumed from track input buffer.
+ * This is expressed in units of frames at the initial source sample rate.
* sessionId: Specific session ID, or zero to use default.
- * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
- * If not present in parameter list, then fixed at false.
+ * transferType: How data is transferred to AudioTrack.
+ * threadCanCallJava: Not present in parameter list, and so is fixed at false.
*/
AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
callback_t cbf = NULL,
void* user = NULL,
int notificationFrames = 0,
- int sessionId = 0);
+ int sessionId = 0,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL);
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
@@ -174,38 +198,47 @@ public:
* The write() method is not supported in this case.
* It is recommended to pass a callback function to be notified of playback end by an
* EVENT_UNDERRUN event.
- * FIXME EVENT_MORE_DATA still occurs; it must be ignored.
*/
AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- const sp<IMemory>& sharedBuffer = 0,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const sp<IMemory>& sharedBuffer,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
callback_t cbf = NULL,
void* user = NULL,
int notificationFrames = 0,
- int sessionId = 0);
+ int sessionId = 0,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL);
/* Terminates the AudioTrack and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioTrack.
*/
- ~AudioTrack();
+protected:
+ virtual ~AudioTrack();
+public:
- /* Initialize an uninitialized AudioTrack.
+ /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
+ * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful initialization
* - INVALID_OPERATION: AudioTrack is already initialized
* - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
+ * If status is not equal to NO_ERROR, don't call any other APIs on this AudioTrack.
* If sharedBuffer is non-0, the frameCount parameter is ignored and
* replaced by the shared buffer's total allocated size in frame units.
+ *
+ * Parameters not listed in the AudioTrack constructors above:
+ *
+ * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
*/
- status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
+ status_t set(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
callback_t cbf = NULL,
@@ -213,9 +246,11 @@ public:
int notificationFrames = 0,
const sp<IMemory>& sharedBuffer = 0,
bool threadCanCallJava = false,
- int sessionId = 0);
+ int sessionId = 0,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL);
- /* Result of constructing the AudioTrack. This must be checked
+ /* Result of constructing the AudioTrack. This must be checked for successful initialization
* before using any AudioTrack API (except for set()), because using
* an uninitialized AudioTrack produces undefined results.
* See set() method above for possible return codes.
@@ -233,14 +268,15 @@ public:
audio_stream_type_t streamType() const { return mStreamType; }
audio_format_t format() const { return mFormat; }
- /* Return frame size in bytes, which for linear PCM is channelCount * (bit depth per channel / 8).
+ /* Return frame size in bytes, which for linear PCM is
+ * channelCount * (bit depth per channel / 8).
* channelCount is determined from channelMask, and bit depth comes from format.
* For non-linear formats, the frame size is typically 1 byte.
*/
- uint32_t channelCount() const { return mChannelCount; }
+ size_t frameSize() const { return mFrameSize; }
+ uint32_t channelCount() const { return mChannelCount; }
uint32_t frameCount() const { return mFrameCount; }
- size_t frameSize() const { return mFrameSize; }
/* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
@@ -249,14 +285,13 @@ public:
* make it active. If set, the callback will start being called.
* If the track was previously paused, volume is ramped up over the first mix buffer.
*/
- void start();
+ status_t start();
/* Stop a track.
* In static buffer mode, the track is stopped immediately.
- * In streaming mode, the callback will cease being called and
- * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
- * and will fill up buffers until the pool is exhausted.
- * The stop does not occur immediately: any data remaining in the buffer
+ * In streaming mode, the callback will cease being called. Note that obtainBuffer() still
+ * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+ * In streaming mode the stop does not occur immediately: any data remaining in the buffer
* is first drained, mixed, and output, and only then is the track marked as stopped.
*/
void stop();
@@ -270,7 +305,7 @@ public:
void flush();
/* Pause a track. After pause, the callback will cease being called and
- * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
+ * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
* and will fill up buffers until the pool is exhausted.
* Volume is ramped down over the next mix buffer following the pause request,
* and then the track is marked as paused. It can be resumed with ramp up by start().
@@ -294,11 +329,11 @@ public:
status_t setAuxEffectSendLevel(float level);
void getAuxEffectSendLevel(float* level) const;
- /* Set sample rate for this track in Hz, mostly used for games' sound effects
+ /* Set source sample rate for this track in Hz, mostly used for games' sound effects
*/
status_t setSampleRate(uint32_t sampleRate);
- /* Return current sample rate in Hz, or 0 if unknown */
+ /* Return current source sample rate in Hz, or 0 if unknown */
uint32_t getSampleRate() const;
/* Enables looping and sets the start and end points of looping.
@@ -306,20 +341,24 @@ public:
*
* Parameters:
*
- * loopStart: loop start expressed as the number of PCM frames played since AudioTrack start.
- * loopEnd: loop end expressed as the number of PCM frames played since AudioTrack start.
+ * loopStart: loop start in frames relative to start of buffer.
+ * loopEnd: loop end in frames relative to start of buffer.
* loopCount: number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
- * pending or active loop. loopCount = -1 means infinite looping.
+ * pending or active loop. loopCount == -1 means infinite looping.
*
* For proper operation the following condition must be respected:
- * (loopEnd-loopStart) <= framecount()
+ * loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount().
+ *
+ * If the loop period (loopEnd - loopStart) is too small for the implementation to support,
+ * setLoop() will return BAD_VALUE. loopCount must be >= -1.
+ *
*/
status_t setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
/* Sets marker position. When playback reaches the number of frames specified, a callback with
* event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
* notification callback. To set a marker at a position which would compute as 0,
- * a workaround is to the set the marker at a nearby position such as -1 or 1.
+ * a workaround is to set the marker at a nearby position such as ~0 or 1.
* If the AudioTrack has been opened with no callback function associated, the operation will
* fail.
*
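Under the new buffer-relative convention documented above (0 <= loopStart < loopEnd <= frameCount()), looping the whole static buffer looks like the following; the wrapper is hypothetical:

    // Loop the entire static buffer; loopCount == -1 loops forever,
    // loopCount == 0 cancels any pending or active loop.
    static android::status_t loopWholeBuffer(const android::sp<android::AudioTrack>& track,
                                             int loopCount)
    {
        return track->setLoop(0 /* loopStart */, track->frameCount() /* loopEnd */, loopCount);
    }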
@@ -354,18 +393,14 @@ public:
status_t setPositionUpdatePeriod(uint32_t updatePeriod);
status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const;
- /* Sets playback head position within AudioTrack buffer. The new position is specified
- * in number of frames.
- * This method must be called with the AudioTrack in paused or stopped state.
- * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames.
- * Therefore using this method makes sense only when playing a "static" audio buffer
- * as opposed to streaming.
- * The getPosition() method on the other hand returns the total number of frames played since
- * playback start.
+ /* Sets playback head position.
+ * Only supported for static buffer mode.
*
* Parameters:
*
- * position: New playback head position within AudioTrack buffer.
+ * position: New playback head position in frames relative to start of buffer.
+ * 0 <= position <= frameCount(). Note that end of buffer is permitted,
+ * but will result in an immediate underrun if started.
*
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation
@@ -378,8 +413,22 @@ public:
/* Return the total number of frames played since playback start.
* The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
* It is reset to zero by flush(), reload(), and stop().
+ *
+ * Parameters:
+ *
+ * position: Address where to return play head position.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: position is NULL
*/
- status_t getPosition(uint32_t *position);
+ status_t getPosition(uint32_t *position) const;
+
+ /* For static buffer mode only, this returns the current playback position in frames
+ * relative to start of buffer. It is analogous to the position units used by
+ * setLoop() and setPosition(). After underrun, the position will be at end of buffer.
+ */
+ status_t getBufferPosition(uint32_t *position);
/* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
* rewriting the buffer before restarting playback after a stop.
@@ -426,15 +475,19 @@ public:
*/
status_t attachAuxEffect(int effectId);
- /* Obtains a buffer of "frameCount" frames. The buffer must be
- * filled entirely, and then released with releaseBuffer().
- * If the track is stopped, obtainBuffer() returns
- * STOPPED instead of NO_ERROR as long as there are buffers available,
- * at which point NO_MORE_BUFFERS is returned.
+ /* Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
+ * After filling these slots with data, the caller should release them with releaseBuffer().
+ * If the track buffer is not full, obtainBuffer() returns as many contiguous
+ * [empty slots for] frames as are available immediately.
+ * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
+ * regardless of the value of waitCount.
+ * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
+ * maximum timeout based on waitCount; see chart below.
* Buffers will be returned until the pool
* is exhausted, at which point obtainBuffer() will either block
- * or return WOULD_BLOCK depending on the value of the "blocking"
+ * or return WOULD_BLOCK depending on the value of the "waitCount"
* parameter.
+ * Each sample is 16-bit signed PCM.
*
* obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
* which should use write() or callback EVENT_MORE_DATA instead.
@@ -457,33 +510,76 @@ public:
* raw pointer to the buffer
*/
- enum {
- NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value
- STOPPED = 1
- };
+ /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
+ status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+ __attribute__((__deprecated__));
- status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+private:
+ /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
+ * additional non-contiguous frames that are available immediately.
+ * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+ * in case the requested amount of frames is in two or more non-contiguous regions.
+ * FIXME requested and elapsed are both relative times. Consider changing to absolute time.
+ */
+ status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+ struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
- /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */
+//EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy
+// enum {
+// NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value
+// TEAR_DOWN = 0x80000002,
+// STOPPED = 1,
+// STREAM_END_WAIT,
+// STREAM_END
+// };
+
+ /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
+ // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
void releaseBuffer(Buffer* audioBuffer);
/* As a convenience we provide a write() interface to the audio buffer.
+ * Input parameter 'size' is in byte units.
* This is implemented on top of obtainBuffer/releaseBuffer. For best
* performance use callbacks. Returns actual number of bytes written >= 0,
* or one of the following negative status codes:
- * INVALID_OPERATION AudioTrack is configured for shared buffer mode
+ * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode
* BAD_VALUE size is invalid
- * STOPPED AudioTrack was stopped during the write
- * NO_MORE_BUFFERS when obtainBuffer() returns same
+ * WOULD_BLOCK when obtainBuffer() returns same, or
+ * AudioTrack was stopped during the write
* or any other error code returned by IAudioTrack::start() or restoreTrack_l().
- * Not supported for static buffer mode.
*/
ssize_t write(const void* buffer, size_t size);
/*
* Dumps the state of an audio track.
*/
- status_t dump(int fd, const Vector<String16>& args) const;
+ status_t dump(int fd, const Vector<String16>& args) const;
+
+ /*
+ * Return the total number of frames which AudioFlinger desired but were unavailable,
+ * and thus which resulted in an underrun. Reset to zero by stop().
+ */
+ uint32_t getUnderrunFrames() const;
+
+ /* Get the flags */
+ audio_output_flags_t getFlags() const { return mFlags; }
+
+ /* Set parameters - only possible when using direct output */
+ status_t setParameters(const String8& keyValuePairs);
+
+ /* Get parameters */
+ String8 getParameters(const String8& keys);
+
+ /* Poll for a timestamp on demand.
+ * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
+ * or if you need to get the most recent timestamp outside of the event callback handler.
+ * Caution: calling this method too often may be inefficient;
+ * if you need a high resolution mapping between frame position and presentation time,
+ * consider implementing that at application level, based on the low resolution timestamps.
+ * Returns NO_ERROR if timestamp is valid.
+ */
+ status_t getTimestamp(AudioTimestamp& timestamp);
protected:
/* copying audio tracks is not allowed */
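For the TRANSFER_SYNC path, the write() contract documented earlier in this hunk (bytes actually written, or a negative status such as WOULD_BLOCK) leads to the usual short-write loop; a hedged sketch with a hypothetical helper:

    // Write an entire PCM buffer, retrying on short writes.
    static ssize_t writeAll(const android::sp<android::AudioTrack>& track,
                            const void* data, size_t size)
    {
        const uint8_t* p = static_cast<const uint8_t*>(data);
        size_t remaining = size;
        while (remaining > 0) {
            ssize_t written = track->write(p, remaining);
            if (written < 0) {
                return written;   // WOULD_BLOCK, BAD_VALUE, INVALID_OPERATION, ...
            }
            p += written;
            remaining -= written;
        }
        return (ssize_t) size;
    }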
@@ -504,39 +600,62 @@ protected:
void resume(); // allow thread to execute, if not requested to exit
private:
+ void pauseInternal(nsecs_t ns = 0LL);
+ // like pause(), but only used internally within thread
+
friend class AudioTrack;
virtual bool threadLoop();
- AudioTrack& mReceiver;
- ~AudioTrackThread();
+ AudioTrack& mReceiver;
+ virtual ~AudioTrackThread();
Mutex mMyLock; // Thread::mLock is private
Condition mMyCond; // Thread::mThreadExitedCondition is private
- bool mPaused; // whether thread is currently paused
+ bool mPaused; // whether thread is requested to pause at next loop entry
+ bool mPausedInt; // whether thread internally requests pause
+ nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
+ bool mIgnoreNextPausedInt; // whether to ignore next mPausedInt request
};
// body of AudioTrackThread::threadLoop()
- bool processAudioBuffer(const sp<AudioTrackThread>& thread);
+ // returns the maximum amount of time before we would like to run again, where:
+ // 0 immediately
+ // > 0 no later than this many nanoseconds from now
+ // NS_WHENEVER still active but no particular deadline
+ // NS_INACTIVE inactive so don't run again until re-started
+ // NS_NEVER never again
+ static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+ nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread);
+ status_t processStreamEnd(int32_t waitCount);
+
// caller must hold lock on mLock for all _l methods
+
status_t createTrack_l(audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
size_t frameCount,
audio_output_flags_t flags,
const sp<IMemory>& sharedBuffer,
- audio_io_handle_t output);
+ audio_io_handle_t output,
+ size_t epoch);
- // can only be called when !mActive
+ // can only be called when mState != STATE_ACTIVE
void flush_l();
- status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
+ void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
audio_io_handle_t getOutput_l();
- status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart);
- bool stopped_l() const { return !mActive; }
+ // FIXME enum is faster than strcmp() for parameter 'from'
+ status_t restoreTrack_l(const char *from);
+
+ bool isOffloaded() const
+ { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
+
+ // Next 3 fields may be changed if IAudioTrack is re-created, but always != 0
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
- sp<AudioTrackThread> mAudioTrackThread;
+ audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
+ sp<AudioTrackThread> mAudioTrackThread;
float mVolume[2];
float mSendLevel;
uint32_t mSampleRate;
@@ -544,62 +663,94 @@ protected:
size_t mReqFrameCount; // frame count to request the next time a new
// IAudioTrack is needed
- audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
-
- // Starting address of buffers in shared memory. If there is a shared buffer, mBuffers
- // is the value of pointer() for the shared buffer, otherwise mBuffers points
- // immediately after the control block. This address is for the mapping within client
- // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
- void* mBuffers;
+ // constant after constructor or set()
audio_format_t mFormat; // as requested by client, not forced to 16-bit
audio_stream_type_t mStreamType;
uint32_t mChannelCount;
audio_channel_mask_t mChannelMask;
+ transfer_type mTransfer;
- // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.
- // For 8-bit PCM data, mFrameSizeAF is
- // twice as large because data is expanded to 16-bit before being stored in buffer.
+ // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. For 8-bit PCM data, it's
+ // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
size_t mFrameSize; // app-level frame size
size_t mFrameSizeAF; // AudioFlinger frame size
status_t mStatus;
- uint32_t mLatency;
- bool mActive; // protected by mLock
+ // can change dynamically when IAudioTrack invalidated
+ uint32_t mLatency; // in ms
+ // Indicates the current track state. Protected by mLock.
+ enum State {
+ STATE_ACTIVE,
+ STATE_STOPPED,
+ STATE_PAUSED,
+ STATE_PAUSED_STOPPING,
+ STATE_FLUSHED,
+ STATE_STOPPING,
+ } mState;
+
+ // for client callback handler
callback_t mCbf; // callback handler for events, or NULL
- void* mUserData; // for client callback handler
+ void* mUserData;
// for notification APIs
uint32_t mNotificationFramesReq; // requested number of frames between each
- // notification callback
+ // notification callback,
+ // at initial source sample rate
uint32_t mNotificationFramesAct; // actual number of frames between each
- // notification callback
+ // notification callback,
+ // at initial source sample rate
+ bool mRefreshRemaining; // processAudioBuffer() should refresh next 2
+
+ // These are private to processAudioBuffer(), and are not protected by a lock
+ uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
+ bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
+ uint32_t mObservedSequence; // last observed value of mSequence
+
sp<IMemory> mSharedBuffer;
- int mLoopCount;
- uint32_t mRemainingFrames;
+ uint32_t mLoopPeriod; // in frames, zero means looping is disabled
uint32_t mMarkerPosition; // in wrapping (overflow) frame units
bool mMarkerReached;
uint32_t mNewPosition; // in frames
- uint32_t mUpdatePeriod; // in frames
+ uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
- bool mFlushed; // FIXME will be made obsolete by making flush() synchronous
audio_output_flags_t mFlags;
int mSessionId;
int mAuxEffectId;
- // When locking both mLock and mCblk->lock, must lock in this order to avoid deadlock:
- // 1. mLock
- // 2. mCblk->lock
- // It is OK to lock only mCblk->lock.
mutable Mutex mLock;
bool mIsTimed;
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
- AudioTrackClientProxy* mProxy;
bool mAwaitBoost; // thread should wait for priority boost before running
+
+ // The proxy should only be referenced while a lock is held because the proxy isn't
+ // multi-thread safe, especially the SingleStateQueue part of the proxy.
+ // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+ // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+ // them around in case they are replaced during the obtainBuffer().
+ sp<StaticAudioTrackClientProxy> mStaticProxy; // for type safety only
+ sp<AudioTrackClientProxy> mProxy; // primary owner of the memory
+
+ bool mInUnderrun; // whether track is currently in underrun state
+ String8 mName; // server's name for this IAudioTrack
+
+private:
+ class DeathNotifier : public IBinder::DeathRecipient {
+ public:
+ DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { }
+ protected:
+ virtual void binderDied(const wp<IBinder>& who);
+ private:
+ const wp<AudioTrack> mAudioTrack;
+ };
+
+ sp<DeathNotifier> mDeathNotifier;
+ uint32_t mSequence; // incremented for each new IAudioTrack attempt
+ audio_io_handle_t mOutput; // cached output io handle
};
class TimedAudioTrack : public AudioTrack
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index b1ed7b0..b1143b9 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -171,6 +171,30 @@ int EffectGetDescriptor(const effect_uuid_t *pEffectUuid, effect_descriptor_t *p
////////////////////////////////////////////////////////////////////////////////
int EffectIsNullUuid(const effect_uuid_t *pEffectUuid);
+////////////////////////////////////////////////////////////////////////////////
+//
+// Function: EffectGetSubEffects
+//
+// Description: Returns the descriptors of the sub effects of the effect
+// whose uuid is pointed to by first argument.
+//
+// Input:
+// pEffectUuid: pointer to the effect uuid.
+// size: size of the buffer pointed to by pDescriptors.
+//
+// Input/Output:
+// pDescriptors: address where to return the sub effect descriptors.
+//
+// Output:
+// returned value: 0 successful operation.
+// -ENODEV factory failed to initialize
+// -EINVAL invalid pEffectUuid or pDescriptors
+// -ENOENT no effect with this uuid found
+// *pDescriptors: updated with the sub effect descriptors.
+//
+////////////////////////////////////////////////////////////////////////////////
+int EffectGetSubEffects(const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptors, size_t size);
+
#if __cplusplus
} // extern "C"
#endif
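A caller-side sketch of the new factory entry point; the capacity of eight descriptors is an arbitrary choice for illustration, not a defined limit:

    #include <media/EffectsFactoryApi.h>

    int dumpSubEffects(const effect_uuid_t *uuid)
    {
        effect_descriptor_t descriptors[8];   // example capacity
        int ret = EffectGetSubEffects(uuid, descriptors, sizeof(descriptors));
        if (ret < 0) {
            return ret;                       // -ENODEV, -EINVAL or -ENOENT as documented above
        }
        /* descriptors[] now holds the sub effect descriptors */
        return 0;
    }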
diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h
index 00c4444..2539ed3 100644
--- a/include/media/ExtendedAudioBufferProvider.h
+++ b/include/media/ExtendedAudioBufferProvider.h
@@ -18,12 +18,20 @@
#define ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
#include <media/AudioBufferProvider.h>
+#include <media/AudioTimestamp.h>
namespace android {
class ExtendedAudioBufferProvider : public AudioBufferProvider {
public:
virtual size_t framesReady() const = 0; // see description at AudioFlinger.h
+
+ // Return the total number of frames that have been obtained and released
+ virtual size_t framesReleased() const { return 0; }
+
+ // Invoked by buffer consumer when a new timestamp is available.
+ // Default implementation ignores the timestamp.
+ virtual void onTimestamp(const AudioTimestamp& timestamp) { }
};
} // namespace android
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 9c3067e..eaf7780 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -49,9 +49,13 @@ public:
TRACK_DEFAULT = 0, // client requests a default AudioTrack
TRACK_TIMED = 1, // client requests a TimedAudioTrack
TRACK_FAST = 2, // client requests a fast AudioTrack or AudioRecord
+ TRACK_OFFLOAD = 4, // client requests offload to hw codec
};
typedef uint32_t track_flags_t;
+ // invariant on exit for all APIs that return an sp<>:
+ // (return value != 0) == (*status == NO_ERROR)
+
/* create an audio track and registers it with AudioFlinger.
* return null if the track cannot be created.
*/
@@ -66,6 +70,10 @@ public:
audio_io_handle_t output,
pid_t tid, // -1 means unused, otherwise must be valid non-0
int *sessionId,
+ // input: ignored
+ // output: server's description of IAudioTrack for display in logs.
+ // Don't attempt to parse, as the format could change.
+ String8& name,
status_t *status) = 0;
virtual sp<IAudioRecord> openRecord(
@@ -74,7 +82,7 @@ public:
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- track_flags_t flags,
+ track_flags_t *flags,
pid_t tid, // -1 means unused, otherwise must be valid non-0
int *sessionId,
status_t *status) = 0;
@@ -124,7 +132,9 @@ public:
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
const = 0;
- // register a current process for audio output change notifications
+ // Register an object to receive audio input/output change and track notifications.
+ // For a given calling pid, AudioFlinger disregards any registrations after the first.
+ // Thus the IAudioFlingerClient must be a singleton per process.
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
// retrieve the audio recording buffer size
@@ -137,7 +147,8 @@ public:
audio_format_t *pFormat,
audio_channel_mask_t *pChannelMask,
uint32_t *pLatencyMs,
- audio_output_flags_t flags) = 0;
+ audio_output_flags_t flags,
+ const audio_offload_info_t *offloadInfo = NULL) = 0;
virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
audio_io_handle_t output2) = 0;
virtual status_t closeOutput(audio_io_handle_t output) = 0;
@@ -193,6 +204,10 @@ public:
virtual uint32_t getPrimaryOutputSamplingRate() = 0;
virtual size_t getPrimaryOutputFrameCount() = 0;
+ // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
+ // and should be called at most once. For a definition of what "low RAM" means, see
+ // android.app.ActivityManager.isLowRamDevice().
+ virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
};
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index b5ad4ef..09b9ea6 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -53,7 +53,8 @@ public:
uint32_t samplingRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0;
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL) = 0;
virtual status_t startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
int session = 0) = 0;
@@ -95,6 +96,9 @@ public:
virtual status_t queryDefaultPreProcessing(int audioSession,
effect_descriptor_t *descriptors,
uint32_t *count) = 0;
+ // Check whether offload is possible for the given format, stream type, sample rate,
+ // bit rate, duration, and video/streaming flags, and whether the offload property is enabled
+ virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
};
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index d6e3141..eccc2ca 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -34,6 +34,9 @@ class IAudioRecord : public IInterface
public:
DECLARE_META_INTERFACE(AudioRecord);
+ /* get this track's control block */
+ virtual sp<IMemory> getCblk() const = 0;
+
/* After it's created the track is not active. Call start() to
* make it active.
*/
@@ -44,9 +47,6 @@ public:
* will be processed, unless flush() is called.
*/
virtual void stop() = 0;
-
- /* get this tracks control block */
- virtual sp<IMemory> getCblk() const = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 144be0e..5c8a484 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -25,6 +25,8 @@
#include <binder/IInterface.h>
#include <binder/IMemory.h>
#include <utils/LinearTransform.h>
+#include <utils/String8.h>
+#include <media/AudioTimestamp.h>
namespace android {
@@ -82,6 +84,15 @@ public:
or Tungsten time. The values for target are defined in AudioTrack.h */
virtual status_t setMediaTimeTransform(const LinearTransform& xform,
int target) = 0;
+
+ /* Send parameters to the audio hardware */
+ virtual status_t setParameters(const String8& keyValuePairs) = 0;
+
+ /* Return NO_ERROR if timestamp is valid */
+ virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0;
+
+ /* Signal the playback thread for a change in control block */
+ virtual void signal() = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index d630c40..5ef26af 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -32,7 +32,7 @@ struct IDrm : public IInterface {
virtual status_t initCheck() const = 0;
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0;
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
virtual status_t createPlugin(const uint8_t uuid[16]) = 0;
diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h
index 6d27b18..352561e 100644
--- a/include/media/IHDCP.h
+++ b/include/media/IHDCP.h
@@ -17,6 +17,7 @@
#include <binder/IInterface.h>
#include <media/hardware/HDCPAPI.h>
#include <media/stagefright/foundation/ABase.h>
+#include <ui/GraphicBuffer.h>
namespace android {
@@ -45,6 +46,17 @@ struct IHDCP : public IInterface {
// Request to shutdown the active HDCP session.
virtual status_t shutdownAsync() = 0;
+ // Returns the capability bitmask of this HDCP session.
+ // Possible return values (please refer to HDCPAPI.h):
+ // HDCP_CAPS_ENCRYPT: mandatory, meaning the HDCP module can encrypt
+ // from an input byte-array buffer to an output byte-array buffer
+ // HDCP_CAPS_ENCRYPT_NATIVE: the HDCP module supports encryption from
+ // a native buffer to an output byte-array buffer. The format of the
+ // input native buffer is specific to vendor's encoder implementation.
+ // It is the same format as that used by the encoder when
+ // "storeMetaDataInBuffers" extension is enabled on its output port.
+ virtual uint32_t getCaps() = 0;
+
// ENCRYPTION only:
// Encrypt data according to the HDCP spec. "size" bytes of data are
// available at "inData" (virtual address), "size" may not be a multiple
@@ -59,6 +71,20 @@ struct IHDCP : public IInterface {
const void *inData, size_t size, uint32_t streamCTR,
uint64_t *outInputCTR, void *outData) = 0;
+ // Encrypt data according to the HDCP spec. "size" bytes of data starting
+ // at location "offset" are available in "buffer" (buffer handle). "size"
+ // may not be a multiple of 128 bits (16 bytes). An equal number of
+ // encrypted bytes should be written to the buffer at "outData" (virtual
+ // address). This operation is to be synchronous, i.e. this call does not
+ // return until outData contains size bytes of encrypted data.
+ // streamCTR will be assigned by the caller (to 0 for the first PES stream,
+ // 1 for the second and so on)
+ // inputCTR _will_be_maintained_by_the_callee_ for each PES stream.
+ virtual status_t encryptNative(
+ const sp<GraphicBuffer> &graphicBuffer,
+ size_t offset, size_t size, uint32_t streamCTR,
+ uint64_t *outInputCTR, void *outData) = 0;
+
// DECRYPTION only:
// Decrypt data according to the HDCP spec.
// "size" bytes of encrypted data are available at "inData"
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index fef7af2..2998b37 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -49,8 +49,12 @@ public:
virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0;
- virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0;
- virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0;
+ virtual status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+ audio_format_t* pFormat,
+ const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
+ virtual status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
+ int* pNumChannels, audio_format_t* pFormat,
+ const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
virtual sp<IOMX> getOMX() = 0;
virtual sp<ICrypto> makeCrypto() = 0;
virtual sp<IDrm> makeDrm() = 0;
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 0b1d1e4..9c8451c 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -83,6 +83,10 @@ public:
virtual status_t storeMetaDataInBuffers(
node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0;
+ virtual status_t prepareForAdaptivePlayback(
+ node_id node, OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0;
+
virtual status_t enableGraphicBuffers(
node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0;
@@ -97,6 +101,10 @@ public:
node_id node, OMX_U32 port_index,
const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0;
+ virtual status_t updateGraphicBufferInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
+
virtual status_t createInputSurface(
node_id node, OMX_U32 port_index,
sp<IGraphicBufferProducer> *bufferProducer) = 0;
@@ -130,6 +138,17 @@ public:
node_id node,
const char *parameter_name,
OMX_INDEXTYPE *index) = 0;
+
+ enum InternalOptionType {
+ INTERNAL_OPTION_SUSPEND, // data is a bool
+ INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY, // data is an int64_t
+ };
+ virtual status_t setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *data,
+ size_t size) = 0;
};
struct omx_message {
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
index 7b0fa9e..0e6d55d 100644
--- a/include/media/IRemoteDisplayClient.h
+++ b/include/media/IRemoteDisplayClient.h
@@ -49,7 +49,7 @@ public:
// Provides a surface texture that the client should use to stream buffers to
// the remote display.
virtual void onDisplayConnected(const sp<IGraphicBufferProducer>& bufferProducer,
- uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way
+ uint32_t width, uint32_t height, uint32_t flags, uint32_t session) = 0; // one-way
// Indicates that the remote display has been disconnected normally.
// This method should only be called once the client has called 'dispose()'
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
index 0616bf0..388f767 100644
--- a/include/media/JetPlayer.h
+++ b/include/media/JetPlayer.h
@@ -88,7 +88,7 @@ private:
EAS_DATA_HANDLE mEasData;
EAS_FILE_LOCATOR mEasJetFileLoc;
EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer,
- AudioTrack* mAudioTrack; // and we play it in this audio track
+ sp<AudioTrack> mAudioTrack; // and we play it in this audio track
int mTrackBufferSize;
S_JET_STATUS mJetStatus;
S_JET_STATUS mPreviousJetStatus;
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 9a75f81..3b151ef 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -74,9 +74,18 @@ public:
// AudioSink: abstraction layer for audio output
class AudioSink : public RefBase {
public:
+ enum cb_event_t {
+ CB_EVENT_FILL_BUFFER, // Request to write more data to buffer.
+ CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played
+ // back (after stop is called)
+ CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change:
+ // Need to re-evaluate offloading options
+ };
+
// Callback returns the number of bytes actually written to the buffer.
typedef size_t (*AudioCallback)(
- AudioSink *audioSink, void *buffer, size_t size, void *cookie);
+ AudioSink *audioSink, void *buffer, size_t size, void *cookie,
+ cb_event_t event);
virtual ~AudioSink() {}
virtual bool ready() const = 0; // audio output is open and ready
@@ -99,9 +108,10 @@ public:
int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
AudioCallback cb = NULL,
void *cookie = NULL,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0;
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL) = 0;
- virtual void start() = 0;
+ virtual status_t start() = 0;
virtual ssize_t write(const void* buffer, size_t size) = 0;
virtual void stop() = 0;
virtual void flush() = 0;
@@ -110,6 +120,9 @@ public:
virtual status_t setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
virtual bool needsTrailingPadding() { return true; }
+
+ virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR; };
+ virtual String8 getParameters(const String8& keys) { return String8::empty(); };
};
MediaPlayerBase() : mCookie(0), mNotify(0) {}
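A sketch of a player-side callback handling the widened AudioCallback signature; the handler is hypothetical and only illustrates how the new cb_event_t is dispatched:

    using namespace android;

    static size_t sinkCallback(MediaPlayerBase::AudioSink* /*sink*/, void* buffer, size_t size,
                               void* /*cookie*/, MediaPlayerBase::AudioSink::cb_event_t event)
    {
        switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
            // copy up to 'size' bytes into 'buffer' and return the amount actually provided
            (void) buffer; (void) size;
            return 0;
        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
            // everything queued in AudioFlinger and the HAL has been rendered
            return 0;
        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
            // offloaded track invalidated; re-evaluate offloading and reopen the sink
            return 0;
        }
        return 0;
    }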
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 7bf3069..2dd78cc 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -22,6 +22,8 @@
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
#include <media/AudioTrack.h>
+#include <binder/MemoryHeapBase.h>
+#include <binder/MemoryBase.h>
namespace android {
@@ -85,6 +87,7 @@ private:
int64_t mLength;
char* mUrl;
sp<IMemory> mData;
+ sp<MemoryHeapBase> mHeap;
};
// stores pending events for stolen channels
@@ -118,7 +121,7 @@ protected:
class SoundChannel : public SoundEvent {
public:
enum state { IDLE, RESUMING, STOPPING, PAUSED, PLAYING };
- SoundChannel() : mAudioTrack(NULL), mState(IDLE), mNumChannels(1),
+ SoundChannel() : mState(IDLE), mNumChannels(1),
mPos(0), mToggle(0), mAutoPaused(false) {}
~SoundChannel();
void init(SoundPool* soundPool);
@@ -148,7 +151,7 @@ private:
bool doStop_l();
SoundPool* mSoundPool;
- AudioTrack* mAudioTrack;
+ sp<AudioTrack> mAudioTrack;
SoundEvent mNextEvent;
Mutex mLock;
int mState;
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 2183fbe..98c4332 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -160,7 +160,7 @@ public:
bool isInited() { return (mState == TONE_IDLE)?false:true;}
// returns the audio session this ToneGenerator belongs to or 0 if an error occurred.
- int getSessionId() { return (mpAudioTrack == NULL) ? 0 : mpAudioTrack->getSessionId(); }
+ int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); }
private:
@@ -264,7 +264,7 @@ private:
unsigned short mLoopCounter; // Current tone loopback count
uint32_t mSamplingRate; // AudioFlinger Sampling rate
- AudioTrack *mpAudioTrack; // Pointer to audio track used for playback
+ sp<AudioTrack> mpAudioTrack; // Pointer to audio track used for playback
Mutex mLock; // Mutex to control concurrent access to ToneGenerator object from audio callback and application API
Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index aa58905..6167dd6 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -19,7 +19,7 @@
#include <media/AudioEffect.h>
#include <audio_effects/effect_visualizer.h>
-#include <string.h>
+#include <utils/Thread.h>
/**
* The Visualizer class enables application to retrieve part of the currently playing audio for
@@ -114,6 +114,14 @@ public:
status_t setScalingMode(uint32_t mode);
uint32_t getScalingMode() { return mScalingMode; }
+ // set which measurements are done on the audio buffers processed by the effect.
+ // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS
+ status_t setMeasurementMode(uint32_t mode);
+ uint32_t getMeasurementMode() { return mMeasurementMode; }
+
+ // return a set of int32_t measurements
+ status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements);
+
// return a capture in PCM 8 bit unsigned format. The size of the capture is equal to
// getCaptureSize()
status_t getWaveForm(uint8_t *waveform);
@@ -156,6 +164,7 @@ private:
uint32_t mCaptureSize;
uint32_t mSampleRate;
uint32_t mScalingMode;
+ uint32_t mMeasurementMode;
capture_cbk_t mCaptureCallBack;
void *mCaptureCbkUser;
sp<CaptureThread> mCaptureThread;
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 14381c7..4c05fc3 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -42,9 +42,14 @@ enum media_event_type {
MEDIA_BUFFERING_UPDATE = 3,
MEDIA_SEEK_COMPLETE = 4,
MEDIA_SET_VIDEO_SIZE = 5,
+ MEDIA_STARTED = 6,
+ MEDIA_PAUSED = 7,
+ MEDIA_STOPPED = 8,
+ MEDIA_SKIPPED = 9,
MEDIA_TIMED_TEXT = 99,
MEDIA_ERROR = 100,
MEDIA_INFO = 200,
+ MEDIA_SUBTITLE_DATA = 201,
};
// Generic error codes for the media player framework. Errors are fatal, the
@@ -173,6 +178,7 @@ enum media_track_type {
MEDIA_TRACK_TYPE_VIDEO = 1,
MEDIA_TRACK_TYPE_AUDIO = 2,
MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
+ MEDIA_TRACK_TYPE_SUBTITLE = 4,
};
// ----------------------------------------------------------------------------
@@ -218,8 +224,12 @@ public:
bool isLooping();
status_t setVolume(float leftVolume, float rightVolume);
void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
- static sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
- static sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
+ static status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+ audio_format_t* pFormat,
+ const sp<IMemoryHeap>& heap, size_t *pSize);
+ static status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
+ int* pNumChannels, audio_format_t* pFormat,
+ const sp<IMemoryHeap>& heap, size_t *pSize);
status_t invoke(const Parcel& request, Parcel *reply);
status_t setMetadataFilter(const Parcel& filter);
status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
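With the signature change above, callers of the static decode() helpers now supply the destination heap themselves and read the decoded size back through *pSize; a hedged sketch (the 4 MiB heap size is only an example):

    #include <binder/MemoryHeapBase.h>
    #include <media/mediaplayer.h>

    static android::status_t decodeToHeap(const char* url)
    {
        using namespace android;
        uint32_t sampleRate = 0;
        int numChannels = 0;
        audio_format_t format = AUDIO_FORMAT_DEFAULT;
        size_t decodedSize = 0;
        sp<MemoryHeapBase> heap = new MemoryHeapBase(4 * 1024 * 1024, 0, "decoded PCM");
        status_t err = MediaPlayer::decode(url, &sampleRate, &numChannels, &format,
                                           heap, &decodedSize);
        // on NO_ERROR, the first decodedSize bytes of heap->getBase() hold the decoded samples
        return err;
    }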
diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h
index 5976b18..7948d40 100644
--- a/include/media/nbaio/AudioStreamOutSink.h
+++ b/include/media/nbaio/AudioStreamOutSink.h
@@ -52,6 +52,8 @@ public:
// implementation of GNWT (if any)
virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+ virtual status_t getTimestamp(AudioTimestamp& timestamp);
+
// NBAIO_Sink end
#if 0 // until necessary
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
index 5fcfe9e..d3802fe 100644
--- a/include/media/nbaio/MonoPipe.h
+++ b/include/media/nbaio/MonoPipe.h
@@ -20,9 +20,12 @@
#include <time.h>
#include <utils/LinearTransform.h>
#include "NBAIO.h"
+#include <media/SingleStateQueue.h>
namespace android {
+typedef SingleStateQueue<AudioTimestamp> AudioTimestampSingleStateQueue;
+
// MonoPipe is similar to Pipe except:
// - supports only a single reader, called MonoPipeReader
// - write() cannot overrun; instead it will return a short actual count if insufficient space
@@ -88,6 +91,9 @@ public:
// Return true if the write side of a pipe is currently shutdown.
bool isShutdown();
+ // Return NO_ERROR if there is a timestamp available
+ status_t getTimestamp(AudioTimestamp& timestamp);
+
private:
// A pair of methods and a helper variable which allows the reader and the
// writer to update and observe the values of mFront and mNextRdPTS in an
@@ -127,6 +133,10 @@ private:
LinearTransform mSamplesToLocalTime;
bool mIsShutdown; // whether shutdown(true) was called, no barriers are needed
+
+ AudioTimestampSingleStateQueue::Shared mTimestampShared;
+ AudioTimestampSingleStateQueue::Mutator mTimestampMutator;
+ AudioTimestampSingleStateQueue::Observer mTimestampObserver;
};
} // namespace android
diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h
index 0e1c992..78fe867 100644
--- a/include/media/nbaio/MonoPipeReader.h
+++ b/include/media/nbaio/MonoPipeReader.h
@@ -49,6 +49,8 @@ public:
virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+ virtual void onTimestamp(const AudioTimestamp& timestamp);
+
// NBAIO_Source end
#if 0 // until necessary
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index f5d6eb5..1da0c73 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -28,6 +28,7 @@
#include <stdlib.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
+#include <media/AudioTimestamp.h>
namespace android {
@@ -213,6 +214,11 @@ public:
// <other> Something unexpected happened internally. Check the logs and start debugging.
virtual status_t getNextWriteTimestamp(int64_t *ts) { return INVALID_OPERATION; }
+ // Returns NO_ERROR if a timestamp is available. The timestamp includes the total number
+ // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
+ // as of this presentation count.
+ virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
+
protected:
NBAIO_Sink(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { }
virtual ~NBAIO_Sink() { }
@@ -300,6 +306,10 @@ public:
virtual ssize_t readVia(readVia_t via, size_t total, void *user,
int64_t readPTS, size_t block = 0);
+ // Invoked asynchronously by corresponding sink when a new timestamp is available.
+ // Default implementation ignores the timestamp.
+ virtual void onTimestamp(const AudioTimestamp& timestamp) { }
+
protected:
NBAIO_Source(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { }
virtual ~NBAIO_Source() { }
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 107ba66..6d59ea7 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -90,6 +90,8 @@ public:
virtual ~Timeline();
#endif
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
static size_t sharedSize(size_t size);
#if 0
@@ -110,8 +112,12 @@ private:
class Writer : public RefBase {
public:
Writer(); // dummy nop implementation without shared memory
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
Writer(size_t size, void *shared);
Writer(size_t size, const sp<IMemory>& iMemory);
+
virtual ~Writer() { }
virtual void log(const char *string);
@@ -165,8 +171,12 @@ private:
class Reader : public RefBase {
public:
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
Reader(size_t size, const void *shared);
Reader(size_t size, const sp<IMemory>& iMemory);
+
virtual ~Reader() { }
void dump(int fd, size_t indent = 0);
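The sizing contract documented above, restated as code; the choice of backing memory (ashmem, MemoryDealer, ...) is left to the caller and the helper name is hypothetical:

    #include <media/nbaio/NBLog.h>

    // Whatever shared memory backs a Writer or Reader must hold at least this many bytes.
    static size_t requiredSharedBytes(size_t timelineSize /* e.g. 16 * 1024 */)
    {
        return android::NBLog::Timeline::sharedSize(timelineSize);
    }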
diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h
index c08331b..cdfb6fe 100644
--- a/include/media/nbaio/SourceAudioBufferProvider.h
+++ b/include/media/nbaio/SourceAudioBufferProvider.h
@@ -36,6 +36,8 @@ public:
// ExtendedAudioBufferProvider interface
virtual size_t framesReady() const;
+ virtual size_t framesReleased() const;
+ virtual void onTimestamp(const AudioTimestamp& timestamp);
private:
const sp<NBAIO_Source> mSource; // the wrapped source
@@ -45,6 +47,7 @@ private:
size_t mOffset; // frame offset within mAllocated of valid data
size_t mRemaining; // frame count within mAllocated of valid data
size_t mGetCount; // buffer.frameCount of the most recent getNextBuffer
+ uint32_t mFramesReleased; // counter of the total number of frames released
};
} // namespace android
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index df25d7b..a8ffd4a 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -124,7 +124,8 @@ private:
};
enum {
- kFlagIsSecure = 1,
+ kFlagIsSecure = 1,
+ kFlagPushBlankBuffersToNativeWindowOnShutdown = 2,
};
struct BufferInfo {
@@ -138,6 +139,7 @@ private:
IOMX::buffer_id mBufferID;
Status mStatus;
+ unsigned mDequeuedAt;
sp<ABuffer> mData;
sp<GraphicBuffer> mGraphicBuffer;
@@ -182,7 +184,7 @@ private:
bool mSentFormat;
bool mIsEncoder;
-
+ bool mUseMetadataOnEncoderOutput;
bool mShutdownInProgress;
// If "mKeepComponentAllocated" we only transition back to Loaded state
@@ -194,12 +196,22 @@ private:
bool mChannelMaskPresent;
int32_t mChannelMask;
+ unsigned mDequeueCounter;
+ bool mStoreMetaDataInOutputBuffers;
+ int32_t mMetaDataBuffersToSubmit;
+
+ int64_t mRepeatFrameDelayUs;
status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
status_t allocateBuffersOnPort(OMX_U32 portIndex);
status_t freeBuffersOnPort(OMX_U32 portIndex);
status_t freeBuffer(OMX_U32 portIndex, size_t i);
+ status_t configureOutputBuffersFromNativeWindow(
+ OMX_U32 *nBufferCount, OMX_U32 *nBufferSize,
+ OMX_U32 *nMinUndequeuedBuffers);
+ status_t allocateOutputMetaDataBuffers();
+ status_t submitOutputMetaDataBuffer();
status_t allocateOutputBuffersFromNativeWindow();
status_t cancelBufferToNativeWindow(BufferInfo *info);
status_t freeOutputBuffersNotOwnedByComponent();
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 1dc408f..912a43c 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -36,8 +36,16 @@ public:
SEEK_COMPLETE
};
+ enum {
+ ALLOW_DEEP_BUFFERING = 0x01,
+ USE_OFFLOAD = 0x02,
+ HAS_VIDEO = 0x1000,
+ IS_STREAMING = 0x2000
+
+ };
+
AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
- bool allowDeepBuffering = false,
+ uint32_t flags = 0,
AwesomePlayer *audioObserver = NULL);
virtual ~AudioPlayer();
@@ -51,7 +59,7 @@ public:
status_t start(bool sourceAlreadyStarted = false);
void pause(bool playPendingSamples = false);
- void resume();
+ status_t resume();
// Returns the timestamp of the last buffer played (in us).
int64_t getMediaTimeUs();
@@ -67,10 +75,12 @@ public:
status_t setPlaybackRatePermille(int32_t ratePermille);
+ void notifyAudioEOS();
+
private:
friend class VideoEditorAudioPlayer;
sp<MediaSource> mSource;
- AudioTrack *mAudioTrack;
+ sp<AudioTrack> mAudioTrack;
MediaBuffer *mInputBuffer;
@@ -97,17 +107,20 @@ private:
MediaBuffer *mFirstBuffer;
sp<MediaPlayerBase::AudioSink> mAudioSink;
- bool mAllowDeepBuffering; // allow audio deep audio buffers. Helps with low power audio
- // playback but implies high latency
AwesomePlayer *mObserver;
int64_t mPinnedTimeUs;
+ bool mPlaying;
+ int64_t mStartPosUs;
+ const uint32_t mCreateFlags;
+
static void AudioCallback(int event, void *user, void *info);
void AudioCallback(int event, void *info);
static size_t AudioSinkCallback(
MediaPlayerBase::AudioSink *audioSink,
- void *data, size_t size, void *me);
+ void *data, size_t size, void *me,
+ MediaPlayerBase::AudioSink::cb_event_t event);
size_t fillBuffer(void *data, size_t size);
@@ -116,6 +129,10 @@ private:
void reset();
uint32_t getNumFramesPendingPlayout() const;
+ int64_t getOutputPlayPositionUs_l() const;
+
+ bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
+ bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; }
AudioPlayer(const AudioPlayer &);
AudioPlayer &operator=(const AudioPlayer &);
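
The AudioPlayer constructor above now takes a bitmask of the new creation flags in place of the old allowDeepBuffering bool, and resume() now reports a status. A minimal caller-side sketch, assuming an existing MediaPlayerBase::AudioSink and using only the declarations shown in this hunk (createOffloadedPlayer is a hypothetical helper, not part of the source):

    // Sketch only: combine the new AudioPlayer creation flags.
    #include <media/stagefright/AudioPlayer.h>

    using namespace android;

    // Hypothetical helper; the sink is assumed to be created by the caller.
    static AudioPlayer *createOffloadedPlayer(
            const sp<MediaPlayerBase::AudioSink> &sink, bool hasVideo) {
        uint32_t flags = AudioPlayer::USE_OFFLOAD;
        if (hasVideo) {
            flags |= AudioPlayer::HAS_VIDEO;   // feeds into the offload decision
        }
        return new AudioPlayer(sink, flags, NULL /* no AwesomePlayer observer */);
    }
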
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 99f3c3b..4c9aaad 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -73,7 +73,7 @@ private:
Condition mFrameAvailableCondition;
Condition mFrameEncodingCompletionCondition;
- AudioRecord *mRecord;
+ sp<AudioRecord> mRecord;
status_t mInitCheck;
bool mStarted;
int32_t mSampleRate;
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index dfb845b..590623b 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -50,7 +50,8 @@ struct MediaCodecList {
status_t getCodecCapabilities(
size_t index, const char *type,
Vector<ProfileLevel> *profileLevels,
- Vector<uint32_t> *colorFormats) const;
+ Vector<uint32_t> *colorFormats,
+ uint32_t *flags) const;
private:
enum Section {
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 81de6e4..85693d4 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -22,7 +22,8 @@ namespace android {
extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
-extern const char *MEDIA_MIMETYPE_VIDEO_VPX;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
extern const char *MEDIA_MIMETYPE_VIDEO_H263;
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index ee5e4e2..686f286 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -56,14 +56,11 @@ enum {
ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7,
ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8,
ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9,
+ ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10,
ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500,
ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999,
- // Deprecated
- ERROR_DRM_WV_VENDOR_MAX = ERROR_DRM_VENDOR_MAX,
- ERROR_DRM_WV_VENDOR_MIN = ERROR_DRM_VENDOR_MIN,
-
// Heartbeat Error Codes
HEARTBEAT_ERROR_BASE = -3000,
ERROR_HEARTBEAT_TERMINATE_REQUESTED = HEARTBEAT_ERROR_BASE,
diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h
index c1fdbad..ff6a66e 100644
--- a/include/media/stagefright/MediaMuxer.h
+++ b/include/media/stagefright/MediaMuxer.h
@@ -79,6 +79,16 @@ public:
status_t setOrientationHint(int degrees);
/**
+ * Set the location.
+     * @param latitude The latitude in degrees x 10000. Its value must be in the range
+ * [-900000, 900000].
+     * @param longitude The longitude in degrees x 10000. Its value must be in the range
+ * [-1800000, 1800000].
+ * @return OK if no error.
+ */
+ status_t setLocation(int latitude, int longitude);
+
+ /**
* Stop muxing.
* This method is a blocking call. Depending on how
 * much data is buffered internally, the time needed for stopping
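
The new setLocation() takes fixed-point coordinates: the value passed is the coordinate in degrees multiplied by 10000, which is what the [-900000, 900000] and [-1800000, 1800000] ranges above correspond to. A minimal sketch, assuming a muxer that has been created but not yet started (tagLocation is a hypothetical helper and the coordinates are example values):

    // Sketch only: geotag the output file before starting the muxer.
    #include <media/stagefright/MediaMuxer.h>

    using namespace android;

    static status_t tagLocation(const sp<MediaMuxer> &muxer) {
        // 47.6062 N, -122.3321 E encoded as degrees x 10000.
        return muxer->setLocation(476062, -1223321);
    }
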
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 583c3b3..daaf20f 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -361,9 +361,14 @@ private:
};
struct CodecCapabilities {
+ enum {
+ kFlagSupportsAdaptivePlayback = 1 << 0,
+ };
+
String8 mComponentName;
Vector<CodecProfileLevel> mProfileLevels;
Vector<OMX_U32> mColorFormats;
+ uint32_t mFlags;
};
// Return a vector of componentNames with supported profile/level pairs
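
CodecCapabilities now carries a flags word alongside the profile/level and color-format lists, with kFlagSupportsAdaptivePlayback as its first bit. A minimal sketch of a caller testing that bit on a filled-in capabilities struct (supportsAdaptivePlayback is a hypothetical helper):

    // Sketch only: test the new capability flag.
    #include <media/stagefright/OMXCodec.h>

    using namespace android;

    static bool supportsAdaptivePlayback(const CodecCapabilities &caps) {
        return (caps.mFlags & CodecCapabilities::kFlagSupportsAdaptivePlayback) != 0;
    }
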
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 5f21da9..db5f947 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -56,7 +56,7 @@ class GraphicBuffer;
class SurfaceMediaSource : public MediaSource,
public MediaBufferObserver,
- protected BufferQueue::ConsumerListener {
+ protected ConsumerListener {
public:
enum { MIN_UNDEQUEUED_BUFFERS = 4};
@@ -146,9 +146,13 @@ private:
// this consumer
sp<BufferQueue> mBufferQueue;
- // mBufferSlot caches GraphicBuffers from the buffer queue
- sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
+ struct SlotData {
+ sp<GraphicBuffer> mGraphicBuffer;
+ uint64_t mFrameNumber;
+ };
+ // mSlots caches GraphicBuffers and frameNumbers from the buffer queue
+ SlotData mSlots[BufferQueue::NUM_BUFFER_SLOTS];
// The permanent width and height of SMS buffers
int mWidth;
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 73940d3..c24f612 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -22,6 +22,8 @@
#include <stdint.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
+#include <system/audio.h>
+#include <media/MediaPlayerInterface.h>
namespace android {
@@ -48,6 +50,15 @@ void convertMessageToMetaData(
AString MakeUserAgent();
+// Convert a MIME type to an AudioSystem::audio_format
+status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime);
+
+// Send information from MetaData to the HAL via AudioSink
+status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta);
+
+// Check whether the stream defined by meta can be offloaded to hardware
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming);
+
} // namespace android
#endif // UTILS_H_
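
The new helpers above fit together when a player decides whether a stream can use an offloaded output: canOffloadStream() makes the decision from the track's MetaData, and sendMetaDataToHal() forwards codec details to the audio HAL through the sink. A minimal sketch, assuming the sink and metadata already exist (maybeOffload is a hypothetical helper):

    // Sketch only: offload decision using the helpers declared above.
    #include <media/stagefright/Utils.h>
    #include <media/stagefright/MetaData.h>

    using namespace android;

    static bool maybeOffload(
            sp<MediaPlayerBase::AudioSink> &sink, const sp<MetaData> &meta,
            bool hasVideo, bool isStreaming) {
        if (!canOffloadStream(meta, hasVideo, isStreaming)) {
            return false;                      // stay on the regular PCM path
        }
        // Forward codec details from the metadata to the HAL via the sink.
        return sendMetaDataToHal(sink, meta) == OK;
    }
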
diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h
index 2e5fd73..940fc55 100644
--- a/include/media/stagefright/foundation/ALooperRoster.h
+++ b/include/media/stagefright/foundation/ALooperRoster.h
@@ -30,6 +30,7 @@ struct ALooperRoster {
const sp<ALooper> looper, const sp<AHandler> &handler);
void unregisterHandler(ALooper::handler_id handlerID);
+ void unregisterStaleHandlers();
status_t postMessage(const sp<AMessage> &msg, int64_t delayUs = 0);
void deliverMessage(const sp<AMessage> &msg);
diff --git a/include/media/stagefright/foundation/ANetworkSession.h b/include/media/stagefright/foundation/ANetworkSession.h
new file mode 100644
index 0000000..fd3ebaa
--- /dev/null
+++ b/include/media/stagefright/foundation/ANetworkSession.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_NETWORK_SESSION_H_
+
+#define A_NETWORK_SESSION_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/Thread.h>
+
+#include <netinet/in.h>
+
+namespace android {
+
+struct AMessage;
+
+// Helper class to manage a number of live sockets (datagram and stream-based)
+// on a single thread. Clients are notified about activity through AMessages.
+struct ANetworkSession : public RefBase {
+ ANetworkSession();
+
+ status_t start();
+ status_t stop();
+
+ status_t createRTSPClient(
+ const char *host, unsigned port, const sp<AMessage> &notify,
+ int32_t *sessionID);
+
+ status_t createRTSPServer(
+ const struct in_addr &addr, unsigned port,
+ const sp<AMessage> &notify, int32_t *sessionID);
+
+ status_t createUDPSession(
+ unsigned localPort, const sp<AMessage> &notify, int32_t *sessionID);
+
+ status_t createUDPSession(
+ unsigned localPort,
+ const char *remoteHost,
+ unsigned remotePort,
+ const sp<AMessage> &notify,
+ int32_t *sessionID);
+
+ status_t connectUDPSession(
+ int32_t sessionID, const char *remoteHost, unsigned remotePort);
+
+ // passive
+ status_t createTCPDatagramSession(
+ const struct in_addr &addr, unsigned port,
+ const sp<AMessage> &notify, int32_t *sessionID);
+
+ // active
+ status_t createTCPDatagramSession(
+ unsigned localPort,
+ const char *remoteHost,
+ unsigned remotePort,
+ const sp<AMessage> &notify,
+ int32_t *sessionID);
+
+ status_t destroySession(int32_t sessionID);
+
+ status_t sendRequest(
+ int32_t sessionID, const void *data, ssize_t size = -1,
+ bool timeValid = false, int64_t timeUs = -1ll);
+
+ status_t switchToWebSocketMode(int32_t sessionID);
+
+ enum NotificationReason {
+ kWhatError,
+ kWhatConnected,
+ kWhatClientConnected,
+ kWhatData,
+ kWhatDatagram,
+ kWhatBinaryData,
+ kWhatWebSocketMessage,
+ kWhatNetworkStall,
+ };
+
+protected:
+ virtual ~ANetworkSession();
+
+private:
+ struct NetworkThread;
+ struct Session;
+
+ Mutex mLock;
+ sp<Thread> mThread;
+
+ int32_t mNextSessionID;
+
+ int mPipeFd[2];
+
+ KeyedVector<int32_t, sp<Session> > mSessions;
+
+ enum Mode {
+ kModeCreateUDPSession,
+ kModeCreateTCPDatagramSessionPassive,
+ kModeCreateTCPDatagramSessionActive,
+ kModeCreateRTSPServer,
+ kModeCreateRTSPClient,
+ };
+ status_t createClientOrServer(
+ Mode mode,
+ const struct in_addr *addr,
+ unsigned port,
+ const char *remoteHost,
+ unsigned remotePort,
+ const sp<AMessage> &notify,
+ int32_t *sessionID);
+
+ void threadLoop();
+ void interrupt();
+
+ static status_t MakeSocketNonBlocking(int s);
+
+ DISALLOW_EVIL_CONSTRUCTORS(ANetworkSession);
+};
+
+} // namespace android
+
+#endif // A_NETWORK_SESSION_H_
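
A minimal sketch of the intended call sequence for this new class: start() spins up the worker thread, then one of the create*() calls registers a socket and returns a session ID, and activity is reported through the supplied AMessage. The caller is assumed to keep the ANetworkSession alive; startRtspServer is a hypothetical helper:

    // Sketch only: create an RTSP listening socket managed by ANetworkSession.
    #include <media/stagefright/foundation/ANetworkSession.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static status_t startRtspServer(
            const sp<ANetworkSession> &session,   // kept alive by the caller
            const struct in_addr &addr, unsigned port,
            const sp<AMessage> &notify, int32_t *sessionID) {
        status_t err = session->start();          // spawns the network thread
        if (err != OK) {
            return err;
        }
        // kWhatClientConnected / kWhatData / kWhatError arrive via 'notify'.
        return session->createRTSPServer(addr, port, notify, sessionID);
    }
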
diff --git a/include/media/stagefright/foundation/ParsedMessage.h b/include/media/stagefright/foundation/ParsedMessage.h
new file mode 100644
index 0000000..9d43a93
--- /dev/null
+++ b/include/media/stagefright/foundation/ParsedMessage.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AString.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+// Encapsulates an "HTTP/RTSP style" response, i.e. a status line,
+// key/value pairs making up the headers and an optional body/content.
+struct ParsedMessage : public RefBase {
+ static sp<ParsedMessage> Parse(
+ const char *data, size_t size, bool noMoreData, size_t *length);
+
+ bool findString(const char *name, AString *value) const;
+ bool findInt32(const char *name, int32_t *value) const;
+
+ const char *getContent() const;
+
+ bool getRequestField(size_t index, AString *field) const;
+ bool getStatusCode(int32_t *statusCode) const;
+
+ AString debugString() const;
+
+ static bool GetAttribute(const char *s, const char *key, AString *value);
+
+ static bool GetInt32Attribute(
+ const char *s, const char *key, int32_t *value);
+
+
+protected:
+ virtual ~ParsedMessage();
+
+private:
+ KeyedVector<AString, AString> mDict;
+ AString mContent;
+
+ ParsedMessage();
+
+ ssize_t parse(const char *data, size_t size, bool noMoreData);
+
+ DISALLOW_EVIL_CONSTRUCTORS(ParsedMessage);
+};
+
+} // namespace android
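
A minimal sketch of how this new class is meant to be used, assuming Parse() returns NULL while the message is still incomplete and sets *length to the number of bytes consumed (handleResponse is a hypothetical helper and "session" is just an example header name):

    // Sketch only: pull one "HTTP/RTSP style" message out of a receive buffer.
    #include <media/stagefright/foundation/ParsedMessage.h>

    using namespace android;

    static bool handleResponse(const char *data, size_t size) {
        size_t length;
        sp<ParsedMessage> msg =
            ParsedMessage::Parse(data, size, false /* noMoreData */, &length);
        if (msg == NULL) {
            return false;                      // incomplete; wait for more bytes
        }
        int32_t statusCode;
        AString session;
        return msg->getStatusCode(&statusCode)
                && msg->findString("session", &session);
    }
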
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 41e20f8..395f164 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -22,32 +22,51 @@
#include <utils/threads.h>
#include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <media/nbaio/roundup.h>
+#include <media/SingleStateQueue.h>
+#include <private/media/StaticAudioTrackState.h>
namespace android {
// ----------------------------------------------------------------------------
-// Maximum cumulated timeout milliseconds before restarting audioflinger thread
-#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP
- // init time
-#define MAX_RUN_TIMEOUT_MS 1000
-#define WAIT_PERIOD_MS 10
-
-#define CBLK_UNDERRUN 0x01 // set: underrun (out) or overrrun (in), clear: no underrun or overrun
+// for audio_track_cblk_t::mFlags
+#define CBLK_UNDERRUN 0x01 // set by server immediately on output underrun, cleared by client
#define CBLK_FORCEREADY 0x02 // set: track is considered ready immediately by AudioFlinger,
// clear: track is ready when buffer full
#define CBLK_INVALID 0x04 // track buffer invalidated by AudioFlinger, need to re-create
-#define CBLK_DISABLED 0x08 // track disabled by AudioFlinger due to underrun, need to re-start
+#define CBLK_DISABLED 0x08 // output track disabled by AudioFlinger due to underrun,
+ // need to re-start. Unlike CBLK_UNDERRUN, this is not set
+ // immediately, but only after a long string of underruns.
+// 0x10 unused
+#define CBLK_LOOP_CYCLE 0x20 // set by server each time a loop cycle other than final one completes
+#define CBLK_LOOP_FINAL 0x40 // set by server when the final loop cycle completes
+#define CBLK_BUFFER_END 0x80 // set by server when the position reaches end of buffer if not looping
+#define CBLK_OVERRUN 0x100 // set by server immediately on input overrun, cleared by client
+#define CBLK_INTERRUPT 0x200 // set by client on interrupt(), cleared by client in obtainBuffer()
+#define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client
+
+//EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
struct AudioTrackSharedStreaming {
// similar to NBAIO MonoPipe
- volatile int32_t mFront;
- volatile int32_t mRear;
+ // in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
+ volatile int32_t mFront; // read by server
+ volatile int32_t mRear; // write by client
+ volatile int32_t mFlush; // incremented by client to indicate a request to flush;
+ // server notices and discards all data between mFront and mRear
+ volatile uint32_t mUnderrunFrames; // server increments for each unavailable but desired frame
};
-// future
+typedef SingleStateQueue<StaticAudioTrackState> StaticAudioTrackSingleStateQueue;
+
struct AudioTrackSharedStatic {
- int mReserved;
+ StaticAudioTrackSingleStateQueue::Shared
+ mSingleStateQueue;
+ size_t mBufferPosition; // updated asynchronously by server,
+ // "for entertainment purposes only"
};
// ----------------------------------------------------------------------------
@@ -55,65 +74,63 @@ struct AudioTrackSharedStatic {
// Important: do not add any virtual methods, including ~
struct audio_track_cblk_t
{
+ // Since the control block is always located in shared memory, this constructor
+ // is only used for placement new(). It is never used for regular new() or stack.
+ audio_track_cblk_t();
+ /*virtual*/ ~audio_track_cblk_t() { }
+
friend class Proxy;
+ friend class ClientProxy;
friend class AudioTrackClientProxy;
friend class AudioRecordClientProxy;
friend class ServerProxy;
+ friend class AudioTrackServerProxy;
+ friend class AudioRecordServerProxy;
// The data members are grouped so that members accessed frequently and in the same context
// are in the same line of data cache.
- Mutex lock; // sizeof(int)
- Condition cv; // sizeof(int)
- // next 4 are offsets within "buffers"
- volatile uint32_t user;
- volatile uint32_t server;
- uint32_t userBase;
- uint32_t serverBase;
-
- int mPad1; // unused, but preserves cache line alignment
+ uint32_t mServer; // Number of filled frames consumed by server (mIsOut),
+ // or filled frames provided by server (!mIsOut).
+ // It is updated asynchronously by server without a barrier.
+ // The value should be used "for entertainment purposes only",
+ // which means don't make important decisions based on it.
size_t frameCount_; // used during creation to pass actual track buffer size
// from AudioFlinger to client, and not referenced again
- // FIXME remove here and replace by createTrack() in/out parameter
+ // FIXME remove here and replace by createTrack() in/out
+ // parameter
// renamed to "_" to detect incorrect use
- // Cache line boundary (32 bytes)
+ volatile int32_t mFutex; // event flag: down (P) by client,
+ // up (V) by server or binderDied() or interrupt()
+#define CBLK_FUTEX_WAKE 1 // if event flag bit is set, then a deferred wake is pending
- uint32_t loopStart;
- uint32_t loopEnd; // read-only for server, read/write for client
- int loopCount; // read/write for client
+private:
+
+ size_t mMinimum; // server wakes up client if available >= mMinimum
// Channel volumes are fixed point U4.12, so 0x1000 means 1.0.
// Left channel is in [0:15], right channel is in [16:31].
// Always read and write the combined pair atomically.
// For AudioTrack only, not used by AudioRecord.
-private:
uint32_t mVolumeLR;
uint32_t mSampleRate; // AudioTrack only: client's requested sample rate in Hz
// or 0 == default. Write-only client, read-only server.
- uint8_t mPad2; // unused
-
-public:
- // read-only for client, server writes once at initialization and is then read-only
- uint8_t mName; // normal tracks: track name, fast tracks: track index
-
- // used by client only
- uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting
- // audioflinger
-
- uint16_t waitTimeMs; // Cumulated wait time, used by client only
-private:
// client write-only, server read-only
uint16_t mSendLevel; // Fixed point U4.12 so 0x1000 means 1.0
+
+ uint16_t mPad2; // unused
+
public:
- volatile int32_t flags;
+
+ volatile int32_t mFlags; // combinations of CBLK_*
// Cache line boundary (32 bytes)
-#if 0
+public:
union {
AudioTrackSharedStreaming mStreaming;
AudioTrackSharedStatic mStatic;
@@ -121,25 +138,6 @@ public:
} u;
// Cache line boundary (32 bytes)
-#endif
-
- // Since the control block is always located in shared memory, this constructor
- // is only used for placement new(). It is never used for regular new() or stack.
- audio_track_cblk_t();
-
-private:
- // if there is a shared buffer, "buffers" is the value of pointer() for the shared
- // buffer, otherwise "buffers" points immediately after the control block
- void* buffer(void *buffers, uint32_t frameSize, size_t offset) const;
-
- bool tryLock();
-
- // isOut == true means AudioTrack, isOut == false means AudioRecord
- bool stepServer(size_t stepCount, size_t frameCount, bool isOut);
- uint32_t stepUser(size_t stepCount, size_t frameCount, bool isOut);
- uint32_t framesAvailable(size_t frameCount, bool isOut);
- uint32_t framesAvailable_l(size_t frameCount, bool isOut);
- uint32_t framesReady(bool isOut);
};
// ----------------------------------------------------------------------------
@@ -147,29 +145,32 @@ private:
// Proxy for shared memory control block, to isolate callers from needing to know the details.
// There is exactly one ClientProxy and one ServerProxy per shared memory control block.
// The proxies are located in normal memory, and are not multi-thread safe within a given side.
-class Proxy {
+class Proxy : public RefBase {
protected:
- Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
- : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize) { }
+ Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut,
+ bool clientInServer);
virtual ~Proxy() { }
public:
- void* buffer(size_t offset) const {
- return mCblk->buffer(mBuffers, mFrameSize, offset);
- }
+ struct Buffer {
+ size_t mFrameCount; // number of frames available in this buffer
+ void* mRaw; // pointer to first frame
+ size_t mNonContig; // number of additional non-contiguous frames available
+ };
protected:
// These refer to shared memory, and are virtual addresses with respect to the current process.
// They may have different virtual addresses within the other process.
- audio_track_cblk_t* const mCblk; // the control block
- void* const mBuffers; // starting address of buffers
-
- const size_t mFrameCount; // not necessarily a power of 2
- const size_t mFrameSize; // in bytes
-#if 0
- const size_t mFrameCountP2; // mFrameCount rounded to power of 2, streaming mode
-#endif
-
+ audio_track_cblk_t* const mCblk; // the control block
+ void* const mBuffers; // starting address of buffers
+
+ const size_t mFrameCount; // not necessarily a power of 2
+ const size_t mFrameSize; // in bytes
+ const size_t mFrameCountP2; // mFrameCount rounded to power of 2, streaming mode
+ const bool mIsOut; // true for AudioTrack, false for AudioRecord
+ const bool mClientInServer; // true for OutputTrack, false for AudioTrack & AudioRecord
+ bool mIsShutdown; // latch set to true when shared memory corruption detected
+ size_t mUnreleased; // unreleased frames remaining from most recent obtainBuffer
};
// ----------------------------------------------------------------------------
@@ -177,9 +178,88 @@ protected:
// Proxy seen by AudioTrack client and AudioRecord client
class ClientProxy : public Proxy {
protected:
- ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
- : Proxy(cblk, buffers, frameCount, frameSize) { }
+ ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+ bool isOut, bool clientInServer);
virtual ~ClientProxy() { }
+
+public:
+ static const struct timespec kForever;
+ static const struct timespec kNonBlocking;
+
+ // Obtain a buffer with filled frames (reading) or empty frames (writing).
+ // It is permitted to call obtainBuffer() multiple times in succession, without any intervening
+ // calls to releaseBuffer(). In that case, the final obtainBuffer() is the one that effectively
+ // sets or extends the unreleased frame count.
+ // On entry:
+ // buffer->mFrameCount should be initialized to maximum number of desired frames,
+ // which must be > 0.
+ // buffer->mNonContig is unused.
+ // buffer->mRaw is unused.
+ // requested is the requested timeout in local monotonic delta time units:
+ // NULL or &kNonBlocking means non-blocking (zero timeout).
+ // &kForever means block forever (infinite timeout).
+ // Other values mean a specific timeout in local monotonic delta time units.
+ // elapsed is a pointer to a location that will hold the total local monotonic time that
+ // elapsed while blocked, or NULL if not needed.
+ // On exit:
+ // buffer->mFrameCount has the actual number of contiguous available frames,
+ // which is always 0 when the return status != NO_ERROR.
+ // buffer->mNonContig is the number of additional non-contiguous available frames.
+ // buffer->mRaw is a pointer to the first available frame,
+ // or NULL when buffer->mFrameCount == 0.
+ // The return status is one of:
+ // NO_ERROR Success, buffer->mFrameCount > 0.
+ // WOULD_BLOCK Non-blocking mode and no frames are available.
+ // TIMED_OUT Timeout occurred before any frames became available.
+ // This can happen even for infinite timeout, due to a spurious wakeup.
+ // In this case, the caller should investigate and then re-try as appropriate.
+ // DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
+ // -EINTR Call has been interrupted. Look around to see why, and then perhaps try again.
+ // NO_INIT Shared memory is corrupt.
+ // Assertion failure on entry, if buffer == NULL or buffer->mFrameCount == 0.
+ status_t obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
+ struct timespec *elapsed = NULL);
+
+ // Release (some of) the frames last obtained.
+ // On entry, buffer->mFrameCount should have the number of frames to release,
+ // which must (cumulatively) be <= the number of frames last obtained but not yet released.
+ // buffer->mRaw is ignored, but is normally same pointer returned by last obtainBuffer().
+ // It is permitted to call releaseBuffer() multiple times to release the frames in chunks.
+ // On exit:
+ // buffer->mFrameCount is zero.
+ // buffer->mRaw is NULL.
+ void releaseBuffer(Buffer* buffer);
+
+ // Call after detecting server's death
+ void binderDied();
+
+ // Call to force an obtainBuffer() to return quickly with -EINTR
+ void interrupt();
+
+ size_t getPosition() {
+ return mEpoch + mCblk->mServer;
+ }
+
+ void setEpoch(size_t epoch) {
+ mEpoch = epoch;
+ }
+
+ void setMinimum(size_t minimum) {
+ mCblk->mMinimum = minimum;
+ }
+
+ // Return the number of frames that would need to be obtained and released
+ // in order for the client to be aligned at start of buffer
+ virtual size_t getMisalignment();
+
+ size_t getEpoch() const {
+ return mEpoch;
+ }
+
+ size_t getFramesFilled();
+
+private:
+ size_t mEpoch;
};
// ----------------------------------------------------------------------------
@@ -187,8 +267,10 @@ protected:
// Proxy used by AudioTrack client, which also includes AudioFlinger::PlaybackThread::OutputTrack
class AudioTrackClientProxy : public ClientProxy {
public:
- AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
- : ClientProxy(cblk, buffers, frameCount, frameSize) { }
+ AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+ size_t frameSize, bool clientInServer = false)
+ : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
+ clientInServer) { }
virtual ~AudioTrackClientProxy() { }
// No barriers on the following operations, so the ordering of loads/stores
@@ -208,27 +290,42 @@ public:
mCblk->mSampleRate = sampleRate;
}
- // called by:
- // PlaybackThread::OutputTrack::write
- // AudioTrack::createTrack_l
- // AudioTrack::releaseBuffer
- // AudioTrack::reload
- // AudioTrack::restoreTrack_l (2 places)
- size_t stepUser(size_t stepCount) {
- return mCblk->stepUser(stepCount, mFrameCount, true /*isOut*/);
+ virtual void flush();
+
+ virtual uint32_t getUnderrunFrames() const {
+ return mCblk->u.mStreaming.mUnderrunFrames;
}
- // called by AudioTrack::obtainBuffer and AudioTrack::processBuffer
- size_t framesAvailable() {
- return mCblk->framesAvailable(mFrameCount, true /*isOut*/);
+ bool clearStreamEndDone(); // and return previous value
+
+ bool getStreamEndDone() const;
+
+ status_t waitStreamEndDone(const struct timespec *requested);
+};
+
+class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
+public:
+ StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+ size_t frameSize);
+ virtual ~StaticAudioTrackClientProxy() { }
+
+ virtual void flush();
+
+#define MIN_LOOP 16 // minimum length of each loop iteration in frames
+ void setLoop(size_t loopStart, size_t loopEnd, int loopCount);
+ size_t getBufferPosition();
+
+ virtual size_t getMisalignment() {
+ return 0;
}
- // called by AudioTrack::obtainBuffer and PlaybackThread::OutputTrack::obtainBuffer
- // FIXME remove this API since it assumes a lock that should be invisible to caller
- size_t framesAvailable_l() {
- return mCblk->framesAvailable_l(mFrameCount, true /*isOut*/);
+ virtual uint32_t getUnderrunFrames() const {
+ return 0;
}
+private:
+ StaticAudioTrackSingleStateQueue::Mutator mMutator;
+ size_t mBufferPosition; // so that getBufferPosition() appears to be synchronous
};
// ----------------------------------------------------------------------------
@@ -236,60 +333,133 @@ public:
// Proxy used by AudioRecord client
class AudioRecordClientProxy : public ClientProxy {
public:
- AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
- : ClientProxy(cblk, buffers, frameCount, frameSize) { }
+ AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+ size_t frameSize)
+ : ClientProxy(cblk, buffers, frameCount, frameSize,
+ false /*isOut*/, false /*clientInServer*/) { }
~AudioRecordClientProxy() { }
-
- // called by AudioRecord::releaseBuffer
- size_t stepUser(size_t stepCount) {
- return mCblk->stepUser(stepCount, mFrameCount, false /*isOut*/);
- }
-
- // called by AudioRecord::processBuffer
- size_t framesAvailable() {
- return mCblk->framesAvailable(mFrameCount, false /*isOut*/);
- }
-
- // called by AudioRecord::obtainBuffer
- size_t framesReady() {
- return mCblk->framesReady(false /*isOut*/);
- }
-
};
// ----------------------------------------------------------------------------
// Proxy used by AudioFlinger server
class ServerProxy : public Proxy {
+protected:
+ ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+ bool isOut, bool clientInServer);
public:
- ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut)
- : Proxy(cblk, buffers, frameCount, frameSize), mIsOut(isOut) { }
virtual ~ServerProxy() { }
- // for AudioTrack and AudioRecord
- bool step(size_t stepCount) { return mCblk->stepServer(stepCount, mFrameCount, mIsOut); }
+ // Obtain a buffer with filled frames (writing) or empty frames (reading).
+ // It is permitted to call obtainBuffer() multiple times in succession, without any intervening
+ // calls to releaseBuffer(). In that case, the final obtainBuffer() is the one that effectively
+ // sets or extends the unreleased frame count.
+ // Always non-blocking.
+ // On entry:
+ // buffer->mFrameCount should be initialized to maximum number of desired frames,
+ // which must be > 0.
+ // buffer->mNonContig is unused.
+ // buffer->mRaw is unused.
+ // On exit:
+ // buffer->mFrameCount has the actual number of contiguous available frames,
+ // which is always 0 when the return status != NO_ERROR.
+ // buffer->mNonContig is the number of additional non-contiguous available frames.
+ // buffer->mRaw is a pointer to the first available frame,
+ // or NULL when buffer->mFrameCount == 0.
+ // The return status is one of:
+ // NO_ERROR Success, buffer->mFrameCount > 0.
+ // WOULD_BLOCK No frames are available.
+ // NO_INIT Shared memory is corrupt.
+ virtual status_t obtainBuffer(Buffer* buffer);
+
+ // Release (some of) the frames last obtained.
+ // On entry, buffer->mFrameCount should have the number of frames to release,
+ // which must (cumulatively) be <= the number of frames last obtained but not yet released.
+ // It is permitted to call releaseBuffer() multiple times to release the frames in chunks.
+ // buffer->mRaw is ignored, but is normally same pointer returned by last obtainBuffer().
+ // On exit:
+ // buffer->mFrameCount is zero.
+ // buffer->mRaw is NULL.
+ virtual void releaseBuffer(Buffer* buffer);
+protected:
+ size_t mAvailToClient; // estimated frames available to client prior to releaseBuffer()
+ int32_t mFlush; // our copy of cblk->u.mStreaming.mFlush, for streaming output only
+};
+
+// Proxy used by AudioFlinger for servicing AudioTrack
+class AudioTrackServerProxy : public ServerProxy {
+public:
+ AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+ size_t frameSize, bool clientInServer = false)
+ : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) { }
+protected:
+ virtual ~AudioTrackServerProxy() { }
+
+public:
// return value of these methods must be validated by the caller
uint32_t getSampleRate() const { return mCblk->mSampleRate; }
uint16_t getSendLevel_U4_12() const { return mCblk->mSendLevel; }
uint32_t getVolumeLR() const { return mCblk->mVolumeLR; }
- // for AudioTrack only
- size_t framesReady() {
- ALOG_ASSERT(mIsOut);
- return mCblk->framesReady(true);
- }
+ // estimated total number of filled frames available to server to read,
+ // which may include non-contiguous frames
+ virtual size_t framesReady();
- // for AudioRecord only, called by RecordThread::RecordTrack::getNextBuffer
- // FIXME remove this API since it assumes a lock that should be invisible to caller
- size_t framesAvailableIn_l() {
- ALOG_ASSERT(!mIsOut);
- return mCblk->framesAvailable_l(mFrameCount, false);
- }
+ // Currently AudioFlinger will call framesReady() for a fast track from two threads:
+ // FastMixer thread, and normal mixer thread. This is dangerous, as the proxy is intended
+ // to be called from at most one thread of server, and one thread of client.
+ // As a temporary workaround, this method informs the proxy implementation that it
+ // should avoid doing a state queue poll from within framesReady().
+ // FIXME Change AudioFlinger to not call framesReady() from normal mixer thread.
+ virtual void framesReadyIsCalledByMultipleThreads() { }
+
+ bool setStreamEndDone(); // and return previous value
+
+ // Add to the tally of underrun frames, and inform client of underrun
+ virtual void tallyUnderrunFrames(uint32_t frameCount);
+
+ // Return the total number of frames which AudioFlinger desired but were unavailable,
+ // and thus which resulted in an underrun.
+ virtual uint32_t getUnderrunFrames() const { return mCblk->u.mStreaming.mUnderrunFrames; }
+
+ // Return the total number of frames that AudioFlinger has obtained and released
+ virtual size_t framesReleased() const { return mCblk->mServer; }
+};
+
+class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
+public:
+ StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+ size_t frameSize);
+protected:
+ virtual ~StaticAudioTrackServerProxy() { }
+
+public:
+ virtual size_t framesReady();
+ virtual void framesReadyIsCalledByMultipleThreads();
+ virtual status_t obtainBuffer(Buffer* buffer);
+ virtual void releaseBuffer(Buffer* buffer);
+ virtual void tallyUnderrunFrames(uint32_t frameCount);
+ virtual uint32_t getUnderrunFrames() const { return 0; }
private:
- const bool mIsOut; // true for AudioTrack, false for AudioRecord
+ ssize_t pollPosition(); // poll for state queue update, and return current position
+ StaticAudioTrackSingleStateQueue::Observer mObserver;
+ size_t mPosition; // server's current play position in frames, relative to 0
+ size_t mEnd; // cached value computed from mState, safe for asynchronous read
+ bool mFramesReadyIsCalledByMultipleThreads;
+ StaticAudioTrackState mState;
+};
+// Proxy used by AudioFlinger for servicing AudioRecord
+class AudioRecordServerProxy : public ServerProxy {
+public:
+ AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+ size_t frameSize)
+ : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/,
+ false /*clientInServer*/) { }
+protected:
+ virtual ~AudioRecordServerProxy() { }
};
// ----------------------------------------------------------------------------
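
Taken together, the ClientProxy comments above describe the obtainBuffer()/releaseBuffer() cycle that AudioTrack and AudioRecord clients now follow. A minimal client-side sketch, assuming an already-constructed AudioTrackClientProxy and using only members documented in this header (writeSilence is a hypothetical helper, and error handling is simplified to a single return):

    // Sketch only: fill an output track's shared buffer with silence using the
    // obtainBuffer()/releaseBuffer() contract documented above.
    #include <private/media/AudioTrackShared.h>
    #include <string.h>

    using namespace android;

    static status_t writeSilence(
            const sp<AudioTrackClientProxy> &proxy, size_t frames, size_t frameSize) {
        while (frames > 0) {
            Proxy::Buffer buffer;
            buffer.mFrameCount = frames;           // maximum wanted this round
            status_t err = proxy->obtainBuffer(&buffer, &ClientProxy::kForever);
            if (err != NO_ERROR) {
                return err;                        // TIMED_OUT, DEAD_OBJECT, -EINTR, ...
            }
            memset(buffer.mRaw, 0, buffer.mFrameCount * frameSize);
            frames -= buffer.mFrameCount;
            proxy->releaseBuffer(&buffer);         // resets buffer.mFrameCount to zero
        }
        return NO_ERROR;
    }
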