diff options
Diffstat (limited to 'include/media')
41 files changed, 1020 insertions, 265 deletions
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h index 43e4de7..ef392f0 100644 --- a/include/media/AudioBufferProvider.h +++ b/include/media/AudioBufferProvider.h @@ -26,6 +26,8 @@ class AudioBufferProvider { public: + // FIXME merge with AudioTrackShared::Buffer, AudioTrack::Buffer, and AudioRecord::Buffer + // and rename getNextBuffer() to obtainBuffer() struct Buffer { Buffer() : raw(NULL), frameCount(0) { } union { @@ -44,6 +46,19 @@ public: // pts is the local time when the next sample yielded by getNextBuffer // will be rendered. // Pass kInvalidPTS if the PTS is unknown or not applicable. + // On entry: + // buffer != NULL + // buffer->raw unused + // buffer->frameCount maximum number of desired frames + // On successful return: + // status NO_ERROR + // buffer->raw non-NULL pointer to buffer->frameCount contiguous available frames + // buffer->frameCount number of contiguous available frames at buffer->raw, + // 0 < buffer->frameCount <= entry value + // On error return: + // status != NO_ERROR + // buffer->raw NULL + // buffer->frameCount 0 virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0; virtual void releaseBuffer(Buffer* buffer) = 0; diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h index 38c6548..052064d 100644 --- a/include/media/AudioRecord.h +++ b/include/media/AudioRecord.h @@ -14,31 +14,27 @@ * limitations under the License. 
*/ -#ifndef AUDIORECORD_H_ -#define AUDIORECORD_H_ +#ifndef ANDROID_AUDIORECORD_H +#define ANDROID_AUDIORECORD_H -#include <binder/IMemory.h> #include <cutils/sched_policy.h> #include <media/AudioSystem.h> #include <media/IAudioRecord.h> -#include <system/audio.h> -#include <utils/RefBase.h> -#include <utils/Errors.h> #include <utils/threads.h> namespace android { +// ---------------------------------------------------------------------------- + class audio_track_cblk_t; class AudioRecordClientProxy; // ---------------------------------------------------------------------------- -class AudioRecord : virtual public RefBase +class AudioRecord : public RefBase { public: - static const int DEFAULT_SAMPLE_RATE = 8000; - /* Events used by AudioRecord callback function (callback_t). * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*. */ @@ -49,6 +45,8 @@ public: // (See setMarkerPosition()). EVENT_NEW_POS = 3, // Record head is at a new position // (See setPositionUpdatePeriod()). + EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and + // voluntary invalidation by mediaserver, or mediaserver crash. }; /* Client should declare Buffer on the stack and pass address to obtainBuffer() @@ -58,11 +56,17 @@ public: class Buffer { public: + // FIXME use m prefix size_t frameCount; // number of sample frames corresponding to size; // on input it is the number of frames available, // on output is the number of frames actually drained + // (currently ignored, but will make the primary field in future) + + size_t size; // input/output in bytes == frameCount * frameSize + // FIXME this is redundant with respect to frameCount, + // and TRANSFER_OBTAIN mode is broken for 8-bit data + // since we don't define the frame format - size_t size; // total size in bytes == frameCount * frameSize union { void* raw; short* i16; // signed 16-bit @@ -84,6 +88,7 @@ public: * - EVENT_OVERRUN: unused. 
* - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames. * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames. + * - EVENT_NEW_IAUDIORECORD: unused. */ typedef void (*callback_t)(int event, void* user, void *info); @@ -101,94 +106,112 @@ public: audio_format_t format, audio_channel_mask_t channelMask); + /* How data is transferred from AudioRecord + */ + enum transfer_type { + TRANSFER_DEFAULT, // not specified explicitly; determine from other parameters + TRANSFER_CALLBACK, // callback EVENT_MORE_DATA + TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer() + TRANSFER_SYNC, // synchronous read() + }; + /* Constructs an uninitialized AudioRecord. No connection with - * AudioFlinger takes place. + * AudioFlinger takes place. Use set() after this. */ AudioRecord(); /* Creates an AudioRecord object and registers it with AudioFlinger. * Once created, the track needs to be started before it can be used. - * Unspecified values are set to the audio hardware's current - * values. + * Unspecified values are set to appropriate default values. * * Parameters: * - * inputSource: Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT). - * sampleRate: Track sampling rate in Hz. + * inputSource: Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT). + * sampleRate: Data sink sampling rate in Hz. * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed * 16 bits per sample). - * channelMask: Channel mask. + * channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true. * frameCount: Minimum size of track PCM buffer in frames. This defines the * application's contribution to the * latency of the track. The actual size selected by the AudioRecord could * be larger if the requested size is not compatible with current audio HAL * latency. Zero means to use a default value. * cbf: Callback function. 
If not null, this function is called periodically - * to consume new PCM data. + * to consume new PCM data and inform of marker, position updates, etc. * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames are ready in record track output buffer. * sessionId: Not yet supported. + * transferType: How data is transferred from AudioRecord. + * flags: See comments on audio_input_flags_t in <system/audio.h> + * threadCanCallJava: Not present in parameter list, and so is fixed at false. */ AudioRecord(audio_source_t inputSource, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, int frameCount = 0, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0); - + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE); /* Terminates the AudioRecord and unregisters it from AudioFlinger. * Also destroys all resources associated with the AudioRecord. */ - ~AudioRecord(); - +protected: + virtual ~AudioRecord(); +public: - /* Initialize an uninitialized AudioRecord. + /* Initialize an AudioRecord that was created using the AudioRecord() constructor. + * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters. * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful intialization - * - INVALID_OPERATION: AudioRecord is already intitialized or record device is already in use + * - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use * - BAD_VALUE: invalid parameter (channels, format, sampleRate...) 
* - NO_INIT: audio server or audio hardware not initialized * - PERMISSION_DENIED: recording is not allowed for the requesting process + * + * Parameters not listed in the AudioRecord constructors above: + * + * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI. */ - status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO, + status_t set(audio_source_t inputSource, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, int frameCount = 0, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, bool threadCanCallJava = false, - int sessionId = 0); - + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE); /* Result of constructing the AudioRecord. This must be checked * before using any AudioRecord API (except for set()), because using * an uninitialized AudioRecord produces undefined results. * See set() method above for possible return codes. */ - status_t initCheck() const; + status_t initCheck() const { return mStatus; } /* Returns this track's estimated latency in milliseconds. * This includes the latency due to AudioRecord buffer size, * and audio hardware driver. 
*/ - uint32_t latency() const; + uint32_t latency() const { return mLatency; } /* getters, see constructor and set() */ - audio_format_t format() const; - uint32_t channelCount() const; - size_t frameCount() const; - size_t frameSize() const { return mFrameSize; } - audio_source_t inputSource() const; - + audio_format_t format() const { return mFormat; } + uint32_t channelCount() const { return mChannelCount; } + size_t frameCount() const { return mFrameCount; } + size_t frameSize() const { return mFrameSize; } + audio_source_t inputSource() const { return mInputSource; } /* After it's created the track is not active. Call start() to * make it active. If set, the callback will start being called. @@ -198,26 +221,29 @@ public: status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, int triggerSession = 0); - /* Stop a track. If set, the callback will cease being called and - * obtainBuffer returns STOPPED. Note that obtainBuffer() still works - * and will drain buffers until the pool is exhausted. + /* Stop a track. If set, the callback will cease being called. Note that obtainBuffer() still + * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK. */ void stop(); bool stopped() const; - /* Get sample rate for this record track in Hz. + /* Return the sink sample rate for this record track in Hz. + * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock. */ - uint32_t getSampleRate() const; + uint32_t getSampleRate() const { return mSampleRate; } /* Sets marker position. When record reaches the number of frames specified, * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition * with marker == 0 cancels marker notification callback. + * To set a marker at a position which would compute as 0, + * a workaround is to set the marker at a nearby position such as ~0 or 1.
* If the AudioRecord has been opened with no callback function associated, * the operation will fail. * * Parameters: * - * marker: marker position expressed in frames. + * marker: marker position expressed in wrapping (overflow) frame units, + * like the return value of getPosition(). * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -226,13 +252,13 @@ public: status_t setMarkerPosition(uint32_t marker); status_t getMarkerPosition(uint32_t *marker) const; - /* Sets position update period. Every time the number of frames specified has been recorded, * a callback with event type EVENT_NEW_POS is called. * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification * callback. * If the AudioRecord has been opened with no callback function associated, * the operation will fail. + * Extremely small values may be rounded up to a value the implementation can support. * * Parameters: * @@ -245,13 +271,13 @@ public: status_t setPositionUpdatePeriod(uint32_t updatePeriod); status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const; - - /* Gets record head position. The position is the total number of frames - * recorded since record start. + /* Return the total number of frames recorded since recording started. + * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz. + * It is reset to zero by stop(). * * Parameters: * - * position: Address where to return record head position within AudioRecord buffer. + * position: Address where to return record head position. * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -276,38 +302,74 @@ public: * * Returned value: * AudioRecord session ID. + * + * No lock needed because session ID doesn't change after first set(). */ - int getSessionId() const; - - /* Obtains a buffer of "frameCount" frames. The buffer must be - * drained entirely, and then released with releaseBuffer(). 
- * If the track is stopped, obtainBuffer() returns - * STOPPED instead of NO_ERROR as long as there are buffers available, - * at which point NO_MORE_BUFFERS is returned. + int getSessionId() const { return mSessionId; } + + /* Obtains a buffer of up to "audioBuffer->frameCount" full frames. + * After draining these frames of data, the caller should release them with releaseBuffer(). + * If the track buffer is not empty, obtainBuffer() returns as many contiguous + * full frames as are available immediately. + * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK + * regardless of the value of waitCount. + * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a + * maximum timeout based on waitCount; see chart below. * Buffers will be returned until the pool * is exhausted, at which point obtainBuffer() will either block - * or return WOULD_BLOCK depending on the value of the "blocking" + * or return WOULD_BLOCK depending on the value of the "waitCount" * parameter. * + * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications, + * which should use read() or callback EVENT_MORE_DATA instead. + * * Interpretation of waitCount: * +n limits wait time to n * WAIT_PERIOD_MS, * -1 causes an (almost) infinite wait time, * 0 non-blocking. 
+ * + * Buffer fields + * On entry: + * frameCount number of frames requested + * After error return: + * frameCount 0 + * size 0 + * raw undefined + * After successful return: + * frameCount actual number of frames available, <= number requested + * size actual number of bytes available + * raw pointer to the buffer */ - enum { - NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value - STOPPED = 1 - }; + /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */ + status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount) + __attribute__((__deprecated__)); - status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount); +private: + /* If nonContig is non-NULL, it is an output parameter that will be set to the number of + * additional non-contiguous frames that are available immediately. + * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(), + * in case the requested amount of frames is in two or more non-contiguous regions. + * FIXME requested and elapsed are both relative times. Consider changing to absolute time. + */ + status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested, + struct timespec *elapsed = NULL, size_t *nonContig = NULL); +public: - /* Release an emptied buffer of "frameCount" frames for AudioFlinger to re-fill. */ + /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */ + // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed void releaseBuffer(Buffer* audioBuffer); - /* As a convenience we provide a read() interface to the audio buffer. - * This is implemented on top of obtainBuffer/releaseBuffer. + * Input parameter 'size' is in byte units. + * This is implemented on top of obtainBuffer/releaseBuffer. For best + * performance use callbacks. 
Returns actual number of bytes read >= 0, + * or one of the following negative status codes: + * INVALID_OPERATION AudioRecord is configured for streaming mode + * BAD_VALUE size is invalid + * WOULD_BLOCK when obtainBuffer() returns same, or + * AudioRecord was stopped during the read + * or any other error code returned by IAudioRecord::start() or restoreRecord_l(). */ ssize_t read(void* buffer, size_t size); @@ -338,66 +400,113 @@ private: void resume(); // allow thread to execute, if not requested to exit private: + void pauseInternal(nsecs_t ns = 0LL); + // like pause(), but only used internally within thread + friend class AudioRecord; virtual bool threadLoop(); - AudioRecord& mReceiver; + AudioRecord& mReceiver; virtual ~AudioRecordThread(); Mutex mMyLock; // Thread::mLock is private Condition mMyCond; // Thread::mThreadExitedCondition is private - bool mPaused; // whether thread is currently paused + bool mPaused; // whether thread is requested to pause at next loop entry + bool mPausedInt; // whether thread internally requests pause + nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored }; // body of AudioRecordThread::threadLoop() - bool processAudioBuffer(const sp<AudioRecordThread>& thread); + // returns the maximum amount of time before we would like to run again, where: + // 0 immediately + // > 0 no later than this many nanoseconds from now + // NS_WHENEVER still active but no particular deadline + // NS_INACTIVE inactive so don't run again until re-started + // NS_NEVER never again + static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3; + nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread); + + // caller must hold lock on mLock for all _l methods + status_t openRecord_l(size_t epoch); - status_t openRecord_l(uint32_t sampleRate, - audio_format_t format, - size_t frameCount, - audio_io_handle_t input); - audio_io_handle_t getInput_l(); - status_t restoreRecord_l(audio_track_cblk_t*& cblk); + // 
FIXME enum is faster than strcmp() for parameter 'from' + status_t restoreRecord_l(const char *from); sp<AudioRecordThread> mAudioRecordThread; mutable Mutex mLock; - bool mActive; // protected by mLock + // Current client state: false = stopped, true = active. Protected by mLock. If more states + // are added, consider changing this to enum State { ... } mState as in AudioTrack. + bool mActive; // for client callback handler callback_t mCbf; // callback handler for events, or NULL void* mUserData; // for notification APIs - uint32_t mNotificationFrames; - uint32_t mRemainingFrames; - uint32_t mMarkerPosition; // in frames + uint32_t mNotificationFramesReq; // requested number of frames between each + // notification callback + uint32_t mNotificationFramesAct; // actual number of frames between each + // notification callback + bool mRefreshRemaining; // processAudioBuffer() should refresh next 2 + + // These are private to processAudioBuffer(), and are not protected by a lock + uint32_t mRemainingFrames; // number of frames to request in obtainBuffer() + bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer() + int mObservedSequence; // last observed value of mSequence + + uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; uint32_t mNewPosition; // in frames - uint32_t mUpdatePeriod; // in ms + uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS + + status_t mStatus; // constant after constructor or set() uint32_t mSampleRate; size_t mFrameCount; audio_format_t mFormat; - uint8_t mChannelCount; + uint32_t mChannelCount; size_t mFrameSize; // app-level frame size == AudioFlinger frame size audio_source_t mInputSource; - status_t mStatus; - uint32_t mLatency; + uint32_t mLatency; // in ms audio_channel_mask_t mChannelMask; - audio_io_handle_t mInput; // returned by AudioSystem::getInput() + audio_input_flags_t mFlags; int mSessionId; + transfer_type mTransfer; + + audio_io_handle_t mInput; // 
returned by AudioSystem::getInput() // may be changed if IAudioRecord object is re-created sp<IAudioRecord> mAudioRecord; sp<IMemory> mCblkMemory; - audio_track_cblk_t* mCblk; - void* mBuffers; // starting address of buffers in shared memory + audio_track_cblk_t* mCblk; // re-load after mLock.unlock() - int mPreviousPriority; // before start() + int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; - AudioRecordClientProxy* mProxy; + bool mAwaitBoost; // thread should wait for priority boost before running + + // The proxy should only be referenced while a lock is held because the proxy isn't + // multi-thread safe. + // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock, + // provided that the caller also holds an extra reference to the proxy and shared memory to keep + // them around in case they are replaced during the obtainBuffer(). + sp<AudioRecordClientProxy> mProxy; + + bool mInOverrun; // whether recorder is currently in overrun state + +private: + class DeathNotifier : public IBinder::DeathRecipient { + public: + DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { } + protected: + virtual void binderDied(const wp<IBinder>& who); + private: + const wp<AudioRecord> mAudioRecord; + }; + + sp<DeathNotifier> mDeathNotifier; + uint32_t mSequence; // incremented for each new IAudioRecord attempt }; }; // namespace android -#endif /*AUDIORECORD_H_*/ +#endif // ANDROID_AUDIORECORD_H diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index b11c812..225ef76 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -17,20 +17,18 @@ #ifndef ANDROID_AUDIOSYSTEM_H_ #define ANDROID_AUDIOSYSTEM_H_ -#include <utils/RefBase.h> -#include <utils/threads.h> -#include <media/IAudioFlinger.h> - +#include <hardware/audio_effect.h> +#include <media/IAudioFlingerClient.h> #include <system/audio.h> #include <system/audio_policy.h> - -/* XXX: Should be include by 
all the users instead */ -#include <media/AudioParameter.h> +#include <utils/Errors.h> +#include <utils/Mutex.h> namespace android { typedef void (*audio_error_callback)(status_t err); +class IAudioFlinger; class IAudioPolicyService; class String8; @@ -128,8 +126,10 @@ public: // - BAD_VALUE: invalid parameter // NOTE: this feature is not supported on all hardware platforms and it is // necessary to check returned status before using the returned values. - static status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, - audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getRenderPosition(audio_io_handle_t output, + size_t *halFrames, + size_t *dspFrames, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid static size_t getInputFramesLost(audio_io_handle_t ioHandle); @@ -155,11 +155,11 @@ public: class OutputDescriptor { public: OutputDescriptor() - : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channels(0), frameCount(0), latency(0) {} + : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0) {} uint32_t samplingRate; - int32_t format; - int32_t channels; + audio_format_t format; + audio_channel_mask_t channelMask; size_t frameCount; uint32_t latency; }; @@ -197,7 +197,8 @@ public: uint32_t samplingRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO, - audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE); + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL); static status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0); @@ -245,6 +246,15 @@ public: static uint32_t getPrimaryOutputSamplingRate(); static size_t getPrimaryOutputFrameCount(); + static status_t setLowRamDevice(bool isLowRamDevice); + + // Check if hw offload is possible for given format, stream 
type, sample rate, + // bit rate, duration, video and streaming or offload property is enabled + static bool isOffloadSupported(const audio_offload_info_t& info); + + // check presence of audio flinger service. + // returns NO_ERROR if binding to service succeeds, DEAD_OBJECT otherwise + static status_t checkAudioFlinger(); // ---------------------------------------------------------------------------- private: diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h new file mode 100644 index 0000000..c29c7e5 --- /dev/null +++ b/include/media/AudioTimestamp.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_TIMESTAMP_H +#define ANDROID_AUDIO_TIMESTAMP_H + +#include <time.h> + +class AudioTimestamp { +public: + AudioTimestamp() : mPosition(0) { + mTime.tv_sec = 0; + mTime.tv_nsec = 0; + } + // FIXME change type to match android.media.AudioTrack + uint32_t mPosition; // a frame position in AudioTrack::getPosition() units + struct timespec mTime; // corresponding CLOCK_MONOTONIC when frame is expected to present +}; + +#endif // ANDROID_AUDIO_TIMESTAMP_H diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h index 64f82bb..f2f9c22 100644 --- a/include/media/AudioTrack.h +++ b/include/media/AudioTrack.h @@ -17,18 +17,10 @@ #ifndef ANDROID_AUDIOTRACK_H #define ANDROID_AUDIOTRACK_H -#include <stdint.h> -#include <sys/types.h> - -#include <media/IAudioFlinger.h> -#include <media/IAudioTrack.h> -#include <media/AudioSystem.h> - -#include <utils/RefBase.h> -#include <utils/Errors.h> -#include <binder/IInterface.h> -#include <binder/IMemory.h> #include <cutils/sched_policy.h> +#include <media/AudioSystem.h> +#include <media/AudioTimestamp.h> +#include <media/IAudioTrack.h> #include <utils/threads.h> namespace android { @@ -37,10 +29,11 @@ namespace android { class audio_track_cblk_t; class AudioTrackClientProxy; +class StaticAudioTrackClientProxy; // ---------------------------------------------------------------------------- -class AudioTrack : virtual public RefBase +class AudioTrack : public RefBase { public: enum channel_index { @@ -49,7 +42,7 @@ public: RIGHT = 1 }; - /* Events used by AudioTrack callback function (audio_track_cblk_t). + /* Events used by AudioTrack callback function (callback_t). * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*. */ enum event_type { @@ -64,7 +57,15 @@ public: // (See setMarkerPosition()). EVENT_NEW_POS = 4, // Playback head is at a new position // (See setPositionUpdatePeriod()). - EVENT_BUFFER_END = 5 // Playback head is at the end of the buffer. 
+ EVENT_BUFFER_END = 5, // Playback head is at the end of the buffer. + // Not currently used by android.media.AudioTrack. + EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and + // voluntary invalidation by mediaserver, or mediaserver crash. + EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played + // back (after stop is called) + EVENT_NEW_TIMESTAMP = 8, // Delivered periodically and when there's a significant change + // in the mapping from frame position to presentation time. + // See AudioTimestamp for the information included with event. }; /* Client should declare Buffer on the stack and pass address to obtainBuffer() @@ -74,19 +75,25 @@ public: class Buffer { public: + // FIXME use m prefix size_t frameCount; // number of sample frames corresponding to size; // on input it is the number of frames desired, // on output is the number of frames actually filled + // (currently ignored, but will make the primary field in future) + + size_t size; // input/output in bytes == frameCount * frameSize + // on output is the number of bytes actually filled + // FIXME this is redundant with respect to frameCount, + // and TRANSFER_OBTAIN mode is broken for 8-bit data + // since we don't define the frame format - size_t size; // input/output in byte units union { void* raw; - short* i16; // signed 16-bit - int8_t* i8; // unsigned 8-bit, offset by 0x80 + short* i16; // signed 16-bit + int8_t* i8; // unsigned 8-bit, offset by 0x80 }; }; - /* As a convenience, if a callback is supplied, a handler thread * is automatically created with the appropriate priority. This thread * invokes the callback when a new buffer becomes available or various conditions occur. @@ -100,9 +107,12 @@ public: * written. * - EVENT_UNDERRUN: unused. * - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining. - * - EVENT_MARKER: pointer to an uint32_t containing the marker position in frames. 
- * - EVENT_NEW_POS: pointer to an uint32_t containing the new position in frames. + * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames. + * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames. * - EVENT_BUFFER_END: unused. + * - EVENT_NEW_IAUDIOTRACK: unused. + * - EVENT_STREAM_END: unused. + * - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp. */ typedef void (*callback_t)(int event, void* user, void *info); @@ -112,11 +122,22 @@ public: * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation * - NO_INIT: audio server or audio hardware not initialized + * - BAD_VALUE: unsupported configuration */ - static status_t getMinFrameCount(size_t* frameCount, - audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT, - uint32_t sampleRate = 0); + static status_t getMinFrameCount(size_t* frameCount, + audio_stream_type_t streamType, + uint32_t sampleRate); + + /* How data is transferred to AudioTrack + */ + enum transfer_type { + TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters + TRANSFER_CALLBACK, // callback EVENT_MORE_DATA + TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer() + TRANSFER_SYNC, // synchronous write() + TRANSFER_SHARED, // shared memory + }; /* Constructs an uninitialized AudioTrack. No connection with * AudioFlinger takes place. Use set() after this. @@ -128,13 +149,13 @@ public: * Unspecified values are set to appropriate default values. * With this constructor, the track is configured for streaming mode. * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA. - * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is deprecated. + * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed. * * Parameters: * * streamType: Select the type of audio stream this track is attached to * (e.g. AUDIO_STREAM_MUSIC). 
- * sampleRate: Track sampling rate in Hz. + * sampleRate: Data source sampling rate in Hz. * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed * 16 bits per sample). * channelMask: Channel mask. @@ -149,21 +170,24 @@ public: * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames have been consumed from track input buffer. + * This is expressed in units of frames at the initial source sample rate. * sessionId: Specific session ID, or zero to use default. - * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI. - * If not present in parameter list, then fixed at false. + * transferType: How data is transferred to AudioTrack. + * threadCanCallJava: Not present in parameter list, and so is fixed at false. */ AudioTrack( audio_stream_type_t streamType, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t, int frameCount = 0, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0); + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + const audio_offload_info_t *offloadInfo = NULL); /* Creates an audio track and registers it with AudioFlinger. * With this constructor, the track is configured for static buffer mode. @@ -174,38 +198,47 @@ public: * The write() method is not supported in this case. * It is recommended to pass a callback function to be notified of playback end by an * EVENT_UNDERRUN event. - * FIXME EVENT_MORE_DATA still occurs; it must be ignored. 
*/ AudioTrack( audio_stream_type_t streamType, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, - const sp<IMemory>& sharedBuffer = 0, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + const sp<IMemory>& sharedBuffer, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0); + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + const audio_offload_info_t *offloadInfo = NULL); /* Terminates the AudioTrack and unregisters it from AudioFlinger. * Also destroys all resources associated with the AudioTrack. */ - ~AudioTrack(); +protected: + virtual ~AudioTrack(); +public: - /* Initialize an uninitialized AudioTrack. + /* Initialize an AudioTrack that was created using the AudioTrack() constructor. + * Don't call set() more than once, or after the AudioTrack() constructors that take parameters. * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful initialization * - INVALID_OPERATION: AudioTrack is already initialized * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...) * - NO_INIT: audio server or audio hardware not initialized + * If status is not equal to NO_ERROR, don't call any other APIs on this AudioTrack. * If sharedBuffer is non-0, the frameCount parameter is ignored and * replaced by the shared buffer's total allocated size in frame units. + * + * Parameters not listed in the AudioTrack constructors above: + * + * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI. 
*/ - status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, + status_t set(audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, int frameCount = 0, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, callback_t cbf = NULL, @@ -213,9 +246,11 @@ public: int notificationFrames = 0, const sp<IMemory>& sharedBuffer = 0, bool threadCanCallJava = false, - int sessionId = 0); + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + const audio_offload_info_t *offloadInfo = NULL); - /* Result of constructing the AudioTrack. This must be checked + /* Result of constructing the AudioTrack. This must be checked for successful initialization * before using any AudioTrack API (except for set()), because using * an uninitialized AudioTrack produces undefined results. * See set() method above for possible return codes. @@ -233,14 +268,15 @@ public: audio_stream_type_t streamType() const { return mStreamType; } audio_format_t format() const { return mFormat; } - /* Return frame size in bytes, which for linear PCM is channelCount * (bit depth per channel / 8). + /* Return frame size in bytes, which for linear PCM is + * channelCount * (bit depth per channel / 8). * channelCount is determined from channelMask, and bit depth comes from format. * For non-linear formats, the frame size is typically 1 byte. */ - uint32_t channelCount() const { return mChannelCount; } + size_t frameSize() const { return mFrameSize; } + uint32_t channelCount() const { return mChannelCount; } uint32_t frameCount() const { return mFrameCount; } - size_t frameSize() const { return mFrameSize; } /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */ sp<IMemory> sharedBuffer() const { return mSharedBuffer; } @@ -249,14 +285,13 @@ public: * make it active. 
If set, the callback will start being called. * If the track was previously paused, volume is ramped up over the first mix buffer. */ - void start(); + status_t start(); /* Stop a track. * In static buffer mode, the track is stopped immediately. - * In streaming mode, the callback will cease being called and - * obtainBuffer returns STOPPED. Note that obtainBuffer() still works - * and will fill up buffers until the pool is exhausted. - * The stop does not occur immediately: any data remaining in the buffer + * In streaming mode, the callback will cease being called. Note that obtainBuffer() still + * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK. + * In streaming mode the stop does not occur immediately: any data remaining in the buffer * is first drained, mixed, and output, and only then is the track marked as stopped. */ void stop(); @@ -270,7 +305,7 @@ public: void flush(); /* Pause a track. After pause, the callback will cease being called and - * obtainBuffer returns STOPPED. Note that obtainBuffer() still works + * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works * and will fill up buffers until the pool is exhausted. * Volume is ramped down over the next mix buffer following the pause request, * and then the track is marked as paused. It can be resumed with ramp up by start(). @@ -294,11 +329,11 @@ public: status_t setAuxEffectSendLevel(float level); void getAuxEffectSendLevel(float* level) const; - /* Set sample rate for this track in Hz, mostly used for games' sound effects + /* Set source sample rate for this track in Hz, mostly used for games' sound effects */ status_t setSampleRate(uint32_t sampleRate); - /* Return current sample rate in Hz, or 0 if unknown */ + /* Return current source sample rate in Hz, or 0 if unknown */ uint32_t getSampleRate() const; /* Enables looping and sets the start and end points of looping. 
@@ -306,20 +341,24 @@ public: * * Parameters: * - * loopStart: loop start expressed as the number of PCM frames played since AudioTrack start. - * loopEnd: loop end expressed as the number of PCM frames played since AudioTrack start. + * loopStart: loop start in frames relative to start of buffer. + * loopEnd: loop end in frames relative to start of buffer. * loopCount: number of loops to execute. Calling setLoop() with loopCount == 0 cancels any - * pending or active loop. loopCount = -1 means infinite looping. + * pending or active loop. loopCount == -1 means infinite looping. * * For proper operation the following condition must be respected: - * (loopEnd-loopStart) <= framecount() + * loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount(). + * + * If the loop period (loopEnd - loopStart) is too small for the implementation to support, + * setLoop() will return BAD_VALUE. loopCount must be >= -1. + * */ status_t setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount); /* Sets marker position. When playback reaches the number of frames specified, a callback with * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker * notification callback. To set a marker at a position which would compute as 0, - * a workaround is to the set the marker at a nearby position such as -1 or 1. + * a workaround is to set the marker at a nearby position such as ~0 or 1. * If the AudioTrack has been opened with no callback function associated, the operation will * fail. * @@ -354,18 +393,14 @@ public: status_t setPositionUpdatePeriod(uint32_t updatePeriod); status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const; - /* Sets playback head position within AudioTrack buffer. The new position is specified - * in number of frames. - * This method must be called with the AudioTrack in paused or stopped state. - * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames. 
- * Therefore using this method makes sense only when playing a "static" audio buffer - * as opposed to streaming. - * The getPosition() method on the other hand returns the total number of frames played since - * playback start. + /* Sets playback head position. + * Only supported for static buffer mode. * * Parameters: * - * position: New playback head position within AudioTrack buffer. + * position: New playback head position in frames relative to start of buffer. + * 0 <= position <= frameCount(). Note that end of buffer is permitted, + * but will result in an immediate underrun if started. * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -378,8 +413,22 @@ public: /* Return the total number of frames played since playback start. * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz. * It is reset to zero by flush(), reload(), and stop(). + * + * Parameters: + * + * position: Address where to return play head position. + * + * Returned status (from utils/Errors.h) can be: + * - NO_ERROR: successful operation + * - BAD_VALUE: position is NULL */ - status_t getPosition(uint32_t *position); + status_t getPosition(uint32_t *position) const; + + /* For static buffer mode only, this returns the current playback position in frames + * relative to start of buffer. It is analogous to the position units used by + * setLoop() and setPosition(). After underrun, the position will be at end of buffer. + */ + status_t getBufferPosition(uint32_t *position); /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids * rewriting the buffer before restarting playback after a stop. @@ -426,15 +475,19 @@ public: */ status_t attachAuxEffect(int effectId); - /* Obtains a buffer of "frameCount" frames. The buffer must be - * filled entirely, and then released with releaseBuffer(). 
- * If the track is stopped, obtainBuffer() returns - * STOPPED instead of NO_ERROR as long as there are buffers available, - * at which point NO_MORE_BUFFERS is returned. + /* Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames. + * After filling these slots with data, the caller should release them with releaseBuffer(). + * If the track buffer is not full, obtainBuffer() returns as many contiguous + * [empty slots for] frames as are available immediately. + * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK + * regardless of the value of waitCount. + * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a + * maximum timeout based on waitCount; see chart below. * Buffers will be returned until the pool * is exhausted, at which point obtainBuffer() will either block - * or return WOULD_BLOCK depending on the value of the "blocking" + * or return WOULD_BLOCK depending on the value of the "waitCount" * parameter. + * Each sample is 16-bit signed PCM. * * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications, * which should use write() or callback EVENT_MORE_DATA instead. @@ -457,33 +510,76 @@ public: * raw pointer to the buffer */ - enum { - NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value - STOPPED = 1 - }; + /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */ + status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount) + __attribute__((__deprecated__)); - status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount); +private: + /* If nonContig is non-NULL, it is an output parameter that will be set to the number of + * additional non-contiguous frames that are available immediately. + * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(), + * in case the requested amount of frames is in two or more non-contiguous regions. 
+ * FIXME requested and elapsed are both relative times. Consider changing to absolute time. + */ + status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested, + struct timespec *elapsed = NULL, size_t *nonContig = NULL); +public: - /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */ +//EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy +// enum { +// NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value +// TEAR_DOWN = 0x80000002, +// STOPPED = 1, +// STREAM_END_WAIT, +// STREAM_END +// }; + + /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */ + // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed void releaseBuffer(Buffer* audioBuffer); /* As a convenience we provide a write() interface to the audio buffer. + * Input parameter 'size' is in byte units. * This is implemented on top of obtainBuffer/releaseBuffer. For best * performance use callbacks. Returns actual number of bytes written >= 0, * or one of the following negative status codes: - * INVALID_OPERATION AudioTrack is configured for shared buffer mode + * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode * BAD_VALUE size is invalid - * STOPPED AudioTrack was stopped during the write - * NO_MORE_BUFFERS when obtainBuffer() returns same + * WOULD_BLOCK when obtainBuffer() returns same, or + * AudioTrack was stopped during the write * or any other error code returned by IAudioTrack::start() or restoreTrack_l(). - * Not supported for static buffer mode. */ ssize_t write(const void* buffer, size_t size); /* * Dumps the state of an audio track. */ - status_t dump(int fd, const Vector<String16>& args) const; + status_t dump(int fd, const Vector<String16>& args) const; + + /* + * Return the total number of frames which AudioFlinger desired but were unavailable, + * and thus which resulted in an underrun. 
Reset to zero by stop(). + */ + uint32_t getUnderrunFrames() const; + + /* Get the flags */ + audio_output_flags_t getFlags() const { return mFlags; } + + /* Set parameters - only possible when using direct output */ + status_t setParameters(const String8& keyValuePairs); + + /* Get parameters */ + String8 getParameters(const String8& keys); + + /* Poll for a timestamp on demand. + * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs, + * or if you need to get the most recent timestamp outside of the event callback handler. + * Caution: calling this method too often may be inefficient; + * if you need a high resolution mapping between frame position and presentation time, + * consider implementing that at application level, based on the low resolution timestamps. + * Returns NO_ERROR if timestamp is valid. + */ + status_t getTimestamp(AudioTimestamp& timestamp); protected: /* copying audio tracks is not allowed */ @@ -504,39 +600,62 @@ protected: void resume(); // allow thread to execute, if not requested to exit private: + void pauseInternal(nsecs_t ns = 0LL); + // like pause(), but only used internally within thread + friend class AudioTrack; virtual bool threadLoop(); - AudioTrack& mReceiver; - ~AudioTrackThread(); + AudioTrack& mReceiver; + virtual ~AudioTrackThread(); Mutex mMyLock; // Thread::mLock is private Condition mMyCond; // Thread::mThreadExitedCondition is private - bool mPaused; // whether thread is currently paused + bool mPaused; // whether thread is requested to pause at next loop entry + bool mPausedInt; // whether thread internally requests pause + nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored + bool mIgnoreNextPausedInt; // whether to ignore next mPausedInt request }; // body of AudioTrackThread::threadLoop() - bool processAudioBuffer(const sp<AudioTrackThread>& thread); + // returns the maximum amount of time before we would like to run again, where: + // 0 immediately + // > 0 no later 
than this many nanoseconds from now + // NS_WHENEVER still active but no particular deadline + // NS_INACTIVE inactive so don't run again until re-started + // NS_NEVER never again + static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3; + nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread); + status_t processStreamEnd(int32_t waitCount); + // caller must hold lock on mLock for all _l methods + status_t createTrack_l(audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, size_t frameCount, audio_output_flags_t flags, const sp<IMemory>& sharedBuffer, - audio_io_handle_t output); + audio_io_handle_t output, + size_t epoch); - // can only be called when !mActive + // can only be called when mState != STATE_ACTIVE void flush_l(); - status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount); + void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount); audio_io_handle_t getOutput_l(); - status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart); - bool stopped_l() const { return !mActive; } + // FIXME enum is faster than strcmp() for parameter 'from' + status_t restoreTrack_l(const char *from); + + bool isOffloaded() const + { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; } + + // Next 3 fields may be changed if IAudioTrack is re-created, but always != 0 sp<IAudioTrack> mAudioTrack; sp<IMemory> mCblkMemory; - sp<AudioTrackThread> mAudioTrackThread; + audio_track_cblk_t* mCblk; // re-load after mLock.unlock() + sp<AudioTrackThread> mAudioTrackThread; float mVolume[2]; float mSendLevel; uint32_t mSampleRate; @@ -544,62 +663,94 @@ protected: size_t mReqFrameCount; // frame count to request the next time a new // IAudioTrack is needed - audio_track_cblk_t* mCblk; // re-load after mLock.unlock() - - // Starting address of buffers in shared memory. 
If there is a shared buffer, mBuffers - // is the value of pointer() for the shared buffer, otherwise mBuffers points - // immediately after the control block. This address is for the mapping within client - // address space. AudioFlinger::TrackBase::mBuffer is for the server address space. - void* mBuffers; + // constant after constructor or set() audio_format_t mFormat; // as requested by client, not forced to 16-bit audio_stream_type_t mStreamType; uint32_t mChannelCount; audio_channel_mask_t mChannelMask; + transfer_type mTransfer; - // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. - // For 8-bit PCM data, mFrameSizeAF is - // twice as large because data is expanded to 16-bit before being stored in buffer. + // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. For 8-bit PCM data, it's + // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer. size_t mFrameSize; // app-level frame size size_t mFrameSizeAF; // AudioFlinger frame size status_t mStatus; - uint32_t mLatency; - bool mActive; // protected by mLock + // can change dynamically when IAudioTrack invalidated + uint32_t mLatency; // in ms + // Indicates the current track state. Protected by mLock. 
+ enum State { + STATE_ACTIVE, + STATE_STOPPED, + STATE_PAUSED, + STATE_PAUSED_STOPPING, + STATE_FLUSHED, + STATE_STOPPING, + } mState; + + // for client callback handler callback_t mCbf; // callback handler for events, or NULL - void* mUserData; // for client callback handler + void* mUserData; // for notification APIs uint32_t mNotificationFramesReq; // requested number of frames between each - // notification callback + // notification callback, + // at initial source sample rate uint32_t mNotificationFramesAct; // actual number of frames between each - // notification callback + // notification callback, + // at initial source sample rate + bool mRefreshRemaining; // processAudioBuffer() should refresh next 2 + + // These are private to processAudioBuffer(), and are not protected by a lock + uint32_t mRemainingFrames; // number of frames to request in obtainBuffer() + bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer() + uint32_t mObservedSequence; // last observed value of mSequence + sp<IMemory> mSharedBuffer; - int mLoopCount; - uint32_t mRemainingFrames; + uint32_t mLoopPeriod; // in frames, zero means looping is disabled uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; uint32_t mNewPosition; // in frames - uint32_t mUpdatePeriod; // in frames + uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS - bool mFlushed; // FIXME will be made obsolete by making flush() synchronous audio_output_flags_t mFlags; int mSessionId; int mAuxEffectId; - // When locking both mLock and mCblk->lock, must lock in this order to avoid deadlock: - // 1. mLock - // 2. mCblk->lock - // It is OK to lock only mCblk->lock. 
mutable Mutex mLock; bool mIsTimed; int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; - AudioTrackClientProxy* mProxy; bool mAwaitBoost; // thread should wait for priority boost before running + + // The proxy should only be referenced while a lock is held because the proxy isn't + // multi-thread safe, especially the SingleStateQueue part of the proxy. + // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock, + // provided that the caller also holds an extra reference to the proxy and shared memory to keep + // them around in case they are replaced during the obtainBuffer(). + sp<StaticAudioTrackClientProxy> mStaticProxy; // for type safety only + sp<AudioTrackClientProxy> mProxy; // primary owner of the memory + + bool mInUnderrun; // whether track is currently in underrun state + String8 mName; // server's name for this IAudioTrack + +private: + class DeathNotifier : public IBinder::DeathRecipient { + public: + DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { } + protected: + virtual void binderDied(const wp<IBinder>& who); + private: + const wp<AudioTrack> mAudioTrack; + }; + + sp<DeathNotifier> mDeathNotifier; + uint32_t mSequence; // incremented for each new IAudioTrack attempt + audio_io_handle_t mOutput; // cached output io handle }; class TimedAudioTrack : public AudioTrack diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h index b1ed7b0..b1143b9 100644 --- a/include/media/EffectsFactoryApi.h +++ b/include/media/EffectsFactoryApi.h @@ -171,6 +171,30 @@ int EffectGetDescriptor(const effect_uuid_t *pEffectUuid, effect_descriptor_t *p //////////////////////////////////////////////////////////////////////////////// int EffectIsNullUuid(const effect_uuid_t *pEffectUuid); +//////////////////////////////////////////////////////////////////////////////// +// +// Function: EffectGetSubEffects +// +// Description: Returns the descriptors of the sub 
effects of the effect +// whose uuid is pointed to by first argument. +// +// Input: +// pEffectUuid: pointer to the effect uuid. +// size: size of the buffer pointed by pDescriptor. +// +// Input/Output: +// pDescriptor: address where to return the sub effect descriptors. +// +// Output: +// returned value: 0 successful operation. +// -ENODEV factory failed to initialize +// -EINVAL invalid pEffectUuid or pDescriptor +// -ENOENT no effect with this uuid found +// *pDescriptor: updated with the sub effect descriptors. +// +//////////////////////////////////////////////////////////////////////////////// +int EffectGetSubEffects(const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptors, size_t size); + #if __cplusplus } // extern "C" #endif diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h index 00c4444..2539ed3 100644 --- a/include/media/ExtendedAudioBufferProvider.h +++ b/include/media/ExtendedAudioBufferProvider.h @@ -18,12 +18,20 @@ #define ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H #include <media/AudioBufferProvider.h> +#include <media/AudioTimestamp.h> namespace android { class ExtendedAudioBufferProvider : public AudioBufferProvider { public: virtual size_t framesReady() const = 0; // see description at AudioFlinger.h + + // Return the total number of frames that have been obtained and released + virtual size_t framesReleased() const { return 0; } + + // Invoked by buffer consumer when a new timestamp is available. + // Default implementation ignores the timestamp. 
+ virtual void onTimestamp(const AudioTimestamp& timestamp) { } }; } // namespace android diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h index 9c3067e..eaf7780 100644 --- a/include/media/IAudioFlinger.h +++ b/include/media/IAudioFlinger.h @@ -49,9 +49,13 @@ public: TRACK_DEFAULT = 0, // client requests a default AudioTrack TRACK_TIMED = 1, // client requests a TimedAudioTrack TRACK_FAST = 2, // client requests a fast AudioTrack or AudioRecord + TRACK_OFFLOAD = 4, // client requests offload to hw codec }; typedef uint32_t track_flags_t; + // invariant on exit for all APIs that return an sp<>: + // (return value != 0) == (*status == NO_ERROR) + /* create an audio track and registers it with AudioFlinger. * return null if the track cannot be created. */ @@ -66,6 +70,10 @@ public: audio_io_handle_t output, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, + // input: ignored + // output: server's description of IAudioTrack for display in logs. + // Don't attempt to parse, as the format could change. + String8& name, status_t *status) = 0; virtual sp<IAudioRecord> openRecord( @@ -74,7 +82,7 @@ public: audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, - track_flags_t flags, + track_flags_t *flags, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, status_t *status) = 0; @@ -124,7 +132,9 @@ public: virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const = 0; - // register a current process for audio output change notifications + // Register an object to receive audio input/output change and track notifications. + // For a given calling pid, AudioFlinger disregards any registrations after the first. + // Thus the IAudioFlingerClient must be a singleton per process. 
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0; // retrieve the audio recording buffer size @@ -137,7 +147,8 @@ public: audio_format_t *pFormat, audio_channel_mask_t *pChannelMask, uint32_t *pLatencyMs, - audio_output_flags_t flags) = 0; + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo = NULL) = 0; virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0; virtual status_t closeOutput(audio_io_handle_t output) = 0; @@ -193,6 +204,10 @@ public: virtual uint32_t getPrimaryOutputSamplingRate() = 0; virtual size_t getPrimaryOutputFrameCount() = 0; + // Intended for AudioService to inform AudioFlinger of device's low RAM attribute, + // and should be called at most once. For a definition of what "low RAM" means, see + // android.app.ActivityManager.isLowRamDevice(). + virtual status_t setLowRamDevice(bool isLowRamDevice) = 0; }; diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h index b5ad4ef..09b9ea6 100644 --- a/include/media/IAudioPolicyService.h +++ b/include/media/IAudioPolicyService.h @@ -53,7 +53,8 @@ public: uint32_t samplingRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, audio_channel_mask_t channelMask = 0, - audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0; + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL) = 0; virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0) = 0; @@ -95,6 +96,9 @@ public: virtual status_t queryDefaultPreProcessing(int audioSession, effect_descriptor_t *descriptors, uint32_t *count) = 0; + // Check if offload is possible for given format, stream type, sample rate, + // bit rate, duration, video and streaming or offload property is enabled + virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0; }; diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h index 
d6e3141..eccc2ca 100644 --- a/include/media/IAudioRecord.h +++ b/include/media/IAudioRecord.h @@ -34,6 +34,9 @@ class IAudioRecord : public IInterface public: DECLARE_META_INTERFACE(AudioRecord); + /* get this track's control block */ + virtual sp<IMemory> getCblk() const = 0; + /* After it's created the track is not active. Call start() to * make it active. */ @@ -44,9 +47,6 @@ public: * will be processed, unless flush() is called. */ virtual void stop() = 0; - - /* get this tracks control block */ - virtual sp<IMemory> getCblk() const = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h index 144be0e..5c8a484 100644 --- a/include/media/IAudioTrack.h +++ b/include/media/IAudioTrack.h @@ -25,6 +25,8 @@ #include <binder/IInterface.h> #include <binder/IMemory.h> #include <utils/LinearTransform.h> +#include <utils/String8.h> +#include <media/AudioTimestamp.h> namespace android { @@ -82,6 +84,15 @@ public: or Tungsten time. 
The values for target are defined in AudioTrack.h */ virtual status_t setMediaTimeTransform(const LinearTransform& xform, int target) = 0; + + /* Send parameters to the audio hardware */ + virtual status_t setParameters(const String8& keyValuePairs) = 0; + + /* Return NO_ERROR if timestamp is valid */ + virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0; + + /* Signal the playback thread for a change in control block */ + virtual void signal() = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IDrm.h b/include/media/IDrm.h index d630c40..5ef26af 100644 --- a/include/media/IDrm.h +++ b/include/media/IDrm.h @@ -32,7 +32,7 @@ struct IDrm : public IInterface { virtual status_t initCheck() const = 0; - virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0; + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0; virtual status_t createPlugin(const uint8_t uuid[16]) = 0; diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h index 6d27b18..352561e 100644 --- a/include/media/IHDCP.h +++ b/include/media/IHDCP.h @@ -17,6 +17,7 @@ #include <binder/IInterface.h> #include <media/hardware/HDCPAPI.h> #include <media/stagefright/foundation/ABase.h> +#include <ui/GraphicBuffer.h> namespace android { @@ -45,6 +46,17 @@ struct IHDCP : public IInterface { // Request to shutdown the active HDCP session. virtual status_t shutdownAsync() = 0; + // Returns the capability bitmask of this HDCP session. + // Possible return values (please refer to HDCPAPI.h): + // HDCP_CAPS_ENCRYPT: mandatory, meaning the HDCP module can encrypt + // from an input byte-array buffer to an output byte-array buffer + // HDCP_CAPS_ENCRYPT_NATIVE: the HDCP module supports encryption from + // a native buffer to an output byte-array buffer. The format of the + // input native buffer is specific to vendor's encoder implementation. 
+ // It is the same format as that used by the encoder when + // "storeMetaDataInBuffers" extension is enabled on its output port. + virtual uint32_t getCaps() = 0; + // ENCRYPTION only: // Encrypt data according to the HDCP spec. "size" bytes of data are // available at "inData" (virtual address), "size" may not be a multiple @@ -59,6 +71,20 @@ struct IHDCP : public IInterface { const void *inData, size_t size, uint32_t streamCTR, uint64_t *outInputCTR, void *outData) = 0; + // Encrypt data according to the HDCP spec. "size" bytes of data starting + // at location "offset" are available in "buffer" (buffer handle). "size" + // may not be a multiple of 128 bits (16 bytes). An equal number of + // encrypted bytes should be written to the buffer at "outData" (virtual + // address). This operation is to be synchronous, i.e. this call does not + // return until outData contains size bytes of encrypted data. + // streamCTR will be assigned by the caller (to 0 for the first PES stream, + // 1 for the second and so on) + // inputCTR _will_be_maintained_by_the_callee_ for each PES stream. + virtual status_t encryptNative( + const sp<GraphicBuffer> &graphicBuffer, + size_t offset, size_t size, uint32_t streamCTR, + uint64_t *outInputCTR, void *outData) = 0; + // DECRYPTION only: // Decrypt data according to the HDCP spec. 
// "size" bytes of encrypted data are available at "inData" diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h index fef7af2..2998b37 100644 --- a/include/media/IMediaPlayerService.h +++ b/include/media/IMediaPlayerService.h @@ -49,8 +49,12 @@ public: virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0; virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0; - virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0; - virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0; + virtual status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, + audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize) = 0; + virtual status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, + int* pNumChannels, audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize) = 0; virtual sp<IOMX> getOMX() = 0; virtual sp<ICrypto> makeCrypto() = 0; virtual sp<IDrm> makeDrm() = 0; diff --git a/include/media/IOMX.h b/include/media/IOMX.h index 0b1d1e4..9c8451c 100644 --- a/include/media/IOMX.h +++ b/include/media/IOMX.h @@ -83,6 +83,10 @@ public: virtual status_t storeMetaDataInBuffers( node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0; + virtual status_t prepareForAdaptivePlayback( + node_id node, OMX_U32 portIndex, OMX_BOOL enable, + OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0; + virtual status_t enableGraphicBuffers( node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0; @@ -97,6 +101,10 @@ public: node_id node, OMX_U32 port_index, const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0; + virtual status_t updateGraphicBufferInMeta( + node_id node, OMX_U32 port_index, + const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0; + virtual status_t createInputSurface( node_id 
node, OMX_U32 port_index, sp<IGraphicBufferProducer> *bufferProducer) = 0; @@ -130,6 +138,17 @@ public: node_id node, const char *parameter_name, OMX_INDEXTYPE *index) = 0; + + enum InternalOptionType { + INTERNAL_OPTION_SUSPEND, // data is a bool + INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY, // data is an int64_t + }; + virtual status_t setInternalOption( + node_id node, + OMX_U32 port_index, + InternalOptionType type, + const void *data, + size_t size) = 0; }; struct omx_message { diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h index 7b0fa9e..0e6d55d 100644 --- a/include/media/IRemoteDisplayClient.h +++ b/include/media/IRemoteDisplayClient.h @@ -49,7 +49,7 @@ public: // Provides a surface texture that the client should use to stream buffers to // the remote display. virtual void onDisplayConnected(const sp<IGraphicBufferProducer>& bufferProducer, - uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way + uint32_t width, uint32_t height, uint32_t flags, uint32_t session) = 0; // one-way // Indicates that the remote display has been disconnected normally. 
// This method should only be called once the client has called 'dispose()' diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h index 0616bf0..388f767 100644 --- a/include/media/JetPlayer.h +++ b/include/media/JetPlayer.h @@ -88,7 +88,7 @@ private: EAS_DATA_HANDLE mEasData; EAS_FILE_LOCATOR mEasJetFileLoc; EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer, - AudioTrack* mAudioTrack; // and we play it in this audio track + sp<AudioTrack> mAudioTrack; // and we play it in this audio track int mTrackBufferSize; S_JET_STATUS mJetStatus; S_JET_STATUS mPreviousJetStatus; diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h index 9a75f81..3b151ef 100644 --- a/include/media/MediaPlayerInterface.h +++ b/include/media/MediaPlayerInterface.h @@ -74,9 +74,18 @@ public: // AudioSink: abstraction layer for audio output class AudioSink : public RefBase { public: + enum cb_event_t { + CB_EVENT_FILL_BUFFER, // Request to write more data to buffer. + CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played + // back (after stop is called) + CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change: + // Need to re-evaluate offloading options + }; + // Callback returns the number of bytes actually written to the buffer. 
typedef size_t (*AudioCallback)( - AudioSink *audioSink, void *buffer, size_t size, void *cookie); + AudioSink *audioSink, void *buffer, size_t size, void *cookie, + cb_event_t event); virtual ~AudioSink() {} virtual bool ready() const = 0; // audio output is open and ready @@ -99,9 +108,10 @@ public: int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT, AudioCallback cb = NULL, void *cookie = NULL, - audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0; + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL) = 0; - virtual void start() = 0; + virtual status_t start() = 0; virtual ssize_t write(const void* buffer, size_t size) = 0; virtual void stop() = 0; virtual void flush() = 0; @@ -110,6 +120,9 @@ public: virtual status_t setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; } virtual bool needsTrailingPadding() { return true; } + + virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR; }; + virtual String8 getParameters(const String8& keys) { return String8::empty(); }; }; MediaPlayerBase() : mCookie(0), mNotify(0) {} diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h index 7bf3069..2dd78cc 100644 --- a/include/media/SoundPool.h +++ b/include/media/SoundPool.h @@ -22,6 +22,8 @@ #include <utils/Vector.h> #include <utils/KeyedVector.h> #include <media/AudioTrack.h> +#include <binder/MemoryHeapBase.h> +#include <binder/MemoryBase.h> namespace android { @@ -85,6 +87,7 @@ private: int64_t mLength; char* mUrl; sp<IMemory> mData; + sp<MemoryHeapBase> mHeap; }; // stores pending events for stolen channels @@ -118,7 +121,7 @@ protected: class SoundChannel : public SoundEvent { public: enum state { IDLE, RESUMING, STOPPING, PAUSED, PLAYING }; - SoundChannel() : mAudioTrack(NULL), mState(IDLE), mNumChannels(1), + SoundChannel() : mState(IDLE), mNumChannels(1), mPos(0), mToggle(0), mAutoPaused(false) {} ~SoundChannel(); void init(SoundPool* soundPool); @@ -148,7 +151,7 @@ 
private: bool doStop_l(); SoundPool* mSoundPool; - AudioTrack* mAudioTrack; + sp<AudioTrack> mAudioTrack; SoundEvent mNextEvent; Mutex mLock; int mState; diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h index 2183fbe..98c4332 100644 --- a/include/media/ToneGenerator.h +++ b/include/media/ToneGenerator.h @@ -160,7 +160,7 @@ public: bool isInited() { return (mState == TONE_IDLE)?false:true;} // returns the audio session this ToneGenerator belongs to or 0 if an error occured. - int getSessionId() { return (mpAudioTrack == NULL) ? 0 : mpAudioTrack->getSessionId(); } + int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); } private: @@ -264,7 +264,7 @@ private: unsigned short mLoopCounter; // Current tone loopback count uint32_t mSamplingRate; // AudioFlinger Sampling rate - AudioTrack *mpAudioTrack; // Pointer to audio track used for playback + sp<AudioTrack> mpAudioTrack; // Pointer to audio track used for playback Mutex mLock; // Mutex to control concurent access to ToneGenerator object from audio callback and application API Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h index aa58905..6167dd6 100644 --- a/include/media/Visualizer.h +++ b/include/media/Visualizer.h @@ -19,7 +19,7 @@ #include <media/AudioEffect.h> #include <audio_effects/effect_visualizer.h> -#include <string.h> +#include <utils/Thread.h> /** * The Visualizer class enables application to retrieve part of the currently playing audio for @@ -114,6 +114,14 @@ public: status_t setScalingMode(uint32_t mode); uint32_t getScalingMode() { return mScalingMode; } + // set which measurements are done on the audio buffers processed by the effect. 
+ // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS + status_t setMeasurementMode(uint32_t mode); + uint32_t getMeasurementMode() { return mMeasurementMode; } + + // return a set of int32_t measurements + status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements); + // return a capture in PCM 8 bit unsigned format. The size of the capture is equal to // getCaptureSize() status_t getWaveForm(uint8_t *waveform); @@ -156,6 +164,7 @@ private: uint32_t mCaptureSize; uint32_t mSampleRate; uint32_t mScalingMode; + uint32_t mMeasurementMode; capture_cbk_t mCaptureCallBack; void *mCaptureCbkUser; sp<CaptureThread> mCaptureThread; diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h index 14381c7..4c05fc3 100644 --- a/include/media/mediaplayer.h +++ b/include/media/mediaplayer.h @@ -42,9 +42,14 @@ enum media_event_type { MEDIA_BUFFERING_UPDATE = 3, MEDIA_SEEK_COMPLETE = 4, MEDIA_SET_VIDEO_SIZE = 5, + MEDIA_STARTED = 6, + MEDIA_PAUSED = 7, + MEDIA_STOPPED = 8, + MEDIA_SKIPPED = 9, MEDIA_TIMED_TEXT = 99, MEDIA_ERROR = 100, MEDIA_INFO = 200, + MEDIA_SUBTITLE_DATA = 201, }; // Generic error codes for the media player framework. 
Errors are fatal, the @@ -173,6 +178,7 @@ enum media_track_type { MEDIA_TRACK_TYPE_VIDEO = 1, MEDIA_TRACK_TYPE_AUDIO = 2, MEDIA_TRACK_TYPE_TIMEDTEXT = 3, + MEDIA_TRACK_TYPE_SUBTITLE = 4, }; // ---------------------------------------------------------------------------- @@ -218,8 +224,12 @@ public: bool isLooping(); status_t setVolume(float leftVolume, float rightVolume); void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL); - static sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); - static sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); + static status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, + audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize); + static status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, + int* pNumChannels, audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize); status_t invoke(const Parcel& request, Parcel *reply); status_t setMetadataFilter(const Parcel& filter); status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata); diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h index 5976b18..7948d40 100644 --- a/include/media/nbaio/AudioStreamOutSink.h +++ b/include/media/nbaio/AudioStreamOutSink.h @@ -52,6 +52,8 @@ public: // implementation of GNWT (if any) virtual status_t getNextWriteTimestamp(int64_t *timestamp); + virtual status_t getTimestamp(AudioTimestamp& timestamp); + // NBAIO_Sink end #if 0 // until necessary diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h index 5fcfe9e..d3802fe 100644 --- a/include/media/nbaio/MonoPipe.h +++ b/include/media/nbaio/MonoPipe.h @@ -20,9 +20,12 @@ #include <time.h> #include <utils/LinearTransform.h> #include "NBAIO.h" +#include <media/SingleStateQueue.h> namespace android { +typedef 
SingleStateQueue<AudioTimestamp> AudioTimestampSingleStateQueue; + // MonoPipe is similar to Pipe except: // - supports only a single reader, called MonoPipeReader // - write() cannot overrun; instead it will return a short actual count if insufficient space @@ -88,6 +91,9 @@ public: // Return true if the write side of a pipe is currently shutdown. bool isShutdown(); + // Return NO_ERROR if there is a timestamp available + status_t getTimestamp(AudioTimestamp& timestamp); + private: // A pair of methods and a helper variable which allows the reader and the // writer to update and observe the values of mFront and mNextRdPTS in an @@ -127,6 +133,10 @@ private: LinearTransform mSamplesToLocalTime; bool mIsShutdown; // whether shutdown(true) was called, no barriers are needed + + AudioTimestampSingleStateQueue::Shared mTimestampShared; + AudioTimestampSingleStateQueue::Mutator mTimestampMutator; + AudioTimestampSingleStateQueue::Observer mTimestampObserver; }; } // namespace android diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h index 0e1c992..78fe867 100644 --- a/include/media/nbaio/MonoPipeReader.h +++ b/include/media/nbaio/MonoPipeReader.h @@ -49,6 +49,8 @@ public: virtual ssize_t read(void *buffer, size_t count, int64_t readPTS); + virtual void onTimestamp(const AudioTimestamp& timestamp); + // NBAIO_Source end #if 0 // until necessary diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h index f5d6eb5..1da0c73 100644 --- a/include/media/nbaio/NBAIO.h +++ b/include/media/nbaio/NBAIO.h @@ -28,6 +28,7 @@ #include <stdlib.h> #include <utils/Errors.h> #include <utils/RefBase.h> +#include <media/AudioTimestamp.h> namespace android { @@ -213,6 +214,11 @@ public: // <other> Something unexpected happened internally. Check the logs and start debugging. virtual status_t getNextWriteTimestamp(int64_t *ts) { return INVALID_OPERATION; } + // Returns NO_ERROR if a timestamp is available. 
The timestamp includes the total number + // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC + // as of this presentation count. + virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; } + protected: NBAIO_Sink(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { } virtual ~NBAIO_Sink() { } @@ -300,6 +306,10 @@ public: virtual ssize_t readVia(readVia_t via, size_t total, void *user, int64_t readPTS, size_t block = 0); + // Invoked asynchronously by corresponding sink when a new timestamp is available. + // Default implementation ignores the timestamp. + virtual void onTimestamp(const AudioTimestamp& timestamp) { } + protected: NBAIO_Source(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { } virtual ~NBAIO_Source() { } diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h index 107ba66..6d59ea7 100644 --- a/include/media/nbaio/NBLog.h +++ b/include/media/nbaio/NBLog.h @@ -90,6 +90,8 @@ public: virtual ~Timeline(); #endif + // Input parameter 'size' is the desired size of the timeline in byte units. + // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices. static size_t sharedSize(size_t size); #if 0 @@ -110,8 +112,12 @@ private: class Writer : public RefBase { public: Writer(); // dummy nop implementation without shared memory + + // Input parameter 'size' is the desired size of the timeline in byte units. + // The size of the shared memory must be at least Timeline::sharedSize(size). Writer(size_t size, void *shared); Writer(size_t size, const sp<IMemory>& iMemory); + virtual ~Writer() { } virtual void log(const char *string); @@ -165,8 +171,12 @@ private: class Reader : public RefBase { public: + + // Input parameter 'size' is the desired size of the timeline in byte units. + // The size of the shared memory must be at least Timeline::sharedSize(size). 
Reader(size_t size, const void *shared); Reader(size_t size, const sp<IMemory>& iMemory); + virtual ~Reader() { } void dump(int fd, size_t indent = 0); diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h index c08331b..cdfb6fe 100644 --- a/include/media/nbaio/SourceAudioBufferProvider.h +++ b/include/media/nbaio/SourceAudioBufferProvider.h @@ -36,6 +36,8 @@ public: // ExtendedAudioBufferProvider interface virtual size_t framesReady() const; + virtual size_t framesReleased() const; + virtual void onTimestamp(const AudioTimestamp& timestamp); private: const sp<NBAIO_Source> mSource; // the wrapped source @@ -45,6 +47,7 @@ private: size_t mOffset; // frame offset within mAllocated of valid data size_t mRemaining; // frame count within mAllocated of valid data size_t mGetCount; // buffer.frameCount of the most recent getNextBuffer + uint32_t mFramesReleased; // counter of the total number of frames released }; } // namespace android diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h index df25d7b..a8ffd4a 100644 --- a/include/media/stagefright/ACodec.h +++ b/include/media/stagefright/ACodec.h @@ -124,7 +124,8 @@ private: }; enum { - kFlagIsSecure = 1, + kFlagIsSecure = 1, + kFlagPushBlankBuffersToNativeWindowOnShutdown = 2, }; struct BufferInfo { @@ -138,6 +139,7 @@ private: IOMX::buffer_id mBufferID; Status mStatus; + unsigned mDequeuedAt; sp<ABuffer> mData; sp<GraphicBuffer> mGraphicBuffer; @@ -182,7 +184,7 @@ private: bool mSentFormat; bool mIsEncoder; - + bool mUseMetadataOnEncoderOutput; bool mShutdownInProgress; // If "mKeepComponentAllocated" we only transition back to Loaded state @@ -194,12 +196,22 @@ private: bool mChannelMaskPresent; int32_t mChannelMask; + unsigned mDequeueCounter; + bool mStoreMetaDataInOutputBuffers; + int32_t mMetaDataBuffersToSubmit; + + int64_t mRepeatFrameDelayUs; status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode); 
status_t allocateBuffersOnPort(OMX_U32 portIndex); status_t freeBuffersOnPort(OMX_U32 portIndex); status_t freeBuffer(OMX_U32 portIndex, size_t i); + status_t configureOutputBuffersFromNativeWindow( + OMX_U32 *nBufferCount, OMX_U32 *nBufferSize, + OMX_U32 *nMinUndequeuedBuffers); + status_t allocateOutputMetaDataBuffers(); + status_t submitOutputMetaDataBuffer(); status_t allocateOutputBuffersFromNativeWindow(); status_t cancelBufferToNativeWindow(BufferInfo *info); status_t freeOutputBuffersNotOwnedByComponent(); diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h index 1dc408f..912a43c 100644 --- a/include/media/stagefright/AudioPlayer.h +++ b/include/media/stagefright/AudioPlayer.h @@ -36,8 +36,16 @@ public: SEEK_COMPLETE }; + enum { + ALLOW_DEEP_BUFFERING = 0x01, + USE_OFFLOAD = 0x02, + HAS_VIDEO = 0x1000, + IS_STREAMING = 0x2000 + + }; + AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, - bool allowDeepBuffering = false, + uint32_t flags = 0, AwesomePlayer *audioObserver = NULL); virtual ~AudioPlayer(); @@ -51,7 +59,7 @@ public: status_t start(bool sourceAlreadyStarted = false); void pause(bool playPendingSamples = false); - void resume(); + status_t resume(); // Returns the timestamp of the last buffer played (in us). int64_t getMediaTimeUs(); @@ -67,10 +75,12 @@ public: status_t setPlaybackRatePermille(int32_t ratePermille); + void notifyAudioEOS(); + private: friend class VideoEditorAudioPlayer; sp<MediaSource> mSource; - AudioTrack *mAudioTrack; + sp<AudioTrack> mAudioTrack; MediaBuffer *mInputBuffer; @@ -97,17 +107,20 @@ private: MediaBuffer *mFirstBuffer; sp<MediaPlayerBase::AudioSink> mAudioSink; - bool mAllowDeepBuffering; // allow audio deep audio buffers. 
Helps with low power audio - // playback but implies high latency AwesomePlayer *mObserver; int64_t mPinnedTimeUs; + bool mPlaying; + int64_t mStartPosUs; + const uint32_t mCreateFlags; + static void AudioCallback(int event, void *user, void *info); void AudioCallback(int event, void *info); static size_t AudioSinkCallback( MediaPlayerBase::AudioSink *audioSink, - void *data, size_t size, void *me); + void *data, size_t size, void *me, + MediaPlayerBase::AudioSink::cb_event_t event); size_t fillBuffer(void *data, size_t size); @@ -116,6 +129,10 @@ private: void reset(); uint32_t getNumFramesPendingPlayout() const; + int64_t getOutputPlayPositionUs_l() const; + + bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; } + bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; } AudioPlayer(const AudioPlayer &); AudioPlayer &operator=(const AudioPlayer &); diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h index 99f3c3b..4c9aaad 100644 --- a/include/media/stagefright/AudioSource.h +++ b/include/media/stagefright/AudioSource.h @@ -73,7 +73,7 @@ private: Condition mFrameAvailableCondition; Condition mFrameEncodingCompletionCondition; - AudioRecord *mRecord; + sp<AudioRecord> mRecord; status_t mInitCheck; bool mStarted; int32_t mSampleRate; diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h index dfb845b..590623b 100644 --- a/include/media/stagefright/MediaCodecList.h +++ b/include/media/stagefright/MediaCodecList.h @@ -50,7 +50,8 @@ struct MediaCodecList { status_t getCodecCapabilities( size_t index, const char *type, Vector<ProfileLevel> *profileLevels, - Vector<uint32_t> *colorFormats) const; + Vector<uint32_t> *colorFormats, + uint32_t *flags) const; private: enum Section { diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h index 81de6e4..85693d4 100644 --- 
a/include/media/stagefright/MediaDefs.h +++ b/include/media/stagefright/MediaDefs.h @@ -22,7 +22,8 @@ namespace android { extern const char *MEDIA_MIMETYPE_IMAGE_JPEG; -extern const char *MEDIA_MIMETYPE_VIDEO_VPX; +extern const char *MEDIA_MIMETYPE_VIDEO_VP8; +extern const char *MEDIA_MIMETYPE_VIDEO_VP9; extern const char *MEDIA_MIMETYPE_VIDEO_AVC; extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4; extern const char *MEDIA_MIMETYPE_VIDEO_H263; diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h index ee5e4e2..686f286 100644 --- a/include/media/stagefright/MediaErrors.h +++ b/include/media/stagefright/MediaErrors.h @@ -56,14 +56,11 @@ enum { ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7, ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8, ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9, + ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10, ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500, ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999, - // Deprecated - ERROR_DRM_WV_VENDOR_MAX = ERROR_DRM_VENDOR_MAX, - ERROR_DRM_WV_VENDOR_MIN = ERROR_DRM_VENDOR_MIN, - // Heartbeat Error Codes HEARTBEAT_ERROR_BASE = -3000, ERROR_HEARTBEAT_TERMINATE_REQUESTED = HEARTBEAT_ERROR_BASE, diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h index c1fdbad..ff6a66e 100644 --- a/include/media/stagefright/MediaMuxer.h +++ b/include/media/stagefright/MediaMuxer.h @@ -79,6 +79,16 @@ public: status_t setOrientationHint(int degrees); /** + * Set the location. + * @param latitude The latitude in degree x 1000. Its value must be in the range + * [-900000, 900000]. + * @param longitude The longitude in degree x 1000. Its value must be in the range + * [-1800000, 1800000]. + * @return OK if no error. + */ + status_t setLocation(int latitude, int longitude); + + /** * Stop muxing. * This method is a blocking call. 
Depending on how * much data is bufferred internally, the time needed for stopping diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h index 583c3b3..daaf20f 100644 --- a/include/media/stagefright/OMXCodec.h +++ b/include/media/stagefright/OMXCodec.h @@ -361,9 +361,14 @@ private: }; struct CodecCapabilities { + enum { + kFlagSupportsAdaptivePlayback = 1 << 0, + }; + String8 mComponentName; Vector<CodecProfileLevel> mProfileLevels; Vector<OMX_U32> mColorFormats; + uint32_t mFlags; }; // Return a vector of componentNames with supported profile/level pairs diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h index 5f21da9..db5f947 100644 --- a/include/media/stagefright/SurfaceMediaSource.h +++ b/include/media/stagefright/SurfaceMediaSource.h @@ -56,7 +56,7 @@ class GraphicBuffer; class SurfaceMediaSource : public MediaSource, public MediaBufferObserver, - protected BufferQueue::ConsumerListener { + protected ConsumerListener { public: enum { MIN_UNDEQUEUED_BUFFERS = 4}; @@ -146,9 +146,13 @@ private: // this consumer sp<BufferQueue> mBufferQueue; - // mBufferSlot caches GraphicBuffers from the buffer queue - sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS]; + struct SlotData { + sp<GraphicBuffer> mGraphicBuffer; + uint64_t mFrameNumber; + }; + // mSlots caches GraphicBuffers and frameNumbers from the buffer queue + SlotData mSlots[BufferQueue::NUM_BUFFER_SLOTS]; // The permenent width and height of SMS buffers int mWidth; diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h index 73940d3..c24f612 100644 --- a/include/media/stagefright/Utils.h +++ b/include/media/stagefright/Utils.h @@ -22,6 +22,8 @@ #include <stdint.h> #include <utils/Errors.h> #include <utils/RefBase.h> +#include <system/audio.h> +#include <media/MediaPlayerInterface.h> namespace android { @@ -48,6 +50,15 @@ void convertMessageToMetaData( AString MakeUserAgent(); +// 
Convert a MIME type to a AudioSystem::audio_format +status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime); + +// Send information from MetaData to the HAL via AudioSink +status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta); + +// Check whether the stream defined by meta can be offloaded to hardware +bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming); + } // namespace android #endif // UTILS_H_ diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h index 2e5fd73..940fc55 100644 --- a/include/media/stagefright/foundation/ALooperRoster.h +++ b/include/media/stagefright/foundation/ALooperRoster.h @@ -30,6 +30,7 @@ struct ALooperRoster { const sp<ALooper> looper, const sp<AHandler> &handler); void unregisterHandler(ALooper::handler_id handlerID); + void unregisterStaleHandlers(); status_t postMessage(const sp<AMessage> &msg, int64_t delayUs = 0); void deliverMessage(const sp<AMessage> &msg); diff --git a/include/media/stagefright/foundation/ANetworkSession.h b/include/media/stagefright/foundation/ANetworkSession.h new file mode 100644 index 0000000..fd3ebaa --- /dev/null +++ b/include/media/stagefright/foundation/ANetworkSession.h @@ -0,0 +1,135 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef A_NETWORK_SESSION_H_ + +#define A_NETWORK_SESSION_H_ + +#include <media/stagefright/foundation/ABase.h> +#include <utils/KeyedVector.h> +#include <utils/RefBase.h> +#include <utils/Thread.h> + +#include <netinet/in.h> + +namespace android { + +struct AMessage; + +// Helper class to manage a number of live sockets (datagram and stream-based) +// on a single thread. Clients are notified about activity through AMessages. +struct ANetworkSession : public RefBase { + ANetworkSession(); + + status_t start(); + status_t stop(); + + status_t createRTSPClient( + const char *host, unsigned port, const sp<AMessage> &notify, + int32_t *sessionID); + + status_t createRTSPServer( + const struct in_addr &addr, unsigned port, + const sp<AMessage> &notify, int32_t *sessionID); + + status_t createUDPSession( + unsigned localPort, const sp<AMessage> &notify, int32_t *sessionID); + + status_t createUDPSession( + unsigned localPort, + const char *remoteHost, + unsigned remotePort, + const sp<AMessage> &notify, + int32_t *sessionID); + + status_t connectUDPSession( + int32_t sessionID, const char *remoteHost, unsigned remotePort); + + // passive + status_t createTCPDatagramSession( + const struct in_addr &addr, unsigned port, + const sp<AMessage> &notify, int32_t *sessionID); + + // active + status_t createTCPDatagramSession( + unsigned localPort, + const char *remoteHost, + unsigned remotePort, + const sp<AMessage> &notify, + int32_t *sessionID); + + status_t destroySession(int32_t sessionID); + + status_t sendRequest( + int32_t sessionID, const void *data, ssize_t size = -1, + bool timeValid = false, int64_t timeUs = -1ll); + + status_t switchToWebSocketMode(int32_t sessionID); + + enum NotificationReason { + kWhatError, + kWhatConnected, + kWhatClientConnected, + kWhatData, + kWhatDatagram, + kWhatBinaryData, + kWhatWebSocketMessage, + kWhatNetworkStall, + }; + +protected: + virtual ~ANetworkSession(); + +private: + struct NetworkThread; + struct Session; + + Mutex mLock; + sp<Thread> 
mThread; + + int32_t mNextSessionID; + + int mPipeFd[2]; + + KeyedVector<int32_t, sp<Session> > mSessions; + + enum Mode { + kModeCreateUDPSession, + kModeCreateTCPDatagramSessionPassive, + kModeCreateTCPDatagramSessionActive, + kModeCreateRTSPServer, + kModeCreateRTSPClient, + }; + status_t createClientOrServer( + Mode mode, + const struct in_addr *addr, + unsigned port, + const char *remoteHost, + unsigned remotePort, + const sp<AMessage> &notify, + int32_t *sessionID); + + void threadLoop(); + void interrupt(); + + static status_t MakeSocketNonBlocking(int s); + + DISALLOW_EVIL_CONSTRUCTORS(ANetworkSession); +}; + +} // namespace android + +#endif // A_NETWORK_SESSION_H_ diff --git a/include/media/stagefright/foundation/ParsedMessage.h b/include/media/stagefright/foundation/ParsedMessage.h new file mode 100644 index 0000000..9d43a93 --- /dev/null +++ b/include/media/stagefright/foundation/ParsedMessage.h @@ -0,0 +1,60 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+struct ParsedMessage : public RefBase { + static sp<ParsedMessage> Parse( + const char *data, size_t size, bool noMoreData, size_t *length); + + bool findString(const char *name, AString *value) const; + bool findInt32(const char *name, int32_t *value) const; + + const char *getContent() const; + + bool getRequestField(size_t index, AString *field) const; + bool getStatusCode(int32_t *statusCode) const; + + AString debugString() const; + + static bool GetAttribute(const char *s, const char *key, AString *value); + + static bool GetInt32Attribute( + const char *s, const char *key, int32_t *value); + + +protected: + virtual ~ParsedMessage(); + +private: + KeyedVector<AString, AString> mDict; + AString mContent; + + ParsedMessage(); + + ssize_t parse(const char *data, size_t size, bool noMoreData); + + DISALLOW_EVIL_CONSTRUCTORS(ParsedMessage); +}; + +} // namespace android |