Diffstat (limited to 'include/media'): 61 files changed, 2187 insertions, 468 deletions
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h index 43e4de7..ef392f0 100644 --- a/include/media/AudioBufferProvider.h +++ b/include/media/AudioBufferProvider.h @@ -26,6 +26,8 @@ class AudioBufferProvider { public: + // FIXME merge with AudioTrackShared::Buffer, AudioTrack::Buffer, and AudioRecord::Buffer + // and rename getNextBuffer() to obtainBuffer() struct Buffer { Buffer() : raw(NULL), frameCount(0) { } union { @@ -44,6 +46,19 @@ public: // pts is the local time when the next sample yielded by getNextBuffer // will be rendered. // Pass kInvalidPTS if the PTS is unknown or not applicable. + // On entry: + // buffer != NULL + // buffer->raw unused + // buffer->frameCount maximum number of desired frames + // On successful return: + // status NO_ERROR + // buffer->raw non-NULL pointer to buffer->frameCount contiguous available frames + // buffer->frameCount number of contiguous available frames at buffer->raw, + // 0 < buffer->frameCount <= entry value + // On error return: + // status != NO_ERROR + // buffer->raw NULL + // buffer->frameCount 0 virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0; virtual void releaseBuffer(Buffer* buffer) = 0; diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h index 156c592..052064d 100644 --- a/include/media/AudioRecord.h +++ b/include/media/AudioRecord.h @@ -14,78 +14,81 @@ * limitations under the License. */ -#ifndef AUDIORECORD_H_ -#define AUDIORECORD_H_ +#ifndef ANDROID_AUDIORECORD_H +#define ANDROID_AUDIORECORD_H -#include <binder/IMemory.h> #include <cutils/sched_policy.h> #include <media/AudioSystem.h> #include <media/IAudioRecord.h> -#include <system/audio.h> -#include <utils/RefBase.h> -#include <utils/Errors.h> #include <utils/threads.h> namespace android { +// ---------------------------------------------------------------------------- + class audio_track_cblk_t; +class AudioRecordClientProxy; // ---------------------------------------------------------------------------- -class AudioRecord : virtual public RefBase +class AudioRecord : public RefBase { public: - static const int DEFAULT_SAMPLE_RATE = 8000; - /* Events used by AudioRecord callback function (callback_t). * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*. */ enum event_type { EVENT_MORE_DATA = 0, // Request to read more data from PCM buffer. - EVENT_OVERRUN = 1, // PCM buffer overrun occured. + EVENT_OVERRUN = 1, // PCM buffer overrun occurred. EVENT_MARKER = 2, // Record head is at the specified marker position // (See setMarkerPosition()). EVENT_NEW_POS = 3, // Record head is at a new position // (See setPositionUpdatePeriod()). + EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and + // voluntary invalidation by mediaserver, or mediaserver crash. }; - /* Create Buffer on the stack and pass it to obtainBuffer() - * and releaseBuffer(). + /* Client should declare Buffer on the stack and pass address to obtainBuffer() + * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA. 
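As a usage sketch of the getNextBuffer()/releaseBuffer() contract documented in the AudioBufferProvider.h hunk above: the loop below drains frames from a provider while honoring the stated entry, success, and error conditions. It is illustrative only; the `provider` instance and the 16-bit mono frame format are assumptions, not part of this change.

    #include <string.h>                      // memcpy
    #include <utils/Errors.h>
    #include <media/AudioBufferProvider.h>

    using namespace android;

    // Drain up to 'frames' 16-bit mono frames from a provider into 'sink',
    // following the documented entry/success/error conditions of getNextBuffer().
    static size_t drain(AudioBufferProvider* provider, int16_t* sink, size_t frames)
    {
        size_t done = 0;
        while (done < frames) {
            AudioBufferProvider::Buffer buffer;
            buffer.frameCount = frames - done;        // on entry: maximum desired frames
            if (provider->getNextBuffer(&buffer) != NO_ERROR) {
                break;                                // on error: raw == NULL, frameCount == 0
            }
            // on success: buffer.raw != NULL and 0 < buffer.frameCount <= requested
            memcpy(sink + done, buffer.raw, buffer.frameCount * sizeof(int16_t));
            done += buffer.frameCount;
            provider->releaseBuffer(&buffer);         // hand the consumed frames back
        }
        return done;
    }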
*/ class Buffer { public: - enum { - MUTE = 0x00000001 - }; - uint32_t flags; - int channelCount; - audio_format_t format; - size_t frameCount; - size_t size; // total size in bytes == frameCount * frameSize + // FIXME use m prefix + size_t frameCount; // number of sample frames corresponding to size; + // on input it is the number of frames available, + // on output is the number of frames actually drained + // (currently ignored, but will make the primary field in future) + + size_t size; // input/output in bytes == frameCount * frameSize + // FIXME this is redundant with respect to frameCount, + // and TRANSFER_OBTAIN mode is broken for 8-bit data + // since we don't define the frame format + union { void* raw; - short* i16; - int8_t* i8; + short* i16; // signed 16-bit + int8_t* i8; // unsigned 8-bit, offset by 0x80 }; }; /* As a convenience, if a callback is supplied, a handler thread * is automatically created with the appropriate priority. This thread - * invokes the callback when a new buffer becomes ready or an overrun condition occurs. + * invokes the callback when a new buffer becomes ready or various conditions occur. * Parameters: * * event: type of event notified (see enum AudioRecord::event_type). * user: Pointer to context for use by the callback receiver. * info: Pointer to optional parameter according to event type: * - EVENT_MORE_DATA: pointer to AudioRecord::Buffer struct. The callback must not read - * more bytes than indicated by 'size' field and update 'size' if less bytes are - * read. + * more bytes than indicated by 'size' field and update 'size' if fewer bytes are + * consumed. * - EVENT_OVERRUN: unused. * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames. * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames. + * - EVENT_NEW_IAUDIORECORD: unused. */ typedef void (*callback_t)(int event, void* user, void *info); @@ -98,125 +101,149 @@ public: * - BAD_VALUE: unsupported configuration */ - static status_t getMinFrameCount(int* frameCount, + static status_t getMinFrameCount(size_t* frameCount, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask); + /* How data is transferred from AudioRecord + */ + enum transfer_type { + TRANSFER_DEFAULT, // not specified explicitly; determine from other parameters + TRANSFER_CALLBACK, // callback EVENT_MORE_DATA + TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer() + TRANSFER_SYNC, // synchronous read() + }; + /* Constructs an uninitialized AudioRecord. No connection with - * AudioFlinger takes place. + * AudioFlinger takes place. Use set() after this. */ AudioRecord(); - /* Creates an AudioRecord track and registers it with AudioFlinger. + /* Creates an AudioRecord object and registers it with AudioFlinger. * Once created, the track needs to be started before it can be used. - * Unspecified values are set to the audio hardware's current - * values. + * Unspecified values are set to appropriate default values. * * Parameters: * - * inputSource: Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT). - * sampleRate: Track sampling rate in Hz. + * inputSource: Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT). + * sampleRate: Data sink sampling rate in Hz. * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed * 16 bits per sample). - * channelMask: Channel mask. - * frameCount: Total size of track PCM buffer in frames. This defines the - * latency of the track. 
+ * channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true. + * frameCount: Minimum size of track PCM buffer in frames. This defines the + * application's contribution to the + * latency of the track. The actual size selected by the AudioRecord could + * be larger if the requested size is not compatible with current audio HAL + * latency. Zero means to use a default value. * cbf: Callback function. If not null, this function is called periodically - * to provide new PCM data. + * to consume new PCM data and inform of marker, position updates, etc. * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames are ready in record track output buffer. * sessionId: Not yet supported. + * transferType: How data is transferred from AudioRecord. + * flags: See comments on audio_input_flags_t in <system/audio.h> + * threadCanCallJava: Not present in parameter list, and so is fixed at false. */ AudioRecord(audio_source_t inputSource, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, int frameCount = 0, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0); - + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE); /* Terminates the AudioRecord and unregisters it from AudioFlinger. * Also destroys all resources associated with the AudioRecord. */ - ~AudioRecord(); - +protected: + virtual ~AudioRecord(); +public: - /* Initialize an uninitialized AudioRecord. + /* Initialize an AudioRecord that was created using the AudioRecord() constructor. + * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters. * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful intialization - * - INVALID_OPERATION: AudioRecord is already intitialized or record device is already in use + * - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use * - BAD_VALUE: invalid parameter (channels, format, sampleRate...) * - NO_INIT: audio server or audio hardware not initialized * - PERMISSION_DENIED: recording is not allowed for the requesting process - * */ - status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO, + * + * Parameters not listed in the AudioRecord constructors above: + * + * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI. + */ + status_t set(audio_source_t inputSource, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, int frameCount = 0, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, bool threadCanCallJava = false, - int sessionId = 0); - + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE); /* Result of constructing the AudioRecord. This must be checked - * before using any AudioRecord API (except for set()), using + * before using any AudioRecord API (except for set()), because using * an uninitialized AudioRecord produces undefined results. * See set() method above for possible return codes. 
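Because the constructor and set() above no longer supply defaults for sampleRate, format, and channelMask, client construction now spells everything out. A sketch, with 48 kHz mono 16-bit capture chosen purely for illustration:

    #include <media/AudioRecord.h>

    using namespace android;

    static sp<AudioRecord> makeRecord()
    {
        size_t minFrames = 0;
        if (AudioRecord::getMinFrameCount(&minFrames, 48000,
                AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO) != NO_ERROR) {
            return NULL;
        }
        // No callback supplied, so data will be fetched with the synchronous read() path.
        sp<AudioRecord> record = new AudioRecord(
                AUDIO_SOURCE_MIC,
                48000,
                AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_IN_MONO,
                minFrames * 2);               // the app's contribution to latency, in frames
        if (record->initCheck() != NO_ERROR) {
            return NULL;                      // mandatory check before any other API call
        }
        return record;
    }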
*/ - status_t initCheck() const; + status_t initCheck() const { return mStatus; } - /* Returns this track's latency in milliseconds. - * This includes the latency due to AudioRecord buffer size + /* Returns this track's estimated latency in milliseconds. + * This includes the latency due to AudioRecord buffer size, * and audio hardware driver. */ - uint32_t latency() const; + uint32_t latency() const { return mLatency; } /* getters, see constructor and set() */ - audio_format_t format() const; - int channelCount() const; - uint32_t frameCount() const; - size_t frameSize() const; - audio_source_t inputSource() const; - + audio_format_t format() const { return mFormat; } + uint32_t channelCount() const { return mChannelCount; } + size_t frameCount() const { return mFrameCount; } + size_t frameSize() const { return mFrameSize; } + audio_source_t inputSource() const { return mInputSource; } /* After it's created the track is not active. Call start() to * make it active. If set, the callback will start being called. - * if event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until + * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until * the specified event occurs on the specified trigger session. */ status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, int triggerSession = 0); - /* Stop a track. If set, the callback will cease being called and - * obtainBuffer returns STOPPED. Note that obtainBuffer() still works - * and will fill up buffers until the pool is exhausted. + /* Stop a track. If set, the callback will cease being called. Note that obtainBuffer() still + * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK. */ void stop(); bool stopped() const; - /* get sample rate for this record track + /* Return the sink sample rate for this record track in Hz. + * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock. */ - uint32_t getSampleRate() const; + uint32_t getSampleRate() const { return mSampleRate; } /* Sets marker position. When record reaches the number of frames specified, * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition * with marker == 0 cancels marker notification callback. + * To set a marker at a position which would compute as 0, + * a workaround is to the set the marker at a nearby position such as ~0 or 1. * If the AudioRecord has been opened with no callback function associated, * the operation will fail. * * Parameters: * - * marker: marker position expressed in frames. + * marker: marker position expressed in wrapping (overflow) frame units, + * like the return value of getPosition(). * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -225,13 +252,13 @@ public: status_t setMarkerPosition(uint32_t marker); status_t getMarkerPosition(uint32_t *marker) const; - /* Sets position update period. Every time the number of frames specified has been recorded, * a callback with event type EVENT_NEW_POS is called. * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification * callback. * If the AudioRecord has been opened with no callback function associated, * the operation will fail. + * Extremely small values may be rounded up to a value the implementation can support. 
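The marker and position-update notifications described above arrive through the same callback_t documented earlier in this header. A handler shaped roughly like the following sketch covers them; the LOG_TAG and the choice to consume nothing in EVENT_MORE_DATA are illustrative.

    #define LOG_TAG "RecordCallbackSketch"
    #include <utils/Log.h>
    #include <media/AudioRecord.h>

    using namespace android;

    static void recordCallback(int event, void* user, void* info)
    {
        (void) user;
        switch (event) {
        case AudioRecord::EVENT_MORE_DATA: {
            AudioRecord::Buffer* b = (AudioRecord::Buffer*) info;
            // A real handler copies up to b->size bytes out of b->raw and then
            // writes back into b->size how many bytes it actually consumed.
            b->size = 0;
            break;
        }
        case AudioRecord::EVENT_MARKER:     // marker position, wrapping frame units
            ALOGV("marker at frame %u", *(const uint32_t*) info);
            break;
        case AudioRecord::EVENT_NEW_POS:    // periodic position update
            ALOGV("position %u", *(const uint32_t*) info);
            break;
        case AudioRecord::EVENT_OVERRUN:
            ALOGW("record overrun");
            break;
        default:
            break;
        }
    }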
* * Parameters: * @@ -244,13 +271,13 @@ public: status_t setPositionUpdatePeriod(uint32_t updatePeriod); status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const; - - /* Gets record head position. The position is the total number of frames - * recorded since record start. + /* Return the total number of frames recorded since recording started. + * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz. + * It is reset to zero by stop(). * * Parameters: * - * position: Address where to return record head position within AudioRecord buffer. + * position: Address where to return record head position. * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -258,7 +285,7 @@ public: */ status_t getPosition(uint32_t *position) const; - /* returns a handle on the audio input used by this AudioRecord. + /* Returns a handle on the audio input used by this AudioRecord. * * Parameters: * none. @@ -268,50 +295,94 @@ public: */ audio_io_handle_t getInput() const; - /* returns the audio session ID associated with this AudioRecord. + /* Returns the audio session ID associated with this AudioRecord. * * Parameters: * none. * * Returned value: * AudioRecord session ID. + * + * No lock needed because session ID doesn't change after first set(). */ - int getSessionId() const; - - /* obtains a buffer of "frameCount" frames. The buffer must be - * filled entirely. If the track is stopped, obtainBuffer() returns - * STOPPED instead of NO_ERROR as long as there are buffers available, - * at which point NO_MORE_BUFFERS is returned. - * Buffers will be returned until the pool (buffercount()) + int getSessionId() const { return mSessionId; } + + /* Obtains a buffer of up to "audioBuffer->frameCount" full frames. + * After draining these frames of data, the caller should release them with releaseBuffer(). + * If the track buffer is not empty, obtainBuffer() returns as many contiguous + * full frames as are available immediately. + * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK + * regardless of the value of waitCount. + * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a + * maximum timeout based on waitCount; see chart below. + * Buffers will be returned until the pool * is exhausted, at which point obtainBuffer() will either block - * or return WOULD_BLOCK depending on the value of the "blocking" + * or return WOULD_BLOCK depending on the value of the "waitCount" * parameter. + * + * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications, + * which should use read() or callback EVENT_MORE_DATA instead. + * + * Interpretation of waitCount: + * +n limits wait time to n * WAIT_PERIOD_MS, + * -1 causes an (almost) infinite wait time, + * 0 non-blocking. 
+ * + * Buffer fields + * On entry: + * frameCount number of frames requested + * After error return: + * frameCount 0 + * size 0 + * raw undefined + * After successful return: + * frameCount actual number of frames available, <= number requested + * size actual number of bytes available + * raw pointer to the buffer */ - enum { - NO_MORE_BUFFERS = 0x80000001, - STOPPED = 1 - }; + /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */ + status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount) + __attribute__((__deprecated__)); - status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount); - void releaseBuffer(Buffer* audioBuffer); +private: + /* If nonContig is non-NULL, it is an output parameter that will be set to the number of + * additional non-contiguous frames that are available immediately. + * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(), + * in case the requested amount of frames is in two or more non-contiguous regions. + * FIXME requested and elapsed are both relative times. Consider changing to absolute time. + */ + status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested, + struct timespec *elapsed = NULL, size_t *nonContig = NULL); +public: + /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */ + // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed + void releaseBuffer(Buffer* audioBuffer); /* As a convenience we provide a read() interface to the audio buffer. - * This is implemented on top of obtainBuffer/releaseBuffer. + * Input parameter 'size' is in byte units. + * This is implemented on top of obtainBuffer/releaseBuffer. For best + * performance use callbacks. Returns actual number of bytes read >= 0, + * or one of the following negative status codes: + * INVALID_OPERATION AudioRecord is configured for streaming mode + * BAD_VALUE size is invalid + * WOULD_BLOCK when obtainBuffer() returns same, or + * AudioRecord was stopped during the read + * or any other error code returned by IAudioRecord::start() or restoreRecord_l(). */ ssize_t read(void* buffer, size_t size); - /* Return the amount of input frames lost in the audio driver since the last call of this + /* Return the number of input frames lost in the audio driver since the last call of this * function. Audio driver is expected to reset the value to 0 and restart counting upon * returning the current value by this function call. Such loss typically occurs when the * user space process is blocked longer than the capacity of audio driver buffers. - * Unit: the number of input audio frames + * Units: the number of input audio frames. 
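Putting the read() contract above into practice, a synchronous capture loop treats any negative return (including WOULD_BLOCK after a stop) as the end of capture. Buffer sizes here are illustrative:

    #include <stdint.h>
    #include <media/AudioRecord.h>

    using namespace android;

    // Capture up to 'totalBytes' of PCM over the synchronous read() path.
    static size_t captureSome(const sp<AudioRecord>& record, void* dst, size_t totalBytes)
    {
        if (record->start() != NO_ERROR) {
            return 0;
        }
        size_t got = 0;
        while (got < totalBytes) {
            ssize_t n = record->read((uint8_t*) dst + got, totalBytes - got);
            if (n <= 0) {
                break;      // negative status (e.g. WOULD_BLOCK if stopped mid-read)
            }
            got += n;
        }
        record->stop();
        return got;
    }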
*/ unsigned int getInputFramesLost() const; private: - /* copying audio tracks is not allowed */ + /* copying audio record objects is not allowed */ AudioRecord(const AudioRecord& other); AudioRecord& operator = (const AudioRecord& other); @@ -329,63 +400,113 @@ private: void resume(); // allow thread to execute, if not requested to exit private: + void pauseInternal(nsecs_t ns = 0LL); + // like pause(), but only used internally within thread + friend class AudioRecord; virtual bool threadLoop(); - AudioRecord& mReceiver; + AudioRecord& mReceiver; virtual ~AudioRecordThread(); Mutex mMyLock; // Thread::mLock is private Condition mMyCond; // Thread::mThreadExitedCondition is private - bool mPaused; // whether thread is currently paused + bool mPaused; // whether thread is requested to pause at next loop entry + bool mPausedInt; // whether thread internally requests pause + nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored }; // body of AudioRecordThread::threadLoop() - bool processAudioBuffer(const sp<AudioRecordThread>& thread); + // returns the maximum amount of time before we would like to run again, where: + // 0 immediately + // > 0 no later than this many nanoseconds from now + // NS_WHENEVER still active but no particular deadline + // NS_INACTIVE inactive so don't run again until re-started + // NS_NEVER never again + static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3; + nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread); - status_t openRecord_l(uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - audio_io_handle_t input); - audio_io_handle_t getInput_l(); - status_t restoreRecord_l(audio_track_cblk_t*& cblk); + // caller must hold lock on mLock for all _l methods + status_t openRecord_l(size_t epoch); + + // FIXME enum is faster than strcmp() for parameter 'from' + status_t restoreRecord_l(const char *from); sp<AudioRecordThread> mAudioRecordThread; mutable Mutex mLock; - bool mActive; // protected by mLock + // Current client state: false = stopped, true = active. Protected by mLock. If more states + // are added, consider changing this to enum State { ... } mState as in AudioTrack. 
+ bool mActive; // for client callback handler - callback_t mCbf; + callback_t mCbf; // callback handler for events, or NULL void* mUserData; // for notification APIs - uint32_t mNotificationFrames; - uint32_t mRemainingFrames; - uint32_t mMarkerPosition; // in frames + uint32_t mNotificationFramesReq; // requested number of frames between each + // notification callback + uint32_t mNotificationFramesAct; // actual number of frames between each + // notification callback + bool mRefreshRemaining; // processAudioBuffer() should refresh next 2 + + // These are private to processAudioBuffer(), and are not protected by a lock + uint32_t mRemainingFrames; // number of frames to request in obtainBuffer() + bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer() + int mObservedSequence; // last observed value of mSequence + + uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; uint32_t mNewPosition; // in frames - uint32_t mUpdatePeriod; // in ms + uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS + + status_t mStatus; // constant after constructor or set() - uint32_t mFrameCount; + uint32_t mSampleRate; + size_t mFrameCount; audio_format_t mFormat; - uint8_t mChannelCount; + uint32_t mChannelCount; + size_t mFrameSize; // app-level frame size == AudioFlinger frame size audio_source_t mInputSource; - status_t mStatus; - uint32_t mLatency; + uint32_t mLatency; // in ms audio_channel_mask_t mChannelMask; - audio_io_handle_t mInput; // returned by AudioSystem::getInput() + audio_input_flags_t mFlags; int mSessionId; + transfer_type mTransfer; + + audio_io_handle_t mInput; // returned by AudioSystem::getInput() // may be changed if IAudioRecord object is re-created sp<IAudioRecord> mAudioRecord; sp<IMemory> mCblkMemory; - audio_track_cblk_t* mCblk; + audio_track_cblk_t* mCblk; // re-load after mLock.unlock() - int mPreviousPriority; // before start() + int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; + bool mAwaitBoost; // thread should wait for priority boost before running + + // The proxy should only be referenced while a lock is held because the proxy isn't + // multi-thread safe. + // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock, + // provided that the caller also holds an extra reference to the proxy and shared memory to keep + // them around in case they are replaced during the obtainBuffer(). 
+ sp<AudioRecordClientProxy> mProxy; + + bool mInOverrun; // whether recorder is currently in overrun state + +private: + class DeathNotifier : public IBinder::DeathRecipient { + public: + DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { } + protected: + virtual void binderDied(const wp<IBinder>& who); + private: + const wp<AudioRecord> mAudioRecord; + }; + + sp<DeathNotifier> mDeathNotifier; + uint32_t mSequence; // incremented for each new IAudioRecord attempt }; }; // namespace android -#endif /*AUDIORECORD_H_*/ +#endif // ANDROID_AUDIORECORD_H diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index 49e1afc..4c22412 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -17,20 +17,18 @@ #ifndef ANDROID_AUDIOSYSTEM_H_ #define ANDROID_AUDIOSYSTEM_H_ -#include <utils/RefBase.h> -#include <utils/threads.h> -#include <media/IAudioFlinger.h> - +#include <hardware/audio_effect.h> +#include <media/IAudioFlingerClient.h> #include <system/audio.h> #include <system/audio_policy.h> - -/* XXX: Should be include by all the users instead */ -#include <media/AudioParameter.h> +#include <utils/Errors.h> +#include <utils/Mutex.h> namespace android { typedef void (*audio_error_callback)(status_t err); +class IAudioFlinger; class IAudioPolicyService; class String8; @@ -67,9 +65,14 @@ public: // set audio mode in audio hardware static status_t setMode(audio_mode_t mode); - // returns true in *state if tracks are active on the specified stream or has been active + // returns true in *state if tracks are active on the specified stream or have been active // in the past inPastMs milliseconds static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs = 0); + // returns true in *state if tracks are active for what qualifies as remote playback + // on the specified stream or have been active in the past inPastMs milliseconds. Remote + // playback isn't mutually exclusive with local playback. + static status_t isStreamActiveRemotely(audio_stream_type_t stream, bool *state, + uint32_t inPastMs = 0); // returns true in *state if a recorder is currently recording with the specified source static status_t isSourceActive(audio_source_t source, bool *state); @@ -87,29 +90,26 @@ public: static float linearToLog(int volume); static int logToLinear(float volume); - static status_t getOutputSamplingRate(int* samplingRate, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); - static status_t getOutputFrameCount(int* frameCount, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); - static status_t getOutputLatency(uint32_t* latency, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getOutputSamplingRate(uint32_t* samplingRate, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getOutputFrameCount(size_t* frameCount, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getOutputLatency(uint32_t* latency, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); static status_t getSamplingRate(audio_io_handle_t output, audio_stream_type_t streamType, - int* samplingRate); + uint32_t* samplingRate); // returns the number of frames per audio HAL write buffer. Corresponds to // audio_stream->get_buffer_size()/audio_stream_frame_size() static status_t getFrameCount(audio_io_handle_t output, audio_stream_type_t stream, - int* frameCount); + size_t* frameCount); // returns the audio output stream latency in ms. 
Corresponds to // audio_stream_out->get_latency() static status_t getLatency(audio_io_handle_t output, audio_stream_type_t stream, uint32_t* latency); - // DEPRECATED - static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT); - - // DEPRECATED - static status_t getOutputFrameCount(int* frameCount, int stream = AUDIO_STREAM_DEFAULT); - static bool routedToA2dpOutput(audio_stream_type_t streamType); static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, @@ -126,10 +126,13 @@ public: // - BAD_VALUE: invalid parameter // NOTE: this feature is not supported on all hardware platforms and it is // necessary to check returned status before using the returned values. - static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getRenderPosition(audio_io_handle_t output, + uint32_t *halFrames, + uint32_t *dspFrames, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid - static unsigned int getInputFramesLost(audio_io_handle_t ioHandle); + static size_t getInputFramesLost(audio_io_handle_t ioHandle); static int newAudioSessionId(); static void acquireAudioSessionId(int audioSession); @@ -147,23 +150,23 @@ public: NUM_CONFIG_EVENTS }; - // audio output descriptor used to cache output configurations in client process to avoid frequent calls - // through IAudioFlinger + // audio output descriptor used to cache output configurations in client process to avoid + // frequent calls through IAudioFlinger class OutputDescriptor { public: OutputDescriptor() - : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channels(0), frameCount(0), latency(0) {} + : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0) {} uint32_t samplingRate; - int32_t format; - int32_t channels; + audio_format_t format; + audio_channel_mask_t channelMask; size_t frameCount; uint32_t latency; }; // Events used to synchronize actions between audio sessions. - // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until playback - // is complete on another audio session. + // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until + // playback is complete on another audio session. 
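The AudioSystem output getters earlier in this hunk now take uint32_t and size_t out-parameters rather than int*. A caller written against the new signatures looks like this sketch; the OutputConfig holder is illustrative.

    #include <media/AudioSystem.h>

    using namespace android;

    struct OutputConfig {
        uint32_t sampleRate;    // Hz
        size_t   frameCount;    // frames per HAL buffer
        uint32_t latencyMs;
    };

    static status_t queryOutputConfig(audio_stream_type_t stream, OutputConfig* out)
    {
        status_t status = AudioSystem::getOutputSamplingRate(&out->sampleRate, stream);
        if (status != NO_ERROR) return status;
        status = AudioSystem::getOutputFrameCount(&out->frameCount, stream);
        if (status != NO_ERROR) return status;
        return AudioSystem::getOutputLatency(&out->latencyMs, stream);
    }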
// See definitions in MediaSyncEvent.java enum sync_event_t { SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event @@ -183,8 +186,10 @@ public: // // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions) // - static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address); - static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address); + static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, + const char *device_address); + static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, + const char *device_address); static status_t setPhoneState(audio_mode_t state); static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config); static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage); @@ -192,7 +197,8 @@ public: uint32_t samplingRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO, - audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE); + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL); static status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0); @@ -237,9 +243,18 @@ public: static const sp<IAudioPolicyService>& get_audio_policy_service(); // helpers for android.media.AudioManager.getProperty(), see description there for meaning - static int32_t getPrimaryOutputSamplingRate(); - static int32_t getPrimaryOutputFrameCount(); + static uint32_t getPrimaryOutputSamplingRate(); + static size_t getPrimaryOutputFrameCount(); + + static status_t setLowRamDevice(bool isLowRamDevice); + + // Check if hw offload is possible for given format, stream type, sample rate, + // bit rate, duration, video and streaming or offload property is enabled + static bool isOffloadSupported(const audio_offload_info_t& info); + // check presence of audio flinger service. + // returns NO_ERROR if binding to service succeeds, DEAD_OBJECT otherwise + static status_t checkAudioFlinger(); // ---------------------------------------------------------------------------- private: diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h new file mode 100644 index 0000000..c29c7e5 --- /dev/null +++ b/include/media/AudioTimestamp.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_TIMESTAMP_H +#define ANDROID_AUDIO_TIMESTAMP_H + +#include <time.h> + +class AudioTimestamp { +public: + AudioTimestamp() : mPosition(0) { + mTime.tv_sec = 0; + mTime.tv_nsec = 0; + } + // FIXME change type to match android.media.AudioTrack + uint32_t mPosition; // a frame position in AudioTrack::getPosition() units + struct timespec mTime; // corresponding CLOCK_MONOTONIC when frame is expected to present +}; + +#endif // ANDROID_AUDIO_TIMESTAMP_H diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h index 34108b3..f6646ab 100644 --- a/include/media/AudioTrack.h +++ b/include/media/AudioTrack.h @@ -17,18 +17,10 @@ #ifndef ANDROID_AUDIOTRACK_H #define ANDROID_AUDIOTRACK_H -#include <stdint.h> -#include <sys/types.h> - -#include <media/IAudioFlinger.h> -#include <media/IAudioTrack.h> -#include <media/AudioSystem.h> - -#include <utils/RefBase.h> -#include <utils/Errors.h> -#include <binder/IInterface.h> -#include <binder/IMemory.h> #include <cutils/sched_policy.h> +#include <media/AudioSystem.h> +#include <media/AudioTimestamp.h> +#include <media/IAudioTrack.h> #include <utils/threads.h> namespace android { @@ -36,10 +28,12 @@ namespace android { // ---------------------------------------------------------------------------- class audio_track_cblk_t; +class AudioTrackClientProxy; +class StaticAudioTrackClientProxy; // ---------------------------------------------------------------------------- -class AudioTrack : virtual public RefBase +class AudioTrack : public RefBase { public: enum channel_index { @@ -48,15 +42,30 @@ public: RIGHT = 1 }; - /* Events used by AudioTrack callback function (audio_track_cblk_t). + /* Events used by AudioTrack callback function (callback_t). + * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*. */ enum event_type { - EVENT_MORE_DATA = 0, // Request to write more data to PCM buffer. - EVENT_UNDERRUN = 1, // PCM buffer underrun occured. - EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from loop start if loop count was not 0. - EVENT_MARKER = 3, // Playback head is at the specified marker position (See setMarkerPosition()). - EVENT_NEW_POS = 4, // Playback head is at a new position (See setPositionUpdatePeriod()). - EVENT_BUFFER_END = 5 // Playback head is at the end of the buffer. + EVENT_MORE_DATA = 0, // Request to write more data to buffer. + // If this event is delivered but the callback handler + // does not want to write more data, the handler must explicitly + // ignore the event by setting frameCount to zero. + EVENT_UNDERRUN = 1, // Buffer underrun occurred. + EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from + // loop start if loop count was not 0. + EVENT_MARKER = 3, // Playback head is at the specified marker position + // (See setMarkerPosition()). + EVENT_NEW_POS = 4, // Playback head is at a new position + // (See setPositionUpdatePeriod()). + EVENT_BUFFER_END = 5, // Playback head is at the end of the buffer. + // Not currently used by android.media.AudioTrack. + EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and + // voluntary invalidation by mediaserver, or mediaserver crash. + EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played + // back (after stop is called) + EVENT_NEW_TIMESTAMP = 8, // Delivered periodically and when there's a significant change + // in the mapping from frame position to presentation time. 
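The new AudioTimestamp above pairs a frame position with a CLOCK_MONOTONIC presentation time. One plausible client-side interpretation, whether the value came from AudioTrack::getTimestamp() or an EVENT_NEW_TIMESTAMP callback (the millisecond conversion is just an illustration):

    #include <stdint.h>
    #include <time.h>
    #include <media/AudioTimestamp.h>

    // How far in the future (in ms, negative if already presented) the timestamped
    // frame position is expected to reach the output, measured from "now".
    static int64_t msUntilPresentation(const AudioTimestamp& ts)
    {
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        int64_t deltaNs = (int64_t)(ts.mTime.tv_sec - now.tv_sec) * 1000000000LL
                        + (int64_t)(ts.mTime.tv_nsec - now.tv_nsec);
        return deltaNs / 1000000LL;
    }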
+ // See AudioTimestamp for the information included with event. }; /* Client should declare Buffer on the stack and pass address to obtainBuffer() @@ -66,27 +75,25 @@ public: class Buffer { public: - enum { - MUTE = 0x00000001 - }; - uint32_t flags; // 0 or MUTE - audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT - // accessed directly by WebKit ANP callback - int channelCount; // will be removed in the future, do not use - + // FIXME use m prefix size_t frameCount; // number of sample frames corresponding to size; // on input it is the number of frames desired, // on output is the number of frames actually filled + // (currently ignored, but will make the primary field in future) + + size_t size; // input/output in bytes == frameCount * frameSize + // on output is the number of bytes actually filled + // FIXME this is redundant with respect to frameCount, + // and TRANSFER_OBTAIN mode is broken for 8-bit data + // since we don't define the frame format - size_t size; // input/output in byte units union { void* raw; - short* i16; // signed 16-bit - int8_t* i8; // unsigned 8-bit, offset by 0x80 + short* i16; // signed 16-bit + int8_t* i8; // unsigned 8-bit, offset by 0x80 }; }; - /* As a convenience, if a callback is supplied, a handler thread * is automatically created with the appropriate priority. This thread * invokes the callback when a new buffer becomes available or various conditions occur. @@ -100,9 +107,12 @@ public: * written. * - EVENT_UNDERRUN: unused. * - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining. - * - EVENT_MARKER: pointer to an uint32_t containing the marker position in frames. - * - EVENT_NEW_POS: pointer to an uint32_t containing the new position in frames. + * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames. + * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames. * - EVENT_BUFFER_END: unused. + * - EVENT_NEW_IAUDIOTRACK: unused. + * - EVENT_STREAM_END: unused. + * - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp. */ typedef void (*callback_t)(int event, void* user, void *info); @@ -112,105 +122,125 @@ public: * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation * - NO_INIT: audio server or audio hardware not initialized + * - BAD_VALUE: unsupported configuration */ - static status_t getMinFrameCount(int* frameCount, - audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT, - uint32_t sampleRate = 0); + static status_t getMinFrameCount(size_t* frameCount, + audio_stream_type_t streamType, + uint32_t sampleRate); + + /* How data is transferred to AudioTrack + */ + enum transfer_type { + TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters + TRANSFER_CALLBACK, // callback EVENT_MORE_DATA + TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer() + TRANSFER_SYNC, // synchronous write() + TRANSFER_SHARED, // shared memory + }; /* Constructs an uninitialized AudioTrack. No connection with - * AudioFlinger takes place. + * AudioFlinger takes place. Use set() after this. */ AudioTrack(); - /* Creates an audio track and registers it with AudioFlinger. + /* Creates an AudioTrack object and registers it with AudioFlinger. * Once created, the track needs to be started before it can be used. - * Unspecified values are set to the audio hardware's current - * values. + * Unspecified values are set to appropriate default values. 
+ * With this constructor, the track is configured for streaming mode. + * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA. + * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed. * * Parameters: * * streamType: Select the type of audio stream this track is attached to * (e.g. AUDIO_STREAM_MUSIC). - * sampleRate: Track sampling rate in Hz. + * sampleRate: Data source sampling rate in Hz. * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed * 16 bits per sample). * channelMask: Channel mask. * frameCount: Minimum size of track PCM buffer in frames. This defines the + * application's contribution to the * latency of the track. The actual size selected by the AudioTrack could be * larger if the requested size is not compatible with current audio HAL - * latency. Zero means to use a default value. + * configuration. Zero means to use a default value. * flags: See comments on audio_output_flags_t in <system/audio.h>. * cbf: Callback function. If not null, this function is called periodically - * to request new PCM data. + * to provide new data and inform of marker, position updates, etc. * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames have been consumed from track input buffer. + * This is expressed in units of frames at the initial source sample rate. * sessionId: Specific session ID, or zero to use default. - * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI. - * If not present in parameter list, then fixed at false. + * transferType: How data is transferred to AudioTrack. + * threadCanCallJava: Not present in parameter list, and so is fixed at false. */ AudioTrack( audio_stream_type_t streamType, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t, int frameCount = 0, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0); + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + const audio_offload_info_t *offloadInfo = NULL, + int uid = -1); - // DEPRECATED - explicit AudioTrack( int streamType, - uint32_t sampleRate = 0, - int format = AUDIO_FORMAT_DEFAULT, - int channelMask = 0, - int frameCount = 0, - uint32_t flags = (uint32_t) AUDIO_OUTPUT_FLAG_NONE, - callback_t cbf = 0, - void* user = 0, - int notificationFrames = 0, - int sessionId = 0); - - /* Creates an audio track and registers it with AudioFlinger. With this constructor, - * the PCM data to be rendered by AudioTrack is passed in a shared memory buffer - * identified by the argument sharedBuffer. This prototype is for static buffer playback. - * PCM data must be present in memory before the AudioTrack is started. - * The write() and flush() methods are not supported in this case. + /* Creates an audio track and registers it with AudioFlinger. + * With this constructor, the track is configured for static buffer mode. + * The format must not be 8-bit linear PCM. + * Data to be rendered is passed in a shared memory buffer + * identified by the argument sharedBuffer, which must be non-0. + * The memory should be initialized to the desired data before calling start(). + * The write() method is not supported in this case. 
* It is recommended to pass a callback function to be notified of playback end by an * EVENT_UNDERRUN event. */ AudioTrack( audio_stream_type_t streamType, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, - const sp<IMemory>& sharedBuffer = 0, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + const sp<IMemory>& sharedBuffer, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0); + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + const audio_offload_info_t *offloadInfo = NULL, + int uid = -1); /* Terminates the AudioTrack and unregisters it from AudioFlinger. * Also destroys all resources associated with the AudioTrack. */ - ~AudioTrack(); - +protected: + virtual ~AudioTrack(); +public: - /* Initialize an uninitialized AudioTrack. + /* Initialize an AudioTrack that was created using the AudioTrack() constructor. + * Don't call set() more than once, or after the AudioTrack() constructors that take parameters. * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful initialization * - INVALID_OPERATION: AudioTrack is already initialized * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...) * - NO_INIT: audio server or audio hardware not initialized - * */ - status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT, - uint32_t sampleRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, + * If status is not equal to NO_ERROR, don't call any other APIs on this AudioTrack. + * If sharedBuffer is non-0, the frameCount parameter is ignored and + * replaced by the shared buffer's total allocated size in frame units. + * + * Parameters not listed in the AudioTrack constructors above: + * + * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI. + */ + status_t set(audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, int frameCount = 0, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, callback_t cbf = NULL, @@ -218,71 +248,83 @@ public: int notificationFrames = 0, const sp<IMemory>& sharedBuffer = 0, bool threadCanCallJava = false, - int sessionId = 0); - + int sessionId = 0, + transfer_type transferType = TRANSFER_DEFAULT, + const audio_offload_info_t *offloadInfo = NULL, + int uid = -1); - /* Result of constructing the AudioTrack. This must be checked + /* Result of constructing the AudioTrack. This must be checked for successful initialization * before using any AudioTrack API (except for set()), because using * an uninitialized AudioTrack produces undefined results. * See set() method above for possible return codes. */ - status_t initCheck() const; + status_t initCheck() const { return mStatus; } /* Returns this track's estimated latency in milliseconds. * This includes the latency due to AudioTrack buffer size, AudioMixer (if any) * and audio hardware driver. */ - uint32_t latency() const; + uint32_t latency() const { return mLatency; } /* getters, see constructors and set() */ - audio_stream_type_t streamType() const; - audio_format_t format() const; - int channelCount() const; - uint32_t frameCount() const; + audio_stream_type_t streamType() const { return mStreamType; } + audio_format_t format() const { return mFormat; } - /* Return channelCount * (bit depth per channel / 8). 
+ /* Return frame size in bytes, which for linear PCM is + * channelCount * (bit depth per channel / 8). * channelCount is determined from channelMask, and bit depth comes from format. + * For non-linear formats, the frame size is typically 1 byte. */ - size_t frameSize() const; + size_t frameSize() const { return mFrameSize; } - sp<IMemory>& sharedBuffer(); + uint32_t channelCount() const { return mChannelCount; } + uint32_t frameCount() const { return mFrameCount; } + /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */ + sp<IMemory> sharedBuffer() const { return mSharedBuffer; } /* After it's created the track is not active. Call start() to * make it active. If set, the callback will start being called. + * If the track was previously paused, volume is ramped up over the first mix buffer. */ - void start(); - - /* Stop a track. If set, the callback will cease being called and - * obtainBuffer returns STOPPED. Note that obtainBuffer() still works - * and will fill up buffers until the pool is exhausted. + status_t start(); + + /* Stop a track. + * In static buffer mode, the track is stopped immediately. + * In streaming mode, the callback will cease being called. Note that obtainBuffer() still + * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK. + * In streaming mode the stop does not occur immediately: any data remaining in the buffer + * is first drained, mixed, and output, and only then is the track marked as stopped. */ void stop(); bool stopped() const; - /* Flush a stopped track. All pending buffers are discarded. - * This function has no effect if the track is not stopped. + /* Flush a stopped or paused track. All previously buffered data is discarded immediately. + * This has the effect of draining the buffers without mixing or output. + * Flush is intended for streaming mode, for example before switching to non-contiguous content. + * This function is a no-op if the track is not stopped or paused, or uses a static buffer. */ void flush(); - /* Pause a track. If set, the callback will cease being called and - * obtainBuffer returns STOPPED. Note that obtainBuffer() still works + /* Pause a track. After pause, the callback will cease being called and + * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works * and will fill up buffers until the pool is exhausted. + * Volume is ramped down over the next mix buffer following the pause request, + * and then the track is marked as paused. It can be resumed with ramp up by start(). */ void pause(); - /* Mute or unmute this track. - * While muted, the callback, if set, is still called. - */ - void mute(bool); - bool muted() const; - /* Set volume for this track, mostly used for games' sound effects * left and right volumes. Levels must be >= 0.0 and <= 1.0. + * This is the older API. New applications should use setVolume(float) when possible. */ status_t setVolume(float left, float right); - void getVolume(float* left, float* right) const; + + /* Set volume for all channels. This is the preferred API for new applications, + * especially for multi-channel content. + */ + status_t setVolume(float volume); /* Set the send level for this track. An auxiliary effect should be attached * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0. 
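For the static-buffer constructor documented earlier in this file, setup amounts to filling a shared IMemory region before start(). The sketch below assumes the usual MemoryDealer allocator from binder/MemoryDealer.h and 16-bit mono data; it is not part of this change.

    #include <string.h>                      // memcpy
    #include <binder/MemoryDealer.h>
    #include <media/AudioTrack.h>

    using namespace android;

    static sp<AudioTrack> makeStaticTrack(const int16_t* pcm, size_t frames)
    {
        const size_t bytes = frames * sizeof(int16_t);               // 16-bit mono
        sp<MemoryDealer> dealer = new MemoryDealer(bytes, "StaticTrack");
        sp<IMemory> shared = dealer->allocate(bytes);
        if (shared == 0) {
            return NULL;
        }
        memcpy(shared->pointer(), pcm, bytes);    // data must be in place before start()

        sp<AudioTrack> track = new AudioTrack(
                AUDIO_STREAM_MUSIC,
                44100,
                AUDIO_FORMAT_PCM_16_BIT,          // 8-bit linear PCM is not allowed here
                AUDIO_CHANNEL_OUT_MONO,
                shared);
        if (track->initCheck() != NO_ERROR) {
            return NULL;
        }
        return track;
    }

The caller would then invoke start(); setLoop() and setPosition(), documented in the hunks that follow, operate in frame offsets relative to the start of this shared buffer.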
@@ -290,33 +332,43 @@ public: status_t setAuxEffectSendLevel(float level); void getAuxEffectSendLevel(float* level) const; - /* Set sample rate for this track, mostly used for games' sound effects + /* Set source sample rate for this track in Hz, mostly used for games' sound effects */ - status_t setSampleRate(int sampleRate); + status_t setSampleRate(uint32_t sampleRate); + + /* Return current source sample rate in Hz, or 0 if unknown */ uint32_t getSampleRate() const; /* Enables looping and sets the start and end points of looping. + * Only supported for static buffer mode. * * Parameters: * - * loopStart: loop start expressed as the number of PCM frames played since AudioTrack start. - * loopEnd: loop end expressed as the number of PCM frames played since AudioTrack start. + * loopStart: loop start in frames relative to start of buffer. + * loopEnd: loop end in frames relative to start of buffer. * loopCount: number of loops to execute. Calling setLoop() with loopCount == 0 cancels any - * pending or active loop. loopCount = -1 means infinite looping. + * pending or active loop. loopCount == -1 means infinite looping. * * For proper operation the following condition must be respected: - * (loopEnd-loopStart) <= framecount() + * loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount(). + * + * If the loop period (loopEnd - loopStart) is too small for the implementation to support, + * setLoop() will return BAD_VALUE. loopCount must be >= -1. + * */ status_t setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount); /* Sets marker position. When playback reaches the number of frames specified, a callback with * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker - * notification callback. - * If the AudioTrack has been opened with no callback function associated, the operation will fail. + * notification callback. To set a marker at a position which would compute as 0, + * a workaround is to the set the marker at a nearby position such as ~0 or 1. + * If the AudioTrack has been opened with no callback function associated, the operation will + * fail. * * Parameters: * - * marker: marker position expressed in frames. + * marker: marker position expressed in wrapping (overflow) frame units, + * like the return value of getPosition(). * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -325,12 +377,13 @@ public: status_t setMarkerPosition(uint32_t marker); status_t getMarkerPosition(uint32_t *marker) const; - /* Sets position update period. Every time the number of frames specified has been played, * a callback with event type EVENT_NEW_POS is called. * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification * callback. - * If the AudioTrack has been opened with no callback function associated, the operation will fail. + * If the AudioTrack has been opened with no callback function associated, the operation will + * fail. + * Extremely small values may be rounded up to a value the implementation can support. * * Parameters: * @@ -343,34 +396,51 @@ public: status_t setPositionUpdatePeriod(uint32_t updatePeriod); status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const; - /* Sets playback head position within AudioTrack buffer. The new position is specified - * in number of frames. - * This method must be called with the AudioTrack in paused or stopped state. - * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames. 
- * Therefore using this method makes sense only when playing a "static" audio buffer - * as opposed to streaming. - * The getPosition() method on the other hand returns the total number of frames played since - * playback start. + /* Sets playback head position. + * Only supported for static buffer mode. * * Parameters: * - * position: New playback head position within AudioTrack buffer. + * position: New playback head position in frames relative to start of buffer. + * 0 <= position <= frameCount(). Note that end of buffer is permitted, + * but will result in an immediate underrun if started. * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation - * - INVALID_OPERATION: the AudioTrack is not stopped. - * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack buffer + * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode. + * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack + * buffer */ status_t setPosition(uint32_t position); - status_t getPosition(uint32_t *position); + + /* Return the total number of frames played since playback start. + * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz. + * It is reset to zero by flush(), reload(), and stop(). + * + * Parameters: + * + * position: Address where to return play head position. + * + * Returned status (from utils/Errors.h) can be: + * - NO_ERROR: successful operation + * - BAD_VALUE: position is NULL + */ + status_t getPosition(uint32_t *position) const; + + /* For static buffer mode only, this returns the current playback position in frames + * relative to start of buffer. It is analogous to the position units used by + * setLoop() and setPosition(). After underrun, the position will be at end of buffer. + */ + status_t getBufferPosition(uint32_t *position); /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids * rewriting the buffer before restarting playback after a stop. * This method must be called with the AudioTrack in paused or stopped state. + * Not allowed in streaming mode. * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation - * - INVALID_OPERATION: the AudioTrack is not stopped. + * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode. */ status_t reload(); @@ -392,7 +462,7 @@ public: * Returned value: * AudioTrack session ID. */ - int getSessionId() const; + int getSessionId() const { return mSessionId; } /* Attach track auxiliary output to specified effect. Use effectId = 0 * to detach track from effect. @@ -408,40 +478,78 @@ public: */ status_t attachAuxEffect(int effectId); - /* Obtains a buffer of "frameCount" frames. The buffer must be - * filled entirely, and then released with releaseBuffer(). - * If the track is stopped, obtainBuffer() returns - * STOPPED instead of NO_ERROR as long as there are buffers available, - * at which point NO_MORE_BUFFERS is returned. - * Buffers will be returned until the pool (buffercount()) + /* Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames. + * After filling these slots with data, the caller should release them with releaseBuffer(). + * If the track buffer is not full, obtainBuffer() returns as many contiguous + * [empty slots for] frames as are available immediately. 
+ * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK + * regardless of the value of waitCount. + * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a + * maximum timeout based on waitCount; see chart below. + * Buffers will be returned until the pool * is exhausted, at which point obtainBuffer() will either block - * or return WOULD_BLOCK depending on the value of the "blocking" + * or return WOULD_BLOCK depending on the value of the "waitCount" * parameter. + * Each sample is 16-bit signed PCM. + * + * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications, + * which should use write() or callback EVENT_MORE_DATA instead. * * Interpretation of waitCount: * +n limits wait time to n * WAIT_PERIOD_MS, * -1 causes an (almost) infinite wait time, * 0 non-blocking. + * + * Buffer fields + * On entry: + * frameCount number of frames requested + * After error return: + * frameCount 0 + * size 0 + * raw undefined + * After successful return: + * frameCount actual number of frames available, <= number requested + * size actual number of bytes available + * raw pointer to the buffer */ - enum { - NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value - STOPPED = 1 - }; + /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */ + status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount) + __attribute__((__deprecated__)); - status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount); +private: + /* If nonContig is non-NULL, it is an output parameter that will be set to the number of + * additional non-contiguous frames that are available immediately. + * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(), + * in case the requested amount of frames is in two or more non-contiguous regions. + * FIXME requested and elapsed are both relative times. Consider changing to absolute time. + */ + status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested, + struct timespec *elapsed = NULL, size_t *nonContig = NULL); +public: - /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */ +//EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy +// enum { +// NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value +// TEAR_DOWN = 0x80000002, +// STOPPED = 1, +// STREAM_END_WAIT, +// STREAM_END +// }; + + /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */ + // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed void releaseBuffer(Buffer* audioBuffer); /* As a convenience we provide a write() interface to the audio buffer. + * Input parameter 'size' is in byte units. * This is implemented on top of obtainBuffer/releaseBuffer. For best * performance use callbacks. Returns actual number of bytes written >= 0, * or one of the following negative status codes: - * INVALID_OPERATION AudioTrack is configured for shared buffer mode + * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode * BAD_VALUE size is invalid - * STOPPED AudioTrack was stopped during the write - * NO_MORE_BUFFERS when obtainBuffer() returns same + * WOULD_BLOCK when obtainBuffer() returns same, or + * AudioTrack was stopped during the write * or any other error code returned by IAudioTrack::start() or restoreTrack_l(). 
*/ ssize_t write(const void* buffer, size_t size); @@ -449,7 +557,32 @@ public: /* * Dumps the state of an audio track. */ - status_t dump(int fd, const Vector<String16>& args) const; + status_t dump(int fd, const Vector<String16>& args) const; + + /* + * Return the total number of frames which AudioFlinger desired but were unavailable, + * and thus which resulted in an underrun. Reset to zero by stop(). + */ + uint32_t getUnderrunFrames() const; + + /* Get the flags */ + audio_output_flags_t getFlags() const { return mFlags; } + + /* Set parameters - only possible when using direct output */ + status_t setParameters(const String8& keyValuePairs); + + /* Get parameters */ + String8 getParameters(const String8& keys); + + /* Poll for a timestamp on demand. + * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs, + * or if you need to get the most recent timestamp outside of the event callback handler. + * Caution: calling this method too often may be inefficient; + * if you need a high resolution mapping between frame position and presentation time, + * consider implementing that at application level, based on the low resolution timestamps. + * Returns NO_ERROR if timestamp is valid. + */ + status_t getTimestamp(AudioTimestamp& timestamp); protected: /* copying audio tracks is not allowed */ @@ -470,72 +603,158 @@ protected: void resume(); // allow thread to execute, if not requested to exit private: + void pauseInternal(nsecs_t ns = 0LL); + // like pause(), but only used internally within thread + friend class AudioTrack; virtual bool threadLoop(); - AudioTrack& mReceiver; - ~AudioTrackThread(); + AudioTrack& mReceiver; + virtual ~AudioTrackThread(); Mutex mMyLock; // Thread::mLock is private Condition mMyCond; // Thread::mThreadExitedCondition is private - bool mPaused; // whether thread is currently paused + bool mPaused; // whether thread is requested to pause at next loop entry + bool mPausedInt; // whether thread internally requests pause + nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored + bool mIgnoreNextPausedInt; // whether to ignore next mPausedInt request }; // body of AudioTrackThread::threadLoop() - bool processAudioBuffer(const sp<AudioTrackThread>& thread); + // returns the maximum amount of time before we would like to run again, where: + // 0 immediately + // > 0 no later than this many nanoseconds from now + // NS_WHENEVER still active but no particular deadline + // NS_INACTIVE inactive so don't run again until re-started + // NS_NEVER never again + static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3; + nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread); + status_t processStreamEnd(int32_t waitCount); + + + // caller must hold lock on mLock for all _l methods status_t createTrack_l(audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, audio_output_flags_t flags, const sp<IMemory>& sharedBuffer, - audio_io_handle_t output); + audio_io_handle_t output, + size_t epoch); + + // can only be called when mState != STATE_ACTIVE void flush_l(); - status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount); + + void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount); audio_io_handle_t getOutput_l(); - status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart); - bool stopped_l() const { return !mActive; } + // FIXME enum is faster than strcmp() for parameter 'from' + 
status_t restoreTrack_l(const char *from); + + bool isOffloaded() const + { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; } + + // Next 3 fields may be changed if IAudioTrack is re-created, but always != 0 sp<IAudioTrack> mAudioTrack; sp<IMemory> mCblkMemory; - sp<AudioTrackThread> mAudioTrackThread; + audio_track_cblk_t* mCblk; // re-load after mLock.unlock() + sp<AudioTrackThread> mAudioTrackThread; float mVolume[2]; float mSendLevel; - uint32_t mFrameCount; + mutable uint32_t mSampleRate; // mutable because getSampleRate() can update it. + size_t mFrameCount; // corresponds to current IAudioTrack + size_t mReqFrameCount; // frame count to request the next time a new + // IAudioTrack is needed + - audio_track_cblk_t* mCblk; - audio_format_t mFormat; + // constant after constructor or set() + audio_format_t mFormat; // as requested by client, not forced to 16-bit audio_stream_type_t mStreamType; - uint8_t mChannelCount; - uint8_t mMuted; - uint8_t mReserved; + uint32_t mChannelCount; audio_channel_mask_t mChannelMask; + transfer_type mTransfer; + + // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. For 8-bit PCM data, it's + // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer. + size_t mFrameSize; // app-level frame size + size_t mFrameSizeAF; // AudioFlinger frame size + status_t mStatus; - uint32_t mLatency; - bool mActive; // protected by mLock + // can change dynamically when IAudioTrack invalidated + uint32_t mLatency; // in ms + + // Indicates the current track state. Protected by mLock. + enum State { + STATE_ACTIVE, + STATE_STOPPED, + STATE_PAUSED, + STATE_PAUSED_STOPPING, + STATE_FLUSHED, + STATE_STOPPING, + } mState; + // for client callback handler callback_t mCbf; // callback handler for events, or NULL void* mUserData; - uint32_t mNotificationFramesReq; // requested number of frames between each notification callback - uint32_t mNotificationFramesAct; // actual number of frames between each notification callback + + // for notification APIs + uint32_t mNotificationFramesReq; // requested number of frames between each + // notification callback, + // at initial source sample rate + uint32_t mNotificationFramesAct; // actual number of frames between each + // notification callback, + // at initial source sample rate + bool mRefreshRemaining; // processAudioBuffer() should refresh next 2 + + // These are private to processAudioBuffer(), and are not protected by a lock + uint32_t mRemainingFrames; // number of frames to request in obtainBuffer() + bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer() + uint32_t mObservedSequence; // last observed value of mSequence + sp<IMemory> mSharedBuffer; - int mLoopCount; - uint32_t mRemainingFrames; - uint32_t mMarkerPosition; + uint32_t mLoopPeriod; // in frames, zero means looping is disabled + uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; - uint32_t mNewPosition; - uint32_t mUpdatePeriod; - bool mFlushed; // FIXME will be made obsolete by making flush() synchronous + uint32_t mNewPosition; // in frames + uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS + audio_output_flags_t mFlags; int mSessionId; int mAuxEffectId; + mutable Mutex mLock; - status_t mRestoreStatus; + bool mIsTimed; int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; + bool mAwaitBoost; // thread should wait for priority boost before running + + // The proxy should only be referenced while a 
lock is held because the proxy isn't + // multi-thread safe, especially the SingleStateQueue part of the proxy. + // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock, + // provided that the caller also holds an extra reference to the proxy and shared memory to keep + // them around in case they are replaced during the obtainBuffer(). + sp<StaticAudioTrackClientProxy> mStaticProxy; // for type safety only + sp<AudioTrackClientProxy> mProxy; // primary owner of the memory + + bool mInUnderrun; // whether track is currently in underrun state + String8 mName; // server's name for this IAudioTrack + +private: + class DeathNotifier : public IBinder::DeathRecipient { + public: + DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { } + protected: + virtual void binderDied(const wp<IBinder>& who); + private: + const wp<AudioTrack> mAudioTrack; + }; + + sp<DeathNotifier> mDeathNotifier; + uint32_t mSequence; // incremented for each new IAudioTrack attempt + audio_io_handle_t mOutput; // cached output io handle + int mClientUid; }; class TimedAudioTrack : public AudioTrack diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h index 65c26f4..b1ed7b0 100644 --- a/include/media/EffectsFactoryApi.h +++ b/include/media/EffectsFactoryApi.h @@ -74,7 +74,8 @@ int EffectQueryNumberEffects(uint32_t *pNumEffects); // -ENOENT no more effect available // -ENODEV factory failed to initialize // -EINVAL invalid pDescriptor -// -ENOSYS effect list has changed since last execution of EffectQueryNumberEffects() +// -ENOSYS effect list has changed since last execution of +// EffectQueryNumberEffects() // *pDescriptor: updated with the effect descriptor. // //////////////////////////////////////////////////////////////////////////////// @@ -91,12 +92,12 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor); // // Input: // pEffectUuid: pointer to the effect uuid. -// sessionId: audio session to which this effect instance will be attached. All effects created -// with the same session ID are connected in series and process the same signal stream. -// Knowing that two effects are part of the same effect chain can help the library implement -// some kind of optimizations. -// ioId: identifies the output or input stream this effect is directed to at audio HAL. For future -// use especially with tunneled HW accelerated effects +// sessionId: audio session to which this effect instance will be attached. All effects +// created with the same session ID are connected in series and process the same signal +// stream. Knowing that two effects are part of the same effect chain can help the +// library implement some kind of optimizations. +// ioId: identifies the output or input stream this effect is directed to at audio HAL. +// For future use especially with tunneled HW accelerated effects // // Input/Output: // pHandle: address where to return the effect handle. @@ -109,7 +110,8 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor); // *pHandle: updated with the effect handle. 
// //////////////////////////////////////////////////////////////////////////////// -int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle); +int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, + effect_handle_t *pHandle); //////////////////////////////////////////////////////////////////////////////// // diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h index 00c4444..2539ed3 100644 --- a/include/media/ExtendedAudioBufferProvider.h +++ b/include/media/ExtendedAudioBufferProvider.h @@ -18,12 +18,20 @@ #define ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H #include <media/AudioBufferProvider.h> +#include <media/AudioTimestamp.h> namespace android { class ExtendedAudioBufferProvider : public AudioBufferProvider { public: virtual size_t framesReady() const = 0; // see description at AudioFlinger.h + + // Return the total number of frames that have been obtained and released + virtual size_t framesReleased() const { return 0; } + + // Invoked by buffer consumer when a new timestamp is available. + // Default implementation ignores the timestamp. + virtual void onTimestamp(const AudioTimestamp& timestamp) { } }; } // namespace android diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h index 5170a87..282f275 100644 --- a/include/media/IAudioFlinger.h +++ b/include/media/IAudioFlinger.h @@ -49,34 +49,41 @@ public: TRACK_DEFAULT = 0, // client requests a default AudioTrack TRACK_TIMED = 1, // client requests a TimedAudioTrack TRACK_FAST = 2, // client requests a fast AudioTrack or AudioRecord + TRACK_OFFLOAD = 4, // client requests offload to hw codec }; typedef uint32_t track_flags_t; + // invariant on exit for all APIs that return an sp<>: + // (return value != 0) == (*status == NO_ERROR) + /* create an audio track and registers it with AudioFlinger. * return null if the track cannot be created. */ virtual sp<IAudioTrack> createTrack( - pid_t pid, audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, - track_flags_t flags, + size_t frameCount, + track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, + // input: ignored + // output: server's description of IAudioTrack for display in logs. + // Don't attempt to parse, as the format could change. + String8& name, + int clientUid, status_t *status) = 0; virtual sp<IAudioRecord> openRecord( - pid_t pid, audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, - track_flags_t flags, + size_t frameCount, + track_flags_t *flags, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, status_t *status) = 0; @@ -123,9 +130,12 @@ public: virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) = 0; - virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const = 0; + virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) + const = 0; - // register a current process for audio output change notifications + // Register an object to receive audio input/output change and track notifications. + // For a given calling pid, AudioFlinger disregards any registrations after the first. + // Thus the IAudioFlingerClient must be a singleton per process. 
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0; // retrieve the audio recording buffer size @@ -138,7 +148,8 @@ public: audio_format_t *pFormat, audio_channel_mask_t *pChannelMask, uint32_t *pLatencyMs, - audio_output_flags_t flags) = 0; + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo = NULL) = 0; virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0; virtual status_t closeOutput(audio_io_handle_t output) = 0; @@ -159,7 +170,7 @@ public: virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_io_handle_t output) const = 0; - virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const = 0; + virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0; virtual int newAudioSessionId() = 0; @@ -173,7 +184,7 @@ public: virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID, effect_descriptor_t *pDescriptor) const = 0; - virtual sp<IEffect> createEffect(pid_t pid, + virtual sp<IEffect> createEffect( effect_descriptor_t *pDesc, const sp<IEffectClient>& client, int32_t priority, @@ -191,9 +202,13 @@ public: // helpers for android.media.AudioManager.getProperty(), see description there for meaning // FIXME move these APIs to AudioPolicy to permit a more accurate implementation // that looks on primary device for a stream with fast flag, primary flag, or first one. - virtual int32_t getPrimaryOutputSamplingRate() = 0; - virtual int32_t getPrimaryOutputFrameCount() = 0; + virtual uint32_t getPrimaryOutputSamplingRate() = 0; + virtual size_t getPrimaryOutputFrameCount() = 0; + // Intended for AudioService to inform AudioFlinger of device's low RAM attribute, + // and should be called at most once. For a definition of what "low RAM" means, see + // android.app.ActivityManager.isLowRamDevice(). 
+ virtual status_t setLowRamDevice(bool isLowRamDevice) = 0; }; diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h index cc2e069..09b9ea6 100644 --- a/include/media/IAudioPolicyService.h +++ b/include/media/IAudioPolicyService.h @@ -44,15 +44,17 @@ public: audio_policy_dev_state_t state, const char *device_address) = 0; virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, - const char *device_address) = 0; + const char *device_address) = 0; virtual status_t setPhoneState(audio_mode_t state) = 0; - virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0; + virtual status_t setForceUse(audio_policy_force_use_t usage, + audio_policy_forced_cfg_t config) = 0; virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0; virtual audio_io_handle_t getOutput(audio_stream_type_t stream, uint32_t samplingRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, audio_channel_mask_t channelMask = 0, - audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0; + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL) = 0; virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0) = 0; @@ -88,10 +90,15 @@ public: virtual status_t unregisterEffect(int id) = 0; virtual status_t setEffectEnabled(int id, bool enabled) = 0; virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0; + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) + const = 0; virtual bool isSourceActive(audio_source_t source) const = 0; virtual status_t queryDefaultPreProcessing(int audioSession, effect_descriptor_t *descriptors, uint32_t *count) = 0; + // Check if offload is possible for given format, stream type, sample rate, + // bit rate, duration, video and streaming or offload property is enabled + virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0; }; diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h index d6e3141..eccc2ca 100644 --- a/include/media/IAudioRecord.h +++ b/include/media/IAudioRecord.h @@ -34,6 +34,9 @@ class IAudioRecord : public IInterface public: DECLARE_META_INTERFACE(AudioRecord); + /* get this tracks control block */ + virtual sp<IMemory> getCblk() const = 0; + /* After it's created the track is not active. Call start() to * make it active. */ @@ -44,9 +47,6 @@ public: * will be processed, unless flush() is called. */ virtual void stop() = 0; - - /* get this tracks control block */ - virtual sp<IMemory> getCblk() const = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h index 9e0e389..5c8a484 100644 --- a/include/media/IAudioTrack.h +++ b/include/media/IAudioTrack.h @@ -25,6 +25,8 @@ #include <binder/IInterface.h> #include <binder/IMemory.h> #include <utils/LinearTransform.h> +#include <utils/String8.h> +#include <media/AudioTimestamp.h> namespace android { @@ -54,11 +56,6 @@ public: */ virtual void flush() = 0; - /* Mute or unmute this track. - * While muted, the callback, if set, is still called. - */ - virtual void mute(bool) = 0; - /* Pause a track. If set, the callback will cease being called and * obtainBuffer will return an error. Buffers that are already released * will continue to be processed, unless/until flush() is called. @@ -87,6 +84,15 @@ public: or Tungsten time. 
The values for target are defined in AudioTrack.h */ virtual status_t setMediaTimeTransform(const LinearTransform& xform, int target) = 0; + + /* Send parameters to the audio hardware */ + virtual status_t setParameters(const String8& keyValuePairs) = 0; + + /* Return NO_ERROR if timestamp is valid */ + virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0; + + /* Signal the playback thread for a change in control block */ + virtual void signal() = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h index 61059bd..9dcb8d9 100644 --- a/include/media/ICrypto.h +++ b/include/media/ICrypto.h @@ -31,7 +31,7 @@ struct ICrypto : public IInterface { virtual status_t initCheck() const = 0; - virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const = 0; + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0; virtual status_t createPlugin( const uint8_t uuid[16], const void *data, size_t size) = 0; diff --git a/include/media/IDrm.h b/include/media/IDrm.h new file mode 100644 index 0000000..5ef26af --- /dev/null +++ b/include/media/IDrm.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <binder/IInterface.h> +#include <media/stagefright/foundation/ABase.h> +#include <media/drm/DrmAPI.h> +#include <media/IDrmClient.h> + +#ifndef ANDROID_IDRM_H_ + +#define ANDROID_IDRM_H_ + +namespace android { + +struct AString; + +struct IDrm : public IInterface { + DECLARE_META_INTERFACE(Drm); + + virtual status_t initCheck() const = 0; + + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0; + + virtual status_t createPlugin(const uint8_t uuid[16]) = 0; + + virtual status_t destroyPlugin() = 0; + + virtual status_t openSession(Vector<uint8_t> &sessionId) = 0; + + virtual status_t closeSession(Vector<uint8_t> const &sessionId) = 0; + + virtual status_t + getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, DrmPlugin::KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl) = 0; + + virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId) = 0; + + virtual status_t removeKeys(Vector<uint8_t> const &keySetId) = 0; + + virtual status_t restoreKeys(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keySetId) = 0; + + virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const = 0; + + virtual status_t getProvisionRequest(Vector<uint8_t> &request, + String8 &defaulUrl) = 0; + + virtual status_t provideProvisionResponse(Vector<uint8_t> const &response) = 0; + + virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0; + + virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0; + + virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0; + virtual status_t getPropertyByteArray(String8 const &name, + Vector<uint8_t> &value) const = 0; + virtual status_t setPropertyString(String8 const &name, + String8 const &value ) const = 0; + virtual status_t setPropertyByteArray(String8 const &name, + Vector<uint8_t> const &value) const = 0; + + virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) = 0; + + virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) = 0; + + virtual status_t encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) = 0; + + virtual status_t decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) = 0; + + virtual status_t sign(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature) = 0; + + virtual status_t verify(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match) = 0; + + virtual status_t setListener(const sp<IDrmClient>& listener) = 0; + +private: + DISALLOW_EVIL_CONSTRUCTORS(IDrm); +}; + +struct BnDrm : public BnInterface<IDrm> { + virtual status_t onTransact( + uint32_t code, const Parcel &data, Parcel *reply, + uint32_t flags = 0); +private: + void readVector(const Parcel &data, Vector<uint8_t> &vector) const; + void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const; +}; + +} // namespace android + +#endif // ANDROID_IDRM_H_ + diff 
--git a/include/media/IDrmClient.h b/include/media/IDrmClient.h new file mode 100644 index 0000000..3b2fc7c --- /dev/null +++ b/include/media/IDrmClient.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_IDRMCLIENT_H +#define ANDROID_IDRMCLIENT_H + +#include <utils/RefBase.h> +#include <binder/IInterface.h> +#include <binder/Parcel.h> +#include <media/drm/DrmAPI.h> + +namespace android { + +class IDrmClient: public IInterface +{ +public: + DECLARE_META_INTERFACE(DrmClient); + + virtual void notify(DrmPlugin::EventType eventType, int extra, const Parcel *obj) = 0; +}; + +// ---------------------------------------------------------------------------- + +class BnDrmClient: public BnInterface<IDrmClient> +{ +public: + virtual status_t onTransact(uint32_t code, + const Parcel& data, + Parcel* reply, + uint32_t flags = 0); +}; + +}; // namespace android + +#endif // ANDROID_IDRMCLIENT_H diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h index a0613c7..352561e 100644 --- a/include/media/IHDCP.h +++ b/include/media/IHDCP.h @@ -17,6 +17,7 @@ #include <binder/IInterface.h> #include <media/hardware/HDCPAPI.h> #include <media/stagefright/foundation/ABase.h> +#include <ui/GraphicBuffer.h> namespace android { @@ -45,18 +46,59 @@ struct IHDCP : public IInterface { // Request to shutdown the active HDCP session. virtual status_t shutdownAsync() = 0; - // Encrypt a data according to the HDCP spec. The data is to be - // encrypted in-place, only size bytes of data should be read/write, - // even if the size is not a multiple of 128 bit (16 bytes). + // Returns the capability bitmask of this HDCP session. + // Possible return values (please refer to HDCAPAPI.h): + // HDCP_CAPS_ENCRYPT: mandatory, meaning the HDCP module can encrypt + // from an input byte-array buffer to an output byte-array buffer + // HDCP_CAPS_ENCRYPT_NATIVE: the HDCP module supports encryption from + // a native buffer to an output byte-array buffer. The format of the + // input native buffer is specific to vendor's encoder implementation. + // It is the same format as that used by the encoder when + // "storeMetaDataInBuffers" extension is enabled on its output port. + virtual uint32_t getCaps() = 0; + + // ENCRYPTION only: + // Encrypt data according to the HDCP spec. "size" bytes of data are + // available at "inData" (virtual address), "size" may not be a multiple + // of 128 bits (16 bytes). An equal number of encrypted bytes should be + // written to the buffer at "outData" (virtual address). // This operation is to be synchronous, i.e. this call does not return // until outData contains size bytes of encrypted data. // streamCTR will be assigned by the caller (to 0 for the first PES stream, // 1 for the second and so on) - // inputCTR will be maintained by the callee for each PES stream. + // inputCTR _will_be_maintained_by_the_callee_ for each PES stream. 
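To make the capability check concrete: a caller is expected to consult getCaps() before choosing between the byte-array encrypt() path documented above and the native-buffer encryptNative() path added below. The sketch that follows is illustrative only; the helper encryptSample() and its arguments are hypothetical, and the HDCP_CAPS_* constants come from HDCPAPI.h (they may need to be scoped, e.g. through HDCPModule), not from this interface.

    #include <media/IHDCP.h>
    #include <ui/GraphicBuffer.h>
    using namespace android;

    // Hedged sketch: choose an encrypt path from the capability bits described above.
    static status_t encryptSample(const sp<IHDCP> &hdcp,
                                  const sp<GraphicBuffer> &gb,   // only used on the native path
                                  const void *inData, size_t size,
                                  uint32_t streamCTR, uint64_t *outInputCTR, void *outData) {
        uint32_t caps = hdcp->getCaps();
        if ((caps & HDCP_CAPS_ENCRYPT_NATIVE) && gb != NULL) {
            // input resides in a vendor-format native buffer produced by the encoder
            return hdcp->encryptNative(gb, 0 /* offset */, size, streamCTR,
                                       outInputCTR, outData);
        }
        if (caps & HDCP_CAPS_ENCRYPT) {
            // plain virtual-address input and output
            return hdcp->encrypt(inData, size, streamCTR, outInputCTR, outData);
        }
        return INVALID_OPERATION;
    }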
virtual status_t encrypt( const void *inData, size_t size, uint32_t streamCTR, uint64_t *outInputCTR, void *outData) = 0; + // Encrypt data according to the HDCP spec. "size" bytes of data starting + // at location "offset" are available in "buffer" (buffer handle). "size" + // may not be a multiple of 128 bits (16 bytes). An equal number of + // encrypted bytes should be written to the buffer at "outData" (virtual + // address). This operation is to be synchronous, i.e. this call does not + // return until outData contains size bytes of encrypted data. + // streamCTR will be assigned by the caller (to 0 for the first PES stream, + // 1 for the second and so on) + // inputCTR _will_be_maintained_by_the_callee_ for each PES stream. + virtual status_t encryptNative( + const sp<GraphicBuffer> &graphicBuffer, + size_t offset, size_t size, uint32_t streamCTR, + uint64_t *outInputCTR, void *outData) = 0; + + // DECRYPTION only: + // Decrypt data according to the HDCP spec. + // "size" bytes of encrypted data are available at "inData" + // (virtual address), "size" may not be a multiple of 128 bits (16 bytes). + // An equal number of decrypted bytes should be written to the buffer + // at "outData" (virtual address). + // This operation is to be synchronous, i.e. this call does not return + // until outData contains size bytes of decrypted data. + // Both streamCTR and inputCTR will be provided by the caller. + virtual status_t decrypt( + const void *inData, size_t size, + uint32_t streamCTR, uint64_t inputCTR, + void *outData) = 0; + private: DISALLOW_EVIL_CONSTRUCTORS(IHDCP); }; diff --git a/include/media/IMediaLogService.h b/include/media/IMediaLogService.h new file mode 100644 index 0000000..1f5777e --- /dev/null +++ b/include/media/IMediaLogService.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_IMEDIALOGSERVICE_H +#define ANDROID_IMEDIALOGSERVICE_H + +#include <binder/IInterface.h> +#include <binder/IMemory.h> +#include <binder/Parcel.h> + +namespace android { + +class IMediaLogService: public IInterface +{ +public: + DECLARE_META_INTERFACE(MediaLogService); + + virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) = 0; + virtual void unregisterWriter(const sp<IMemory>& shared) = 0; + +}; + +class BnMediaLogService: public BnInterface<IMediaLogService> +{ +public: + virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, + uint32_t flags = 0); +}; + +} // namespace android + +#endif // ANDROID_IMEDIALOGSERVICE_H diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h index 4ed1863..0cbd269 100644 --- a/include/media/IMediaPlayer.h +++ b/include/media/IMediaPlayer.h @@ -32,7 +32,7 @@ namespace android { class Parcel; class Surface; class IStreamSource; -class ISurfaceTexture; +class IGraphicBufferProducer; class IMediaPlayer: public IInterface { @@ -46,7 +46,7 @@ public: virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0; virtual status_t setDataSource(const sp<IStreamSource>& source) = 0; virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) = 0; + const sp<IGraphicBufferProducer>& bufferProducer) = 0; virtual status_t prepareAsync() = 0; virtual status_t start() = 0; virtual status_t stop() = 0; diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h index 7a89135..2998b37 100644 --- a/include/media/IMediaPlayerService.h +++ b/include/media/IMediaPlayerService.h @@ -32,6 +32,7 @@ namespace android { struct ICrypto; +struct IDrm; struct IHDCP; class IMediaRecorder; class IOMX; @@ -44,15 +45,20 @@ class IMediaPlayerService: public IInterface public: DECLARE_META_INTERFACE(MediaPlayerService); - virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid) = 0; - virtual sp<IMediaMetadataRetriever> createMetadataRetriever(pid_t pid) = 0; - virtual sp<IMediaPlayer> create(pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0; + virtual sp<IMediaRecorder> createMediaRecorder() = 0; + virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0; + virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0; - virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0; - virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0; + virtual status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, + audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize) = 0; + virtual status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, + int* pNumChannels, audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize) = 0; virtual sp<IOMX> getOMX() = 0; virtual sp<ICrypto> makeCrypto() = 0; - virtual sp<IHDCP> makeHDCP() = 0; + virtual sp<IDrm> makeDrm() = 0; + virtual sp<IHDCP> makeHDCP(bool createEncryptionModule) = 0; // Connects to a remote display. 
// 'iface' specifies the address of the local interface on which to listen for @@ -87,6 +93,9 @@ public: virtual void addBatteryData(uint32_t params) = 0; virtual status_t pullBatteryData(Parcel* reply) = 0; + + virtual status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h index ec84e25..3e67550 100644 --- a/include/media/IMediaRecorder.h +++ b/include/media/IMediaRecorder.h @@ -26,7 +26,7 @@ class Surface; class ICamera; class ICameraRecordingProxy; class IMediaRecorderClient; -class ISurfaceTexture; +class IGraphicBufferProducer; class IMediaRecorder: public IInterface { @@ -35,7 +35,7 @@ public: virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy) = 0; - virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0; + virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0; virtual status_t setVideoSource(int vs) = 0; virtual status_t setAudioSource(int as) = 0; virtual status_t setOutputFormat(int of) = 0; @@ -47,6 +47,7 @@ public: virtual status_t setVideoFrameRate(int frames_per_second) = 0; virtual status_t setParameters(const String8& params) = 0; virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0; + virtual status_t setClientName(const String16& clientName) = 0; virtual status_t prepare() = 0; virtual status_t getMaxAmplitude(int* max) = 0; virtual status_t start() = 0; @@ -55,7 +56,7 @@ public: virtual status_t init() = 0; virtual status_t close() = 0; virtual status_t release() = 0; - virtual sp<ISurfaceTexture> querySurfaceMediaSource() = 0; + virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IOMX.h b/include/media/IOMX.h index be1b2fc..9c8451c 100644 --- a/include/media/IOMX.h +++ b/include/media/IOMX.h @@ -19,6 +19,7 @@ #define ANDROID_IOMX_H_ #include <binder/IInterface.h> +#include <gui/IGraphicBufferProducer.h> #include <ui/GraphicBuffer.h> #include <utils/List.h> #include <utils/String8.h> @@ -82,6 +83,10 @@ public: virtual status_t storeMetaDataInBuffers( node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0; + virtual status_t prepareForAdaptivePlayback( + node_id node, OMX_U32 portIndex, OMX_BOOL enable, + OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0; + virtual status_t enableGraphicBuffers( node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0; @@ -96,6 +101,16 @@ public: node_id node, OMX_U32 port_index, const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0; + virtual status_t updateGraphicBufferInMeta( + node_id node, OMX_U32 port_index, + const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0; + + virtual status_t createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer) = 0; + + virtual status_t signalEndOfInputStream(node_id node) = 0; + // This API clearly only makes sense if the caller lives in the // same process as the callee, i.e. 
is the media_server, as the // returned "buffer_data" pointer is just that, a pointer into local @@ -123,6 +138,17 @@ public: node_id node, const char *parameter_name, OMX_INDEXTYPE *index) = 0; + + enum InternalOptionType { + INTERNAL_OPTION_SUSPEND, // data is a bool + INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY, // data is an int64_t + }; + virtual status_t setInternalOption( + node_id node, + OMX_U32 port_index, + InternalOptionType type, + const void *data, + size_t size) = 0; }; struct omx_message { diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h index a61704e..c8baae9 100644 --- a/include/media/IRemoteDisplay.h +++ b/include/media/IRemoteDisplay.h @@ -39,6 +39,9 @@ class IRemoteDisplay : public IInterface public: DECLARE_META_INTERFACE(RemoteDisplay); + virtual status_t pause() = 0; + virtual status_t resume() = 0; + // Disconnects the remote display and stops listening for new connections. virtual status_t dispose() = 0; }; diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h index 252b401..0e6d55d 100644 --- a/include/media/IRemoteDisplayClient.h +++ b/include/media/IRemoteDisplayClient.h @@ -26,7 +26,7 @@ namespace android { -class ISurfaceTexture; +class IGraphicBufferProducer; class IRemoteDisplayClient : public IInterface { @@ -48,8 +48,8 @@ public: // Indicates that the remote display has been connected successfully. // Provides a surface texture that the client should use to stream buffers to // the remote display. - virtual void onDisplayConnected(const sp<ISurfaceTexture>& surfaceTexture, - uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way + virtual void onDisplayConnected(const sp<IGraphicBufferProducer>& bufferProducer, + uint32_t width, uint32_t height, uint32_t flags, uint32_t session) = 0; // one-way // Indicates that the remote display has been disconnected normally. // This method should only be called once the client has called 'dispose()' diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h index 61b9d5a..677119b 100644 --- a/include/media/IStreamSource.h +++ b/include/media/IStreamSource.h @@ -37,6 +37,9 @@ struct IStreamSource : public IInterface { enum { // Video PES packets contain exactly one (aligned) access unit. kFlagAlignedVideoData = 1, + + // Timestamps are in ALooper::GetNowUs() units. + kFlagIsRealTimeData = 2, }; virtual uint32_t flags() const { return 0; } }; @@ -73,6 +76,11 @@ struct IStreamListener : public IInterface { // ATSParser::DiscontinuityType. static const char *const kKeyDiscontinuityMask; + // Optionally signalled as part of a discontinuity that includes + // DISCONTINUITY_TIME. It indicates the media time (in us) to be associated + // with the next PTS occurring in the stream. The value is of type int64_t.
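As a usage sketch only: a source wanting the parser to adopt a new media time would attach both keys to the extra message of a discontinuity, using the kKeyMediaTimeUs key declared just below. The DISCONTINUITY command value is part of the pre-existing IStreamListener interface and is an assumption here, as is passing ATSParser::DISCONTINUITY_TIME as the mask; the helper name and parameters are hypothetical.

    #include <media/IStreamSource.h>
    #include <media/stagefright/foundation/AMessage.h>
    using namespace android;

    // Hedged sketch: request a time discontinuity with an associated media time.
    static void signalTimeDiscontinuity(const sp<IStreamListener> &listener,
                                        int32_t discontinuityMask,  // e.g. ATSParser::DISCONTINUITY_TIME
                                        int64_t mediaTimeUs) {
        sp<AMessage> extra = new AMessage;
        extra->setInt32(IStreamListener::kKeyDiscontinuityMask, discontinuityMask);
        extra->setInt64(IStreamListener::kKeyMediaTimeUs, mediaTimeUs);   // in microseconds
        listener->issueCommand(IStreamListener::DISCONTINUITY,            // assumed Command value
                               false /* synchronous */, extra);
    }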
+ static const char *const kKeyMediaTimeUs; + virtual void issueCommand( Command cmd, bool synchronous, const sp<AMessage> &msg = NULL) = 0; }; diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h index 0616bf0..388f767 100644 --- a/include/media/JetPlayer.h +++ b/include/media/JetPlayer.h @@ -88,7 +88,7 @@ private: EAS_DATA_HANDLE mEasData; EAS_FILE_LOCATOR mEasJetFileLoc; EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer, - AudioTrack* mAudioTrack; // and we play it in this audio track + sp<AudioTrack> mAudioTrack; // and we play it in this audio track int mTrackBufferSize; S_JET_STATUS mJetStatus; S_JET_STATUS mPreviousJetStatus; diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h index b7bee3f..26d8729 100644 --- a/include/media/MediaPlayerInterface.h +++ b/include/media/MediaPlayerInterface.h @@ -37,7 +37,7 @@ namespace android { class Parcel; class Surface; -class ISurfaceTexture; +class IGraphicBufferProducer; template<typename T> class SortedVector; @@ -74,9 +74,18 @@ public: // AudioSink: abstraction layer for audio output class AudioSink : public RefBase { public: + enum cb_event_t { + CB_EVENT_FILL_BUFFER, // Request to write more data to buffer. + CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played + // back (after stop is called) + CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change: + // Need to re-evaluate offloading options + }; + // Callback returns the number of bytes actually written to the buffer. typedef size_t (*AudioCallback)( - AudioSink *audioSink, void *buffer, size_t size, void *cookie); + AudioSink *audioSink, void *buffer, size_t size, void *cookie, + cb_event_t event); virtual ~AudioSink() {} virtual bool ready() const = 0; // audio output is open and ready @@ -90,6 +99,8 @@ public: virtual status_t getPosition(uint32_t *position) const = 0; virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0; virtual int getSessionId() const = 0; + virtual audio_stream_type_t getAudioStreamType() const = 0; + virtual uint32_t getSampleRate() const = 0; // If no callback is specified, use the "write" API below to submit // audio data. 
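Taken together, the widened AudioCallback signature means one callback now handles ordinary buffer filling as well as the offload stream-end and tear-down notifications. Below is a minimal sketch of such a callback; MyPlayerState and its methods are hypothetical stand-ins for the player's own bookkeeping, not part of this interface.

    #include <media/MediaPlayerInterface.h>
    using namespace android;

    // Hedged sketch of an AudioSink::AudioCallback handling the new cb_event_t values.
    static size_t sinkCallback(MediaPlayerBase::AudioSink *sink,
                               void *buffer, size_t size, void *cookie,
                               MediaPlayerBase::AudioSink::cb_event_t event) {
        MyPlayerState *me = static_cast<MyPlayerState *>(cookie);   // hypothetical client state
        switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
            // copy up to 'size' bytes into 'buffer'; return the number of bytes actually written
            return me->fillBuffer(buffer, size);
        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
            me->notifyStreamEnd();      // everything queued in AF and HW has been rendered
            return 0;
        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
            me->scheduleTearDown();     // re-open the sink and re-evaluate offloading
            return 0;
        }
        return 0;
    }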
@@ -99,9 +110,10 @@ public: int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT, AudioCallback cb = NULL, void *cookie = NULL, - audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0; + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL) = 0; - virtual void start() = 0; + virtual status_t start() = 0; virtual ssize_t write(const void* buffer, size_t size) = 0; virtual void stop() = 0; virtual void flush() = 0; @@ -110,6 +122,9 @@ public: virtual status_t setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; } virtual bool needsTrailingPadding() { return true; } + + virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR; }; + virtual String8 getParameters(const String8& keys) { return String8::empty(); }; }; MediaPlayerBase() : mCookie(0), mNotify(0) {} @@ -131,9 +146,9 @@ public: return INVALID_OPERATION; } - // pass the buffered ISurfaceTexture to the media player service + // pass the buffered IGraphicBufferProducer to the media player service virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) = 0; + const sp<IGraphicBufferProducer>& bufferProducer) = 0; virtual status_t prepare() = 0; virtual status_t prepareAsync() = 0; @@ -198,6 +213,11 @@ public: return INVALID_OPERATION; } + virtual status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + return INVALID_OPERATION; + } + private: friend class MediaPlayerService; diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h index ef799f5..d7ac302 100644 --- a/include/media/MediaRecorderBase.h +++ b/include/media/MediaRecorderBase.h @@ -26,7 +26,7 @@ namespace android { class ICameraRecordingProxy; class Surface; -class ISurfaceTexture; +class IGraphicBufferProducer; struct MediaRecorderBase { MediaRecorderBase() {} @@ -42,12 +42,13 @@ struct MediaRecorderBase { virtual status_t setVideoFrameRate(int frames_per_second) = 0; virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy) = 0; - virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0; + virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0; virtual status_t setOutputFile(const char *path) = 0; virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0; virtual status_t setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;} virtual status_t setParameters(const String8& params) = 0; virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0; + virtual status_t setClientName(const String16& clientName) = 0; virtual status_t prepare() = 0; virtual status_t start() = 0; virtual status_t stop() = 0; @@ -55,7 +56,7 @@ struct MediaRecorderBase { virtual status_t reset() = 0; virtual status_t getMaxAmplitude(int *max) = 0; virtual status_t dump(int fd, const Vector<String16>& args) const = 0; - virtual sp<ISurfaceTexture> querySurfaceMediaSource() const = 0; + virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0; private: MediaRecorderBase(const MediaRecorderBase &); diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h new file mode 100644 index 0000000..04c5fd0 --- /dev/null +++ b/include/media/SingleStateQueue.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SINGLE_STATE_QUEUE_H +#define SINGLE_STATE_QUEUE_H + +// Non-blocking single element state queue, or +// Non-blocking single-reader / single-writer multi-word atomic load / store + +#include <stdint.h> + +namespace android { + +template<typename T> class SingleStateQueue { + +public: + + class Mutator; + class Observer; + + struct Shared { + // needs to be part of a union so don't define constructor or destructor + + friend class Mutator; + friend class Observer; + +private: + void init() { mAck = 0; mSequence = 0; } + + volatile int32_t mAck; +#if 0 + int mPad[7]; + // cache line boundary +#endif + volatile int32_t mSequence; + T mValue; + }; + + class Mutator { + public: + Mutator(Shared *shared); + /*virtual*/ ~Mutator() { } + + // push new value onto state queue, overwriting previous value; + // returns a sequence number which can be used with ack() + int32_t push(const T& value); + + // return true if most recent push has been observed + bool ack(); + + // return true if a push with specified sequence number or later has been observed + bool ack(int32_t sequence); + + private: + int32_t mSequence; + Shared * const mShared; + }; + + class Observer { + public: + Observer(Shared *shared); + /*virtual*/ ~Observer() { } + + // return true if value has changed + bool poll(T& value); + + private: + int32_t mSequence; + int mSeed; // for PRNG + Shared * const mShared; + }; + +#if 0 + SingleStateQueue(void /*Shared*/ *shared); + /*virtual*/ ~SingleStateQueue() { } + + static size_t size() { return sizeof(Shared); } +#endif + +}; + +} // namespace android + +#endif // SINGLE_STATE_QUEUE_H diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h index 002b045..2dd78cc 100644 --- a/include/media/SoundPool.h +++ b/include/media/SoundPool.h @@ -22,6 +22,8 @@ #include <utils/Vector.h> #include <utils/KeyedVector.h> #include <media/AudioTrack.h> +#include <binder/MemoryHeapBase.h> +#include <binder/MemoryBase.h> namespace android { @@ -65,8 +67,10 @@ public: sp<IMemory> getIMemory() { return mData; } // hack - void init(int numChannels, int sampleRate, audio_format_t format, size_t size, sp<IMemory> data ) { - mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size; mData = data; } + void init(int numChannels, int sampleRate, audio_format_t format, size_t size, + sp<IMemory> data ) { + mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size; + mData = data; } private: void init(); @@ -83,6 +87,7 @@ private: int64_t mLength; char* mUrl; sp<IMemory> mData; + sp<MemoryHeapBase> mHeap; }; // stores pending events for stolen channels @@ -116,7 +121,7 @@ protected: class SoundChannel : public SoundEvent { public: enum state { IDLE, RESUMING, STOPPING, PAUSED, PLAYING }; - SoundChannel() : mAudioTrack(NULL), mState(IDLE), mNumChannels(1), + SoundChannel() : mState(IDLE), mNumChannels(1), mPos(0), mToggle(0), mAutoPaused(false) {} ~SoundChannel(); void init(SoundPool* soundPool); @@ -146,7 +151,7 @@ private: bool doStop_l(); SoundPool* mSoundPool; - AudioTrack* mAudioTrack; + sp<AudioTrack> mAudioTrack; 
SoundEvent mNextEvent; Mutex mLock; int mState; diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h index 29c8fd9..98c4332 100644 --- a/include/media/ToneGenerator.h +++ b/include/media/ToneGenerator.h @@ -160,7 +160,7 @@ public: bool isInited() { return (mState == TONE_IDLE)?false:true;} // returns the audio session this ToneGenerator belongs to or 0 if an error occurred. - int getSessionId() { return (mpAudioTrack == NULL) ? 0 : mpAudioTrack->getSessionId(); } + int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); } private: @@ -263,14 +263,15 @@ private: unsigned short mLoopCounter; // Current tone loopback count - int mSamplingRate; // AudioFlinger Sampling rate - AudioTrack *mpAudioTrack; // Pointer to audio track used for playback + uint32_t mSamplingRate; // AudioFlinger Sampling rate + sp<AudioTrack> mpAudioTrack; // Pointer to audio track used for playback Mutex mLock; // Mutex to control concurrent access to ToneGenerator object from audio callback and application API Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested float mVolume; // Volume applied to audio track audio_stream_type_t mStreamType; // Audio stream used for output unsigned int mProcessSize; // Size of audio blocks generated at a time by audioCallback() (in PCM frames). + struct timespec mStartTime; // tone start time: needed to guarantee actual tone duration bool initAudioTrack(); static void audioCallback(int event, void* user, void *info); diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h index aa58905..6167dd6 100644 --- a/include/media/Visualizer.h +++ b/include/media/Visualizer.h @@ -19,7 +19,7 @@ #include <media/AudioEffect.h> #include <audio_effects/effect_visualizer.h> -#include <string.h> +#include <utils/Thread.h> /** * The Visualizer class enables applications to retrieve part of the currently playing audio for @@ -114,6 +114,14 @@ public: status_t setScalingMode(uint32_t mode); uint32_t getScalingMode() { return mScalingMode; } + // set which measurements are done on the audio buffers processed by the effect. + // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS + status_t setMeasurementMode(uint32_t mode); + uint32_t getMeasurementMode() { return mMeasurementMode; } + + // return a set of int32_t measurements + status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements); + + // return a capture in PCM 8 bit unsigned format.
The size of the capture is equal to // getCaptureSize() status_t getWaveForm(uint8_t *waveform); @@ -156,6 +164,7 @@ private: uint32_t mCaptureSize; uint32_t mSampleRate; uint32_t mScalingMode; + uint32_t mMeasurementMode; capture_cbk_t mCaptureCallBack; void *mCaptureCbkUser; sp<CaptureThread> mCaptureThread; diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h index f7cebc5..4c05fc3 100644 --- a/include/media/mediaplayer.h +++ b/include/media/mediaplayer.h @@ -33,7 +33,7 @@ class ANativeWindow; namespace android { class Surface; -class ISurfaceTexture; +class IGraphicBufferProducer; enum media_event_type { MEDIA_NOP = 0, // interface test message @@ -42,9 +42,14 @@ enum media_event_type { MEDIA_BUFFERING_UPDATE = 3, MEDIA_SEEK_COMPLETE = 4, MEDIA_SET_VIDEO_SIZE = 5, + MEDIA_STARTED = 6, + MEDIA_PAUSED = 7, + MEDIA_STOPPED = 8, + MEDIA_SKIPPED = 9, MEDIA_TIMED_TEXT = 99, MEDIA_ERROR = 100, MEDIA_INFO = 200, + MEDIA_SUBTITLE_DATA = 201, }; // Generic error codes for the media player framework. Errors are fatal, the @@ -173,6 +178,7 @@ enum media_track_type { MEDIA_TRACK_TYPE_VIDEO = 1, MEDIA_TRACK_TYPE_AUDIO = 2, MEDIA_TRACK_TYPE_TIMEDTEXT = 3, + MEDIA_TRACK_TYPE_SUBTITLE = 4, }; // ---------------------------------------------------------------------------- @@ -199,7 +205,7 @@ public: status_t setDataSource(int fd, int64_t offset, int64_t length); status_t setDataSource(const sp<IStreamSource> &source); status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture); + const sp<IGraphicBufferProducer>& bufferProducer); status_t setListener(const sp<MediaPlayerListener>& listener); status_t prepare(); status_t prepareAsync(); @@ -218,8 +224,12 @@ public: bool isLooping(); status_t setVolume(float leftVolume, float rightVolume); void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL); - static sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); - static sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); + static status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, + audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize); + static status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, + int* pNumChannels, audio_format_t* pFormat, + const sp<IMemoryHeap>& heap, size_t *pSize); status_t invoke(const Parcel& request, Parcel *reply); status_t setMetadataFilter(const Parcel& filter); status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata); @@ -232,6 +242,9 @@ public: status_t setRetransmitEndpoint(const char* addrString, uint16_t port); status_t setNextMediaPlayer(const sp<MediaPlayer>& player); + status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + private: void clear_l(); status_t seekTo_l(int msec); @@ -249,7 +262,6 @@ private: sp<MediaPlayerListener> mListener; void* mCookie; media_player_states mCurrentState; - int mDuration; int mCurrentPosition; int mSeekPosition; bool mPrepareSync; diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h index 6d304e0..88a42a0 100644 --- a/include/media/mediarecorder.h +++ b/include/media/mediarecorder.h @@ -31,8 +31,8 @@ class Surface; class IMediaRecorder; class ICamera; class ICameraRecordingProxy; -class ISurfaceTexture; -class SurfaceTextureClient; +class IGraphicBufferProducer; +class Surface; typedef void (*media_completion_f)(status_t status, 
void *cookie); @@ -207,7 +207,7 @@ public: void died(); status_t initCheck(); status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy); - status_t setPreviewSurface(const sp<Surface>& surface); + status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface); status_t setVideoSource(int vs); status_t setAudioSource(int as); status_t setOutputFormat(int of); @@ -219,6 +219,7 @@ public: status_t setVideoFrameRate(int frames_per_second); status_t setParameters(const String8& params); status_t setListener(const sp<MediaRecorderListener>& listener); + status_t setClientName(const String16& clientName); status_t prepare(); status_t getMaxAmplitude(int* max); status_t start(); @@ -228,7 +229,7 @@ public: status_t close(); status_t release(); void notify(int msg, int ext1, int ext2); - sp<ISurfaceTexture> querySurfaceMediaSourceFromMediaServer(); + sp<IGraphicBufferProducer> querySurfaceMediaSourceFromMediaServer(); private: void doCleanUp(); @@ -237,10 +238,10 @@ private: sp<IMediaRecorder> mMediaRecorder; sp<MediaRecorderListener> mListener; - // Reference toISurfaceTexture + // Reference to IGraphicBufferProducer // for encoding GL Frames. That is useful only when the // video source is set to VIDEO_SOURCE_GRALLOC_BUFFER - sp<ISurfaceTexture> mSurfaceMediaSource; + sp<IGraphicBufferProducer> mSurfaceMediaSource; media_recorder_states mCurrentState; bool mIsAudioSourceSet; diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h index 5976b18..7948d40 100644 --- a/include/media/nbaio/AudioStreamOutSink.h +++ b/include/media/nbaio/AudioStreamOutSink.h @@ -52,6 +52,8 @@ public: // implementation of GNWT (if any) virtual status_t getNextWriteTimestamp(int64_t *timestamp); + virtual status_t getTimestamp(AudioTimestamp& timestamp); + // NBAIO_Sink end #if 0 // until necessary diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h index 5fcfe9e..d3802fe 100644 --- a/include/media/nbaio/MonoPipe.h +++ b/include/media/nbaio/MonoPipe.h @@ -20,9 +20,12 @@ #include <time.h> #include <utils/LinearTransform.h> #include "NBAIO.h" +#include <media/SingleStateQueue.h> namespace android { +typedef SingleStateQueue<AudioTimestamp> AudioTimestampSingleStateQueue; + // MonoPipe is similar to Pipe except: // - supports only a single reader, called MonoPipeReader // - write() cannot overrun; instead it will return a short actual count if insufficient space @@ -88,6 +91,9 @@ public: // Return true if the write side of a pipe is currently shutdown. 
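MonoPipe uses the AudioTimestampSingleStateQueue typedef introduced above to hand the most recent AudioTimestamp from the reader side back to the writer side without blocking: the consumer pushes through a Mutator, the producer polls through an Observer, and only the latest value is retained. A rough sketch of that pattern, assuming SingleStateQueue's Mutator exposes push() and its Observer exposes poll() (the exact API lives in media/SingleStateQueue.h, so treat these names as assumptions):

    #include <media/AudioTimestamp.h>
    #include <media/SingleStateQueue.h>

    using namespace android;

    typedef SingleStateQueue<AudioTimestamp> AudioTimestampSingleStateQueue;

    // Lock-free, single-slot handoff of the most recent timestamp.
    struct TimestampChannel {
        AudioTimestampSingleStateQueue::Shared   shared;     // shared storage
        AudioTimestampSingleStateQueue::Mutator  mutator;    // consumer side publishes
        AudioTimestampSingleStateQueue::Observer observer;   // producer side polls

        TimestampChannel() : mutator(&shared), observer(&shared) { }

        void publish(const AudioTimestamp &ts) { mutator.push(ts); }         // overwrites the slot
        bool latest(AudioTimestamp &ts)        { return observer.poll(ts); } // false if none yet
    };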
bool isShutdown(); + // Return NO_ERROR if there is a timestamp available + status_t getTimestamp(AudioTimestamp& timestamp); + private: // A pair of methods and a helper variable which allows the reader and the // writer to update and observe the values of mFront and mNextRdPTS in an @@ -127,6 +133,10 @@ private: LinearTransform mSamplesToLocalTime; bool mIsShutdown; // whether shutdown(true) was called, no barriers are needed + + AudioTimestampSingleStateQueue::Shared mTimestampShared; + AudioTimestampSingleStateQueue::Mutator mTimestampMutator; + AudioTimestampSingleStateQueue::Observer mTimestampObserver; }; } // namespace android diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h index 0e1c992..78fe867 100644 --- a/include/media/nbaio/MonoPipeReader.h +++ b/include/media/nbaio/MonoPipeReader.h @@ -49,6 +49,8 @@ public: virtual ssize_t read(void *buffer, size_t count, int64_t readPTS); + virtual void onTimestamp(const AudioTimestamp& timestamp); + // NBAIO_Source end #if 0 // until necessary diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h index 81f42ed..1da0c73 100644 --- a/include/media/nbaio/NBAIO.h +++ b/include/media/nbaio/NBAIO.h @@ -28,6 +28,7 @@ #include <stdlib.h> #include <utils/Errors.h> #include <utils/RefBase.h> +#include <media/AudioTimestamp.h> namespace android { @@ -45,17 +46,15 @@ enum { // Negotiation of format is based on the data provider and data sink, or the data consumer and // data source, exchanging prioritized arrays of offers and counter-offers until a single offer is // mutually agreed upon. Each offer is an NBAIO_Format. For simplicity and performance, -// NBAIO_Format is an enum that ties together the most important combinations of the various +// NBAIO_Format is a typedef that ties together the most important combinations of the various // attributes, rather than a struct with separate fields for format, sample rate, channel count, // interleave, packing, alignment, etc. The reason is that NBAIO_Format tries to abstract out only -// the combinations that are actually needed within AudioFligner. If the list of combinations grows +// the combinations that are actually needed within AudioFlinger. If the list of combinations grows // too large, then this decision should be re-visited. -enum NBAIO_Format { - Format_Invalid, - Format_SR44_1_C2_I16, // 44.1 kHz PCM stereo interleaved 16-bit signed - Format_SR48_C2_I16, // 48 kHz PCM stereo interleaved 16-bit signed - Format_SR44_1_C1_I16, // 44.1 kHz PCM mono interleaved 16-bit signed - Format_SR48_C1_I16, // 48 kHz PCM mono interleaved 16-bit signed +// Sample rate and channel count are explicit, PCM interleaved 16-bit is assumed. +typedef unsigned NBAIO_Format; +enum { + Format_Invalid }; // Return the frame size of an NBAIO_Format in bytes @@ -215,6 +214,11 @@ public: // <other> Something unexpected happened internally. Check the logs and start debugging. virtual status_t getNextWriteTimestamp(int64_t *ts) { return INVALID_OPERATION; } + // Returns NO_ERROR if a timestamp is available. The timestamp includes the total number + // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC + // as of this presentation count. 
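The getTimestamp() contract documented above pairs a presented-frame count with the CLOCK_MONOTONIC time at which that count was observed, which is enough for a simple staleness or latency estimate. A minimal sketch, assuming AudioTimestamp carries mPosition and mTime as in media/AudioTimestamp.h, and that 'sink' overrides the INVALID_OPERATION default:

    #include <time.h>
    #include <media/AudioTimestamp.h>
    #include <media/nbaio/NBAIO.h>

    using namespace android;

    // Returns microseconds elapsed since the sink's reported presentation time,
    // or -1 if no timestamp is available yet.
    static int64_t timestampAgeUs(const sp<NBAIO_Sink> &sink) {
        AudioTimestamp ts;
        if (sink->getTimestamp(ts) != NO_ERROR) {
            return -1;
        }
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        int64_t nowUs  = (int64_t)now.tv_sec * 1000000LL + now.tv_nsec / 1000;
        int64_t thenUs = (int64_t)ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
        return nowUs - thenUs;   // ts.mPosition frames had been presented as of thenUs
    }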
+ virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; } + protected: NBAIO_Sink(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { } virtual ~NBAIO_Sink() { } @@ -302,6 +306,10 @@ public: virtual ssize_t readVia(readVia_t via, size_t total, void *user, int64_t readPTS, size_t block = 0); + // Invoked asynchronously by corresponding sink when a new timestamp is available. + // Default implementation ignores the timestamp. + virtual void onTimestamp(const AudioTimestamp& timestamp) { } + protected: NBAIO_Source(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { } virtual ~NBAIO_Source() { } diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h new file mode 100644 index 0000000..6d59ea7 --- /dev/null +++ b/include/media/nbaio/NBLog.h @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Non-blocking event logger intended for safe communication between processes via shared memory + +#ifndef ANDROID_MEDIA_NBLOG_H +#define ANDROID_MEDIA_NBLOG_H + +#include <binder/IMemory.h> +#include <utils/Mutex.h> +#include <media/nbaio/roundup.h> + +namespace android { + +class NBLog { + +public: + +class Writer; +class Reader; + +private: + +enum Event { + EVENT_RESERVED, + EVENT_STRING, // ASCII string, not NUL-terminated + EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC) +}; + +// --------------------------------------------------------------------------- + +// representation of a single log entry in private memory +struct Entry { + Entry(Event event, const void *data, size_t length) + : mEvent(event), mLength(length), mData(data) { } + /*virtual*/ ~Entry() { } + + int readAt(size_t offset) const; + +private: + friend class Writer; + Event mEvent; // event type + size_t mLength; // length of additional data, 0 <= mLength <= 255 + const void *mData; // event type-specific data +}; + +// representation of a single log entry in shared memory +// byte[0] mEvent +// byte[1] mLength +// byte[2] mData[0] +// ... +// byte[2+i] mData[i] +// ... +// byte[2+mLength-1] mData[mLength-1] +// byte[2+mLength] duplicate copy of mLength to permit reverse scan +// byte[3+mLength] start of next log entry + +// located in shared memory +struct Shared { + Shared() : mRear(0) { } + /*virtual*/ ~Shared() { } + + volatile int32_t mRear; // index one byte past the end of most recent Entry + char mBuffer[0]; // circular buffer for entries +}; + +public: + +// --------------------------------------------------------------------------- + +// FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet. +// For now it is just a namespace for sharedSize(). +class Timeline : public RefBase { +public: +#if 0 + Timeline(size_t size, void *shared = NULL); + virtual ~Timeline(); +#endif + + // Input parameter 'size' is the desired size of the timeline in byte units. 
+ // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices. + static size_t sharedSize(size_t size); + +#if 0 +private: + friend class Writer; + friend class Reader; + + const size_t mSize; // circular buffer size in bytes, must be a power of 2 + bool mOwn; // whether I own the memory at mShared + Shared* const mShared; // pointer to shared memory +#endif +}; + +// --------------------------------------------------------------------------- + +// Writer is thread-safe with respect to Reader, but not with respect to multiple threads +// calling Writer methods. If you need multi-thread safety for writing, use LockedWriter. +class Writer : public RefBase { +public: + Writer(); // dummy nop implementation without shared memory + + // Input parameter 'size' is the desired size of the timeline in byte units. + // The size of the shared memory must be at least Timeline::sharedSize(size). + Writer(size_t size, void *shared); + Writer(size_t size, const sp<IMemory>& iMemory); + + virtual ~Writer() { } + + virtual void log(const char *string); + virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); + virtual void logvf(const char *fmt, va_list ap); + virtual void logTimestamp(); + virtual void logTimestamp(const struct timespec& ts); + + virtual bool isEnabled() const; + + // return value for all of these is the previous isEnabled() + virtual bool setEnabled(bool enabled); // but won't enable if no shared memory + bool enable() { return setEnabled(true); } + bool disable() { return setEnabled(false); } + + sp<IMemory> getIMemory() const { return mIMemory; } + +private: + void log(Event event, const void *data, size_t length); + void log(const Entry *entry, bool trusted = false); + + const size_t mSize; // circular buffer size in bytes, must be a power of 2 + Shared* const mShared; // raw pointer to shared memory + const sp<IMemory> mIMemory; // ref-counted version + int32_t mRear; // my private copy of mShared->mRear + bool mEnabled; // whether to actually log +}; + +// --------------------------------------------------------------------------- + +// Similar to Writer, but safe for multiple threads to call concurrently +class LockedWriter : public Writer { +public: + LockedWriter(); + LockedWriter(size_t size, void *shared); + + virtual void log(const char *string); + virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); + virtual void logvf(const char *fmt, va_list ap); + virtual void logTimestamp(); + virtual void logTimestamp(const struct timespec& ts); + + virtual bool isEnabled() const; + virtual bool setEnabled(bool enabled); + +private: + mutable Mutex mLock; +}; + +// --------------------------------------------------------------------------- + +class Reader : public RefBase { +public: + + // Input parameter 'size' is the desired size of the timeline in byte units. + // The size of the shared memory must be at least Timeline::sharedSize(size). 
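As a usage illustration for the Writer API above: size a shared-memory region with Timeline::sharedSize(), hand it to a Writer, and log a few entries. MemoryDealer and the 4096-byte figure are just one convenient way to obtain an IMemory for this sketch; they are not prescribed by this change.

    #include <binder/MemoryDealer.h>
    #include <media/nbaio/NBLog.h>

    using namespace android;

    void nblogExample() {
        const size_t kLogSize = 4096;                                 // desired timeline size in bytes
        const size_t kTotal = NBLog::Timeline::sharedSize(kLogSize);  // rounded up + index overhead
        sp<MemoryDealer> dealer = new MemoryDealer(kTotal, "NBLogExample");
        sp<IMemory> shared = dealer->allocate(kTotal);

        sp<NBLog::Writer> writer = new NBLog::Writer(kLogSize, shared);
        writer->log("mixer cycle start");     // EVENT_STRING entry
        writer->logf("frames=%u", 256u);      // printf-style convenience
        writer->logTimestamp();               // EVENT_TIMESTAMP, CLOCK_MONOTONIC
    }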
+ Reader(size_t size, const void *shared); + Reader(size_t size, const sp<IMemory>& iMemory); + + virtual ~Reader() { } + + void dump(int fd, size_t indent = 0); + bool isIMemory(const sp<IMemory>& iMemory) const; + +private: + const size_t mSize; // circular buffer size in bytes, must be a power of 2 + const Shared* const mShared; // raw pointer to shared memory + const sp<IMemory> mIMemory; // ref-counted version + int32_t mFront; // index of oldest acknowledged Entry + + static const size_t kSquashTimestamp = 5; // squash this many or more adjacent timestamps +}; + +}; // class NBLog + +} // namespace android + +#endif // ANDROID_MEDIA_NBLOG_H diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h index c08331b..cdfb6fe 100644 --- a/include/media/nbaio/SourceAudioBufferProvider.h +++ b/include/media/nbaio/SourceAudioBufferProvider.h @@ -36,6 +36,8 @@ public: // ExtendedAudioBufferProvider interface virtual size_t framesReady() const; + virtual size_t framesReleased() const; + virtual void onTimestamp(const AudioTimestamp& timestamp); private: const sp<NBAIO_Source> mSource; // the wrapped source @@ -45,6 +47,7 @@ private: size_t mOffset; // frame offset within mAllocated of valid data size_t mRemaining; // frame count within mAllocated of valid data size_t mGetCount; // buffer.frameCount of the most recent getNextBuffer + uint32_t mFramesReleased; // counter of the total number of frames released }; } // namespace android diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h index cba8a6b..e796ab3 100644 --- a/include/media/stagefright/ACodec.h +++ b/include/media/stagefright/ACodec.h @@ -43,7 +43,10 @@ struct ACodec : public AHierarchicalStateMachine { kWhatError = 'erro', kWhatComponentAllocated = 'cAll', kWhatComponentConfigured = 'cCon', + kWhatInputSurfaceCreated = 'isfc', + kWhatSignaledInputEOS = 'seos', kWhatBuffersAllocated = 'allc', + kWhatOMXDied = 'OMXd', }; ACodec(); @@ -54,8 +57,12 @@ struct ACodec : public AHierarchicalStateMachine { void signalResume(); void initiateShutdown(bool keepComponentAllocated = false); + void signalSetParameters(const sp<AMessage> &msg); + void signalEndOfInputStream(); + void initiateAllocateComponent(const sp<AMessage> &msg); void initiateConfigureComponent(const sp<AMessage> &msg); + void initiateCreateInputSurface(); void initiateStart(); void signalRequestIDRFrame(); @@ -91,6 +98,7 @@ private: struct ExecutingToIdleState; struct IdleToLoadedState; struct FlushingState; + struct DeathNotifier; enum { kWhatSetup = 'setu', @@ -103,8 +111,11 @@ private: kWhatDrainDeferredMessages = 'drai', kWhatAllocateComponent = 'allo', kWhatConfigureComponent = 'conf', + kWhatCreateInputSurface = 'cisf', + kWhatSignalEndOfInputStream = 'eois', kWhatStart = 'star', kWhatRequestIDRFrame = 'ridr', + kWhatSetParameters = 'setP', }; enum { @@ -113,7 +124,8 @@ private: }; enum { - kFlagIsSecure = 1, + kFlagIsSecure = 1, + kFlagPushBlankBuffersToNativeWindowOnShutdown = 2, }; struct BufferInfo { @@ -127,6 +139,7 @@ private: IOMX::buffer_id mBufferID; Status mStatus; + unsigned mDequeuedAt; sp<ABuffer> mData; sp<GraphicBuffer> mGraphicBuffer; @@ -171,7 +184,7 @@ private: bool mSentFormat; bool mIsEncoder; - + bool mUseMetadataOnEncoderOutput; bool mShutdownInProgress; // If "mKeepComponentAllocated" we only transition back to Loaded state @@ -183,11 +196,22 @@ private: bool mChannelMaskPresent; int32_t mChannelMask; + unsigned mDequeueCounter; + bool 
mStoreMetaDataInOutputBuffers; + int32_t mMetaDataBuffersToSubmit; + + int64_t mRepeatFrameDelayUs; + status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode); status_t allocateBuffersOnPort(OMX_U32 portIndex); status_t freeBuffersOnPort(OMX_U32 portIndex); status_t freeBuffer(OMX_U32 portIndex, size_t i); + status_t configureOutputBuffersFromNativeWindow( + OMX_U32 *nBufferCount, OMX_U32 *nBufferSize, + OMX_U32 *nMinUndequeuedBuffers); + status_t allocateOutputMetaDataBuffers(); + status_t submitOutputMetaDataBuffer(); status_t allocateOutputBuffersFromNativeWindow(); status_t cancelBufferToNativeWindow(BufferInfo *info); status_t freeOutputBuffersNotOwnedByComponent(); @@ -240,6 +264,7 @@ private: status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg); status_t setupH263EncoderParameters(const sp<AMessage> &msg); status_t setupAVCEncoderParameters(const sp<AMessage> &msg); + status_t setupVPXEncoderParameters(const sp<AMessage> &msg); status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level); @@ -252,23 +277,31 @@ private: status_t pushBlankBuffersToNativeWindow(); - // Returns true iff all buffers on the given port have status OWNED_BY_US. + // Returns true iff all buffers on the given port have status + // OWNED_BY_US or OWNED_BY_NATIVE_WINDOW. bool allYourBuffersAreBelongToUs(OMX_U32 portIndex); bool allYourBuffersAreBelongToUs(); + void waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs(); + size_t countBuffersOwnedByComponent(OMX_U32 portIndex) const; + size_t countBuffersOwnedByNativeWindow() const; void deferMessage(const sp<AMessage> &msg); void processDeferredMessages(); - void sendFormatChange(); + void sendFormatChange(const sp<AMessage> &reply); void signalError( OMX_ERRORTYPE error = OMX_ErrorUndefined, status_t internalError = UNKNOWN_ERROR); status_t requestIDRFrame(); + status_t setParameters(const sp<AMessage> ¶ms); + + // Send EOS on input stream. + void onSignalEndOfInputStream(); DISALLOW_EVIL_CONSTRUCTORS(ACodec); }; diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h index 1dc408f..14afb85 100644 --- a/include/media/stagefright/AudioPlayer.h +++ b/include/media/stagefright/AudioPlayer.h @@ -36,8 +36,16 @@ public: SEEK_COMPLETE }; + enum { + ALLOW_DEEP_BUFFERING = 0x01, + USE_OFFLOAD = 0x02, + HAS_VIDEO = 0x1000, + IS_STREAMING = 0x2000 + + }; + AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, - bool allowDeepBuffering = false, + uint32_t flags = 0, AwesomePlayer *audioObserver = NULL); virtual ~AudioPlayer(); @@ -51,7 +59,7 @@ public: status_t start(bool sourceAlreadyStarted = false); void pause(bool playPendingSamples = false); - void resume(); + status_t resume(); // Returns the timestamp of the last buffer played (in us). int64_t getMediaTimeUs(); @@ -67,10 +75,12 @@ public: status_t setPlaybackRatePermille(int32_t ratePermille); + void notifyAudioEOS(); + private: friend class VideoEditorAudioPlayer; sp<MediaSource> mSource; - AudioTrack *mAudioTrack; + sp<AudioTrack> mAudioTrack; MediaBuffer *mInputBuffer; @@ -97,17 +107,20 @@ private: MediaBuffer *mFirstBuffer; sp<MediaPlayerBase::AudioSink> mAudioSink; - bool mAllowDeepBuffering; // allow audio deep audio buffers. 
Helps with low power audio - // playback but implies high latency AwesomePlayer *mObserver; int64_t mPinnedTimeUs; + bool mPlaying; + int64_t mStartPosUs; + const uint32_t mCreateFlags; + static void AudioCallback(int event, void *user, void *info); void AudioCallback(int event, void *info); static size_t AudioSinkCallback( MediaPlayerBase::AudioSink *audioSink, - void *data, size_t size, void *me); + void *data, size_t size, void *me, + MediaPlayerBase::AudioSink::cb_event_t event); size_t fillBuffer(void *data, size_t size); @@ -116,6 +129,10 @@ private: void reset(); uint32_t getNumFramesPendingPlayout() const; + int64_t getOutputPlayPositionUs_l(); + + bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; } + bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; } AudioPlayer(const AudioPlayer &); AudioPlayer &operator=(const AudioPlayer &); diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h index 99f3c3b..4c9aaad 100644 --- a/include/media/stagefright/AudioSource.h +++ b/include/media/stagefright/AudioSource.h @@ -73,7 +73,7 @@ private: Condition mFrameAvailableCondition; Condition mFrameEncodingCompletionCondition; - AudioRecord *mRecord; + sp<AudioRecord> mRecord; status_t mInitCheck; bool mStarted; int32_t mSampleRate; diff --git a/include/media/stagefright/BufferProducerWrapper.h b/include/media/stagefright/BufferProducerWrapper.h new file mode 100644 index 0000000..d8acf30 --- /dev/null +++ b/include/media/stagefright/BufferProducerWrapper.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef BUFFER_PRODUCER_WRAPPER_H_ + +#define BUFFER_PRODUCER_WRAPPER_H_ + +#include <gui/IGraphicBufferProducer.h> + +namespace android { + +// Can't use static_cast to cast a RefBase back to an IGraphicBufferProducer, +// because IGBP's parent (IInterface) uses virtual inheritance. This class +// wraps IGBP while we pass it through AMessage. + +struct BufferProducerWrapper : RefBase { + BufferProducerWrapper( + const sp<IGraphicBufferProducer>& bufferProducer) : + mBufferProducer(bufferProducer) { } + + sp<IGraphicBufferProducer> getBufferProducer() const { + return mBufferProducer; + } + +private: + const sp<IGraphicBufferProducer> mBufferProducer; + + DISALLOW_EVIL_CONSTRUCTORS(BufferProducerWrapper); +}; + +} // namespace android + +#endif // BUFFER_PRODUCER_WRAPPER_H_ diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h index 6d6b8a9..a829916 100644 --- a/include/media/stagefright/CameraSource.h +++ b/include/media/stagefright/CameraSource.h @@ -25,6 +25,7 @@ #include <camera/CameraParameters.h> #include <utils/List.h> #include <utils/RefBase.h> +#include <utils/String16.h> namespace android { @@ -39,9 +40,11 @@ public: * settings (such as video size, frame rate, color format, etc) * from the default camera. 
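The BufferProducerWrapper declared above exists only so an IGraphicBufferProducer can travel inside an AMessage despite IInterface's virtual inheritance. A sketch of the round trip, with the "input-surface" entry name chosen purely for illustration:

    #include <media/stagefright/BufferProducerWrapper.h>
    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    void sendProducer(const sp<AMessage> &msg, const sp<IGraphicBufferProducer> &igbp) {
        msg->setObject("input-surface", new BufferProducerWrapper(igbp));
        msg->post();
    }

    sp<IGraphicBufferProducer> receiveProducer(const sp<AMessage> &msg) {
        sp<RefBase> obj;
        CHECK(msg->findObject("input-surface", &obj));
        // A direct static_cast to IGraphicBufferProducer is not possible, hence the wrapper.
        return static_cast<BufferProducerWrapper *>(obj.get())->getBufferProducer();
    }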
* + * @param clientName The package/process name of the client application. + * This is used for permissions checking. * @return NULL on error. */ - static CameraSource *Create(); + static CameraSource *Create(const String16 &clientName); /** * Factory method to create a new CameraSource. @@ -52,7 +55,11 @@ public: * * @param cameraId the id of the camera that the source will connect * to if camera is NULL; otherwise ignored. - * + * @param clientName the package/process name of the camera-using + * application if camera is NULL; otherwise ignored. Used for + * permissions checking. + * @param clientUid the UID of the camera-using application if camera is + * NULL; otherwise ignored. Used for permissions checking. * @param videoSize the dimension (in pixels) of the video frame * @param frameRate the target frames per second * @param surface the preview surface for display where preview @@ -71,9 +78,11 @@ public: static CameraSource *CreateFromCamera(const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t frameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, bool storeMetaDataInVideoBuffers = false); virtual ~CameraSource(); @@ -145,7 +154,7 @@ protected: sp<Camera> mCamera; sp<ICameraRecordingProxy> mCameraRecordingProxy; sp<DeathNotifier> mDeathNotifier; - sp<Surface> mSurface; + sp<IGraphicBufferProducer> mSurface; sp<MetaData> mMeta; int64_t mStartTimeUs; @@ -158,9 +167,9 @@ protected: int64_t mTimeBetweenFrameCaptureUs; CameraSource(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId, + int32_t cameraId, const String16& clientName, uid_t clientUid, Size videoSize, int32_t frameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, bool storeMetaDataInVideoBuffers); virtual void startCameraRecording(); @@ -198,17 +207,20 @@ private: status_t init(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId, Size videoSize, int32_t frameRate, - bool storeMetaDataInVideoBuffers); + int32_t cameraId, const String16& clientName, uid_t clientUid, + Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers); status_t initWithCameraAccess( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId, Size videoSize, int32_t frameRate, - bool storeMetaDataInVideoBuffers); + int32_t cameraId, const String16& clientName, uid_t clientUid, + Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers); status_t isCameraAvailable(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId); + int32_t cameraId, + const String16& clientName, + uid_t clientUid); + status_t isCameraColorFormatSupported(const CameraParameters& params); status_t configureCamera(CameraParameters* params, int32_t width, int32_t height, diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h index 0936da2..34213be 100644 --- a/include/media/stagefright/CameraSourceTimeLapse.h +++ b/include/media/stagefright/CameraSourceTimeLapse.h @@ -22,6 +22,7 @@ #include <utils/RefBase.h> #include <utils/threads.h> +#include <utils/String16.h> namespace android { @@ -35,10 +36,13 @@ public: const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t videoFrameRate, - const sp<Surface>& surface, - 
int64_t timeBetweenTimeLapseFrameCaptureUs); + const sp<IGraphicBufferProducer>& surface, + int64_t timeBetweenTimeLapseFrameCaptureUs, + bool storeMetaDataInVideoBuffers = true); virtual ~CameraSourceTimeLapse(); @@ -108,10 +112,13 @@ private: const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t videoFrameRate, - const sp<Surface>& surface, - int64_t timeBetweenTimeLapseFrameCaptureUs); + const sp<IGraphicBufferProducer>& surface, + int64_t timeBetweenTimeLapseFrameCaptureUs, + bool storeMetaDataInVideoBuffers = true); // Wrapper over CameraSource::signalBufferReturned() to implement quick stop. // It only handles the case when mLastReadBufferCopy is signalled. Otherwise diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h index 00d583e..157b1aa 100644 --- a/include/media/stagefright/DataSource.h +++ b/include/media/stagefright/DataSource.h @@ -54,6 +54,9 @@ public: // Convenience methods: bool getUInt16(off64_t offset, uint16_t *x); + bool getUInt24(off64_t offset, uint32_t *x); // 3 byte int, returned as a 32-bit int + bool getUInt32(off64_t offset, uint32_t *x); + bool getUInt64(off64_t offset, uint64_t *x); // May return ERROR_UNSUPPORTED. virtual status_t getSize(off64_t *size); @@ -77,7 +80,6 @@ public: const sp<DataSource> &source, String8 *mimeType, float *confidence, sp<AMessage> *meta); - static void RegisterSniffer(SnifferFunc func); static void RegisterDefaultSniffers(); // for DRM @@ -98,6 +100,9 @@ protected: private: static Mutex gSnifferMutex; static List<SnifferFunc> gSniffers; + static bool gSniffersRegistered; + + static void RegisterSniffer_l(SnifferFunc func); DataSource(const DataSource &); DataSource &operator=(const DataSource &); diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h index 3596b38..3ef6b9a 100644 --- a/include/media/stagefright/MPEG4Writer.h +++ b/include/media/stagefright/MPEG4Writer.h @@ -35,7 +35,13 @@ public: MPEG4Writer(const char *filename); MPEG4Writer(int fd); + // Limitations + // 1. No more than 2 tracks can be added + // 2. Only video or audio sources can be added + // 3. At most one video and one audio source can be added. virtual status_t addSource(const sp<MediaSource> &source); + + // Returns INVALID_OPERATION if there is no source or track. virtual status_t start(MetaData *param = NULL); virtual status_t stop() { return reset(); } virtual status_t pause(); @@ -68,6 +74,7 @@ private: int mFd; status_t mInitCheck; + bool mIsRealTimeRecording; bool mUse4ByteNalLength; bool mUse32BitOffset; bool mIsFileSizeLimitExplicitlyRequested; @@ -162,6 +169,13 @@ private: // Only makes sense for H.264/AVC bool useNalLengthFour(); + // Return whether the writer is used for real-time recording. + // In real-time recording mode, new samples will be allowed to be buffered into + // chunks by a higher-priority thread, even though the file writer has not + // drained those chunks yet. + // By default, real-time recording is on.
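A sketch of driving MPEG4Writer within the limits spelled out above (at most one audio and one video source), including how non-real-time muxing might be requested at start(). Whether kKeyRealTimeRecording is the parameter MPEG4Writer actually consults is an assumption here, inferred from the MetaData key renamed later in this change:

    #include <media/stagefright/MPEG4Writer.h>
    #include <media/stagefright/MediaSource.h>
    #include <media/stagefright/MetaData.h>

    using namespace android;

    status_t writeTwoTracks(int fd,
                            const sp<MediaSource> &audio,
                            const sp<MediaSource> &video) {
        sp<MPEG4Writer> writer = new MPEG4Writer(fd);
        status_t err = writer->addSource(audio);        // track 1
        if (err == OK) err = writer->addSource(video);  // track 2; a third would be rejected
        if (err != OK) return err;

        sp<MetaData> params = new MetaData;
        params->setInt32(kKeyRealTimeRecording, false); // assumed: request offline muxing
        err = writer->start(params.get());              // INVALID_OPERATION if no source/track
        if (err != OK) return err;

        // The writer pulls from both sources on its own threads until stopped.
        return writer->stop();
    }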
+ bool isRealTimeRecording() const; + void lock(); void unlock(); diff --git a/include/media/stagefright/MediaAdapter.h b/include/media/stagefright/MediaAdapter.h new file mode 100644 index 0000000..369fce6 --- /dev/null +++ b/include/media/stagefright/MediaAdapter.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MEDIA_ADAPTER_H +#define MEDIA_ADAPTER_H + +#include <media/stagefright/foundation/ABase.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MetaData.h> +#include <utils/threads.h> + +namespace android { + +// Convert the MediaMuxer's push model into MPEG4Writer's pull model. +// Used only by the MediaMuxer for now. +struct MediaAdapter : public MediaSource, public MediaBufferObserver { +public: + // The MetaData is used to set the format and is returned by getFormat(). + MediaAdapter(const sp<MetaData> &meta); + virtual ~MediaAdapter(); + ///////////////////////////////////////////////// + // Inherited functions from MediaSource + ///////////////////////////////////////////////// + + virtual status_t start(MetaData *params = NULL); + virtual status_t stop(); + virtual sp<MetaData> getFormat(); + virtual status_t read( + MediaBuffer **buffer, const ReadOptions *options = NULL); + + ///////////////////////////////////////////////// + // Inherited functions from MediaBufferObserver + ///////////////////////////////////////////////// + + virtual void signalBufferReturned(MediaBuffer *buffer); + + ///////////////////////////////////////////////// + // Non-inherited functions: + ///////////////////////////////////////////////// + + // pushBuffer() will wait for read() to finish, and read() makes a + // deep copy, so that after pushBuffer() returns, the buffer can be re-used. + status_t pushBuffer(MediaBuffer *buffer); + +private: + Mutex mAdapterLock; + // Make sure read() waits for the incoming buffer. + Condition mBufferReadCond; + // Make sure pushBuffer() waits until the current buffer is consumed.
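The push/pull bridge described above is wired up roughly like this: the muxing client attaches a MediaAdapter to MPEG4Writer (which pulls through read()), and the producing thread hands samples over with pushBuffer(). A minimal sketch using only the declarations shown above:

    #include <media/stagefright/MPEG4Writer.h>
    #include <media/stagefright/MediaAdapter.h>
    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Attach a push-model track to the pull-model MPEG4Writer.
    sp<MediaAdapter> attachTrack(const sp<MPEG4Writer> &writer,
                                 const sp<MetaData> &trackFormat) {
        sp<MediaAdapter> adapter = new MediaAdapter(trackFormat);
        if (writer->addSource(adapter) != OK) {
            return NULL;
        }
        return adapter;
    }

    // Called on the producer's thread for every sample; blocks until read()
    // has deep-copied the buffer, per the contract documented above.
    status_t feed(const sp<MediaAdapter> &adapter, MediaBuffer *buffer) {
        return adapter->pushBuffer(buffer);
    }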
+ Condition mBufferReturnedCond; + + MediaBuffer *mCurrentMediaBuffer; + + bool mStarted; + sp<MetaData> mOutputFormat; + + DISALLOW_EVIL_CONSTRUCTORS(MediaAdapter); +}; + +} // namespace android + +#endif // MEDIA_ADAPTER_H diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h index b1e57cf..76aa503 100644 --- a/include/media/stagefright/MediaCodec.h +++ b/include/media/stagefright/MediaCodec.h @@ -18,7 +18,7 @@ #define MEDIA_CODEC_H_ -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <media/hardware/CryptoAPI.h> #include <media/stagefright/foundation/AHandler.h> #include <utils/Vector.h> @@ -31,7 +31,7 @@ struct AMessage; struct AString; struct ICrypto; struct SoftwareRenderer; -struct SurfaceTextureClient; +struct Surface; struct MediaCodec : public AHandler { enum ConfigureFlags { @@ -52,10 +52,12 @@ struct MediaCodec : public AHandler { status_t configure( const sp<AMessage> &format, - const sp<SurfaceTextureClient> &nativeWindow, + const sp<Surface> &nativeWindow, const sp<ICrypto> &crypto, uint32_t flags); + status_t createInputSurface(sp<IGraphicBufferProducer>* bufferProducer); + status_t start(); // Returns to a state in which the component remains allocated but @@ -101,6 +103,8 @@ struct MediaCodec : public AHandler { status_t renderOutputBufferAndRelease(size_t index); status_t releaseOutputBuffer(size_t index); + status_t signalEndOfInputStream(); + status_t getOutputFormat(sp<AMessage> *format) const; status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const; @@ -115,6 +119,8 @@ struct MediaCodec : public AHandler { status_t getName(AString *componentName) const; + status_t setParameters(const sp<AMessage> ¶ms); + protected: virtual ~MediaCodec(); virtual void onMessageReceived(const sp<AMessage> &msg); @@ -141,6 +147,7 @@ private: enum { kWhatInit = 'init', kWhatConfigure = 'conf', + kWhatCreateInputSurface = 'cisf', kWhatStart = 'strt', kWhatStop = 'stop', kWhatRelease = 'rele', @@ -148,6 +155,7 @@ private: kWhatQueueInputBuffer = 'queI', kWhatDequeueOutputBuffer = 'deqO', kWhatReleaseOutputBuffer = 'relO', + kWhatSignalEndOfInputStream = 'eois', kWhatGetBuffers = 'getB', kWhatFlush = 'flus', kWhatGetOutputFormat = 'getO', @@ -157,6 +165,7 @@ private: kWhatRequestIDRFrame = 'ridr', kWhatRequestActivityNotification = 'racN', kWhatGetName = 'getN', + kWhatSetParameters = 'setP', }; enum { @@ -167,6 +176,9 @@ private: kFlagDequeueInputPending = 16, kFlagDequeueOutputPending = 32, kFlagIsSecure = 64, + kFlagSawMediaServerDie = 128, + kFlagIsEncoder = 256, + kFlagGatherCodecSpecificData = 512, }; struct BufferInfo { @@ -184,7 +196,7 @@ private: AString mComponentName; uint32_t mReplyID; uint32_t mFlags; - sp<SurfaceTextureClient> mNativeWindow; + sp<Surface> mNativeWindow; SoftwareRenderer *mSoftRenderer; sp<AMessage> mOutputFormat; @@ -203,6 +215,8 @@ private: sp<AMessage> mActivityNotify; + bool mHaveInputSurface; + MediaCodec(const sp<ALooper> &looper); static status_t PostAndAwaitResponse( @@ -226,10 +240,14 @@ private: status_t queueCSDInputBuffer(size_t bufferIndex); status_t setNativeWindow( - const sp<SurfaceTextureClient> &surfaceTextureClient); + const sp<Surface> &surface); void postActivityNotificationIfPossible(); + status_t onSetParameters(const sp<AMessage> ¶ms); + + status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer); + DISALLOW_EVIL_CONSTRUCTORS(MediaCodec); }; diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h 
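The createInputSurface()/signalEndOfInputStream() additions above are the native half of surface-input encoding. A sketch of the intended sequence, assuming an encoder configured with CONFIGURE_FLAG_ENCODE; MediaCodec::CreateByType and the "video/avc" MIME are used only for illustration:

    #include <gui/IGraphicBufferProducer.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    status_t startSurfaceEncoder(const sp<ALooper> &looper,
                                 const sp<AMessage> &format,   // width/height/bitrate etc.
                                 sp<MediaCodec> *outCodec,
                                 sp<IGraphicBufferProducer> *outSurface) {
        sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", true /* encoder */);
        if (codec == NULL) return NO_INIT;

        status_t err = codec->configure(format, NULL /* nativeWindow */, NULL /* crypto */,
                                        MediaCodec::CONFIGURE_FLAG_ENCODE);
        if (err != OK) return err;

        // Assumed ordering: after configure(), before start().
        err = codec->createInputSurface(outSurface);
        if (err != OK) return err;

        err = codec->start();
        if (err == OK) *outCodec = codec;
        return err;
    }

    // Once the producer has queued its last frame: codec->signalEndOfInputStream();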
index dfb845b..590623b 100644 --- a/include/media/stagefright/MediaCodecList.h +++ b/include/media/stagefright/MediaCodecList.h @@ -50,7 +50,8 @@ struct MediaCodecList { status_t getCodecCapabilities( size_t index, const char *type, Vector<ProfileLevel> *profileLevels, - Vector<uint32_t> *colorFormats) const; + Vector<uint32_t> *colorFormats, + uint32_t *flags) const; private: enum Section { diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h index 457d5d7..85693d4 100644 --- a/include/media/stagefright/MediaDefs.h +++ b/include/media/stagefright/MediaDefs.h @@ -22,7 +22,8 @@ namespace android { extern const char *MEDIA_MIMETYPE_IMAGE_JPEG; -extern const char *MEDIA_MIMETYPE_VIDEO_VPX; +extern const char *MEDIA_MIMETYPE_VIDEO_VP8; +extern const char *MEDIA_MIMETYPE_VIDEO_VP9; extern const char *MEDIA_MIMETYPE_VIDEO_AVC; extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4; extern const char *MEDIA_MIMETYPE_VIDEO_H263; @@ -42,6 +43,7 @@ extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW; extern const char *MEDIA_MIMETYPE_AUDIO_RAW; extern const char *MEDIA_MIMETYPE_AUDIO_FLAC; extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS; +extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM; extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4; extern const char *MEDIA_MIMETYPE_CONTAINER_WAV; diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h index bb01467..686f286 100644 --- a/include/media/stagefright/MediaErrors.h +++ b/include/media/stagefright/MediaErrors.h @@ -54,14 +54,13 @@ enum { ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5, ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6, ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7, + ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8, + ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9, + ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10, ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500, ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999, - // Deprecated - ERROR_DRM_WV_VENDOR_MAX = ERROR_DRM_VENDOR_MAX, - ERROR_DRM_WV_VENDOR_MIN = ERROR_DRM_VENDOR_MIN, - // Heartbeat Error Codes HEARTBEAT_ERROR_BASE = -3000, ERROR_HEARTBEAT_TERMINATE_REQUESTED = HEARTBEAT_ERROR_BASE, diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h new file mode 100644 index 0000000..ff6a66e --- /dev/null +++ b/include/media/stagefright/MediaMuxer.h @@ -0,0 +1,138 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MEDIA_MUXER_H_ +#define MEDIA_MUXER_H_ + +#include <utils/Errors.h> +#include <utils/RefBase.h> +#include <utils/Vector.h> +#include <utils/threads.h> + +namespace android { + +struct ABuffer; +struct AMessage; +struct MediaAdapter; +struct MediaBuffer; +struct MediaSource; +struct MetaData; +struct MPEG4Writer; + +// MediaMuxer is used to mux multiple tracks into a video. Currently, we only +// support a mp4 file as the output. 
+// The expected calling order of the functions is: +// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop +// If the muxing operation needs to be cancelled, the app is responsible for +// deleting the output file after stop(). +struct MediaMuxer : public RefBase { +public: + // Please update media/java/android/media/MediaMuxer.java if the + // OutputFormat is updated. + enum OutputFormat { + OUTPUT_FORMAT_MPEG_4 = 0, + OUTPUT_FORMAT_LIST_END // must be last - used to validate format type + }; + + // Construct the muxer with the output file path. + MediaMuxer(const char *path, OutputFormat format); + + // Construct the muxer with the file descriptor. Note that the MediaMuxer + // will close this file at stop(). + MediaMuxer(int fd, OutputFormat format); + + virtual ~MediaMuxer(); + + /** + * Add a track with its format information. This should be + * called before start(). + * @param format the track's format. + * @return the track's index, or a negative number on error. + */ + ssize_t addTrack(const sp<AMessage> &format); + + /** + * Start muxing. Make sure all the tracks have been added before + * calling this. + */ + status_t start(); + + /** + * Set the orientation hint. + * @param degrees The rotation degrees. It has to be either 0, + * 90, 180 or 270. + * @return OK if no error. + */ + status_t setOrientationHint(int degrees); + + /** + * Set the location. + * @param latitude The latitude in degrees x 10000. Its value must be in the range + * [-900000, 900000]. + * @param longitude The longitude in degrees x 10000. Its value must be in the range + * [-1800000, 1800000]. + * @return OK if no error. + */ + status_t setLocation(int latitude, int longitude); + + /** + * Stop muxing. + * This method is a blocking call. Depending on how + * much data is buffered internally, stopping the muxer + * may take a long time. Launching this call from the UI thread + * is not recommended. + * @return OK if no error. + */ + status_t stop(); + + /** + * Send a sample buffer for muxing. + * The buffer can be reused once this method returns. Typically, + * this function won't block for very long, so there + * is no need to use a separate thread to call it when + * pushing a buffer. + * @param buffer the incoming sample buffer. + * @param trackIndex the buffer's track index number. + * @param timeUs the buffer's time stamp. + * @param flags the only supported flag for now is + * MediaCodec::BUFFER_FLAG_SYNCFRAME. + * @return OK if no error. + */ + status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex, + int64_t timeUs, uint32_t flags); + +private: + sp<MPEG4Writer> mWriter; + Vector< sp<MediaAdapter> > mTrackList; // Each track has its MediaAdapter. + sp<MetaData> mFileMeta; // Metadata for the whole file.
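A compact sketch of the calling order documented above (constructor, addTrack(), start(), writeSampleData(), stop()); the track format message and the sample buffer are assumed to come from elsewhere (for example a MediaCodec output) and are not built here:

    #include <media/stagefright/MediaMuxer.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    status_t muxOneSample(int fd, const sp<AMessage> &trackFormat,
                          const sp<ABuffer> &sample, int64_t timeUs, uint32_t flags) {
        sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);

        ssize_t trackIndex = muxer->addTrack(trackFormat);   // must precede start()
        if (trackIndex < 0) return (status_t)trackIndex;

        muxer->setOrientationHint(90);                       // optional; 0, 90, 180 or 270
        status_t err = muxer->start();
        if (err != OK) return err;

        err = muxer->writeSampleData(sample, trackIndex, timeUs, flags);
        if (err != OK) return err;

        return muxer->stop();                                // blocking; also closes fd
    }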
+ + Mutex mMuxerLock; + + enum State { + UNINITIALIZED, + INITIALIZED, + STARTED, + STOPPED + }; + State mState; + + DISALLOW_EVIL_CONSTRUCTORS(MediaMuxer); +}; + +} // namespace android + +#endif // MEDIA_MUXER_H_ + diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h index e91904c..db8216b 100644 --- a/include/media/stagefright/MetaData.h +++ b/include/media/stagefright/MetaData.h @@ -35,6 +35,8 @@ enum { kKeyHeight = 'heig', // int32_t, image pixel kKeyDisplayWidth = 'dWid', // int32_t, display/presentation kKeyDisplayHeight = 'dHgt', // int32_t, display/presentation + kKeySARWidth = 'sarW', // int32_t, sampleAspectRatio width + kKeySARHeight = 'sarH', // int32_t, sampleAspectRatio height // a rectangle, if absent assumed to be (0, 0, width - 1, height - 1) kKeyCropRect = 'crop', @@ -110,7 +112,7 @@ enum { // kKeyTrackTimeStatus is used to track progress in elapsed time kKeyTrackTimeStatus = 'tktm', // int64_t - kKeyNotRealTime = 'ntrt', // bool (int32_t) + kKeyRealTimeRecording = 'rtrc', // bool (int32_t) kKeyNumBuffers = 'nbbf', // int32_t // Ogg files can be tagged to be automatically looping... @@ -132,6 +134,7 @@ enum { kKeyRequiresSecureBuffers = 'secu', // bool (int32_t) kKeyIsADTS = 'adts', // bool (int32_t) + kKeyAACAOT = 'aaot', // int32_t // If a MediaBuffer's data represents (at least partially) encrypted // data, the following fields aid in decryption. @@ -155,6 +158,10 @@ enum { kKeyCryptoKey = 'cryK', // uint8_t[16] kKeyCryptoIV = 'cryI', // uint8_t[16] kKeyCryptoMode = 'cryM', // int32_t + + kKeyCryptoDefaultIVSize = 'cryS', // int32_t + + kKeyPssh = 'pssh', // raw data }; enum { @@ -208,6 +215,8 @@ public: bool findData(uint32_t key, uint32_t *type, const void **data, size_t *size) const; + bool hasData(uint32_t key) const; + void dumpToLog() const; protected: diff --git a/include/media/stagefright/NativeWindowWrapper.h b/include/media/stagefright/NativeWindowWrapper.h index 97cc0ce..cfeec22 100644 --- a/include/media/stagefright/NativeWindowWrapper.h +++ b/include/media/stagefright/NativeWindowWrapper.h @@ -18,29 +18,29 @@ #define NATIVE_WINDOW_WRAPPER_H_ -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> namespace android { -// SurfaceTextureClient derives from ANativeWindow which derives from multiple +// Surface derives from ANativeWindow which derives from multiple // base classes, in order to carry it in AMessages, we'll temporarily wrap it // into a NativeWindowWrapper. 
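The MetaData additions above (the sample-aspect-ratio keys and hasData()) can be exercised as follows; the 4:3 ratio is an arbitrary value for the example:

    #include <media/stagefright/MetaData.h>

    using namespace android;

    void sarExample(const sp<MetaData> &videoFormat) {
        videoFormat->setInt32(kKeySARWidth, 4);
        videoFormat->setInt32(kKeySARHeight, 3);

        if (videoFormat->hasData(kKeySARWidth)) {   // existence check without fetching the value
            int32_t sarW = 0, sarH = 0;
            videoFormat->findInt32(kKeySARWidth, &sarW);
            videoFormat->findInt32(kKeySARHeight, &sarH);
            // A display would scale the coded width by sarW/sarH before presentation.
        }
    }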
struct NativeWindowWrapper : RefBase { NativeWindowWrapper( - const sp<SurfaceTextureClient> &surfaceTextureClient) : + const sp<Surface> &surfaceTextureClient) : mSurfaceTextureClient(surfaceTextureClient) { } sp<ANativeWindow> getNativeWindow() const { return mSurfaceTextureClient; } - sp<SurfaceTextureClient> getSurfaceTextureClient() const { + sp<Surface> getSurfaceTextureClient() const { return mSurfaceTextureClient; } private: - const sp<SurfaceTextureClient> mSurfaceTextureClient; + const sp<Surface> mSurfaceTextureClient; DISALLOW_EVIL_CONSTRUCTORS(NativeWindowWrapper); }; diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h index 0833110..5ae6f6b 100644 --- a/include/media/stagefright/NuMediaExtractor.h +++ b/include/media/stagefright/NuMediaExtractor.h @@ -55,6 +55,8 @@ struct NuMediaExtractor : public RefBase { size_t countTracks() const; status_t getTrackFormat(size_t index, sp<AMessage> *format) const; + status_t getFileFormat(sp<AMessage> *format) const; + status_t selectTrack(size_t index); status_t unselectTrack(size_t index); diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h index 583c3b3..daaf20f 100644 --- a/include/media/stagefright/OMXCodec.h +++ b/include/media/stagefright/OMXCodec.h @@ -361,9 +361,14 @@ private: }; struct CodecCapabilities { + enum { + kFlagSupportsAdaptivePlayback = 1 << 0, + }; + String8 mComponentName; Vector<CodecProfileLevel> mProfileLevels; Vector<OMX_U32> mColorFormats; + uint32_t mFlags; }; // Return a vector of componentNames with supported profile/level pairs diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h index e56527d..db5f947 100644 --- a/include/media/stagefright/SurfaceMediaSource.h +++ b/include/media/stagefright/SurfaceMediaSource.h @@ -17,7 +17,7 @@ #ifndef ANDROID_GUI_SURFACEMEDIASOURCE_H #define ANDROID_GUI_SURFACEMEDIASOURCE_H -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <gui/BufferQueue.h> #include <utils/threads.h> @@ -35,7 +35,7 @@ class GraphicBuffer; // ASSUMPTIONS // 1. SurfaceMediaSource is initialized with width*height which // can never change. However, deqeueue buffer does not currently -// enforce this as in BufferQueue, dequeue can be used by SurfaceTexture +// enforce this as in BufferQueue, dequeue can be used by Surface // which can modify the default width and heght. Also neither the width // nor height can be 0. // 2. setSynchronousMode is never used (basically no one should call @@ -56,7 +56,7 @@ class GraphicBuffer; class SurfaceMediaSource : public MediaSource, public MediaBufferObserver, - protected BufferQueue::ConsumerListener { + protected ConsumerListener { public: enum { MIN_UNDEQUEUED_BUFFERS = 4}; @@ -122,7 +122,7 @@ public: protected: // Implementation of the BufferQueue::ConsumerListener interface. These - // calls are used to notify the SurfaceTexture of asynchronous events in the + // calls are used to notify the Surface of asynchronous events in the // BufferQueue. 
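To illustrate the new NuMediaExtractor::getFileFormat() alongside the existing per-track query: a sketch that logs the container format and each track's format, assuming the returned message carries a "mime" entry the way track formats do:

    #define LOG_TAG "FormatDump"
    #include <utils/Log.h>
    #include <media/stagefright/NuMediaExtractor.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    status_t dumpFormats(const sp<NuMediaExtractor> &extractor) {
        sp<AMessage> fileFormat;
        status_t err = extractor->getFileFormat(&fileFormat);
        if (err != OK) return err;

        AString mime;
        if (fileFormat->findString("mime", &mime)) {
            ALOGI("container: %s", mime.c_str());
        }

        for (size_t i = 0; i < extractor->countTracks(); ++i) {
            sp<AMessage> trackFormat;
            if (extractor->getTrackFormat(i, &trackFormat) == OK) {
                ALOGI("track %zu: %s", i, trackFormat->debugString().c_str());
            }
        }
        return OK;
    }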
virtual void onFrameAvailable(); @@ -146,9 +146,13 @@ private: // this consumer sp<BufferQueue> mBufferQueue; - // mBufferSlot caches GraphicBuffers from the buffer queue - sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS]; + struct SlotData { + sp<GraphicBuffer> mGraphicBuffer; + uint64_t mFrameNumber; + }; + // mSlots caches GraphicBuffers and frameNumbers from the buffer queue + SlotData mSlots[BufferQueue::NUM_BUFFER_SLOTS]; // The permenent width and height of SMS buffers int mWidth; @@ -157,7 +161,7 @@ private: // mCurrentSlot is the buffer slot index of the buffer that is currently // being used by buffer consumer // (e.g. StageFrightRecorder in the case of SurfaceMediaSource or GLTexture - // in the case of SurfaceTexture). + // in the case of Surface). // It is initialized to INVALID_BUFFER_SLOT, // indicating that no buffer slot is currently bound to the texture. Note, // however, that a value of INVALID_BUFFER_SLOT does not necessarily mean diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h index 8213af9..bbad271 100644 --- a/include/media/stagefright/Utils.h +++ b/include/media/stagefright/Utils.h @@ -18,9 +18,12 @@ #define UTILS_H_ +#include <media/stagefright/foundation/AString.h> #include <stdint.h> #include <utils/Errors.h> #include <utils/RefBase.h> +#include <system/audio.h> +#include <media/MediaPlayerInterface.h> namespace android { @@ -45,6 +48,18 @@ status_t convertMetaDataToMessage( void convertMessageToMetaData( const sp<AMessage> &format, sp<MetaData> &meta); +AString MakeUserAgent(); + +// Convert a MIME type to a AudioSystem::audio_format +status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime); + +// Send information from MetaData to the HAL via AudioSink +status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta); + +// Check whether the stream defined by meta can be offloaded to hardware +bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, + bool isStreaming, audio_stream_type_t streamType); + } // namespace android #endif // UTILS_H_ diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h index 2e5fd73..940fc55 100644 --- a/include/media/stagefright/foundation/ALooperRoster.h +++ b/include/media/stagefright/foundation/ALooperRoster.h @@ -30,6 +30,7 @@ struct ALooperRoster { const sp<ALooper> looper, const sp<AHandler> &handler); void unregisterHandler(ALooper::handler_id handlerID); + void unregisterStaleHandlers(); status_t postMessage(const sp<AMessage> &msg, int64_t delayUs = 0); void deliverMessage(const sp<AMessage> &msg); diff --git a/include/media/stagefright/foundation/ANetworkSession.h b/include/media/stagefright/foundation/ANetworkSession.h new file mode 100644 index 0000000..fd3ebaa --- /dev/null +++ b/include/media/stagefright/foundation/ANetworkSession.h @@ -0,0 +1,135 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef A_NETWORK_SESSION_H_ + +#define A_NETWORK_SESSION_H_ + +#include <media/stagefright/foundation/ABase.h> +#include <utils/KeyedVector.h> +#include <utils/RefBase.h> +#include <utils/Thread.h> + +#include <netinet/in.h> + +namespace android { + +struct AMessage; + +// Helper class to manage a number of live sockets (datagram and stream-based) +// on a single thread. Clients are notified about activity through AMessages. +struct ANetworkSession : public RefBase { + ANetworkSession(); + + status_t start(); + status_t stop(); + + status_t createRTSPClient( + const char *host, unsigned port, const sp<AMessage> ¬ify, + int32_t *sessionID); + + status_t createRTSPServer( + const struct in_addr &addr, unsigned port, + const sp<AMessage> ¬ify, int32_t *sessionID); + + status_t createUDPSession( + unsigned localPort, const sp<AMessage> ¬ify, int32_t *sessionID); + + status_t createUDPSession( + unsigned localPort, + const char *remoteHost, + unsigned remotePort, + const sp<AMessage> ¬ify, + int32_t *sessionID); + + status_t connectUDPSession( + int32_t sessionID, const char *remoteHost, unsigned remotePort); + + // passive + status_t createTCPDatagramSession( + const struct in_addr &addr, unsigned port, + const sp<AMessage> ¬ify, int32_t *sessionID); + + // active + status_t createTCPDatagramSession( + unsigned localPort, + const char *remoteHost, + unsigned remotePort, + const sp<AMessage> ¬ify, + int32_t *sessionID); + + status_t destroySession(int32_t sessionID); + + status_t sendRequest( + int32_t sessionID, const void *data, ssize_t size = -1, + bool timeValid = false, int64_t timeUs = -1ll); + + status_t switchToWebSocketMode(int32_t sessionID); + + enum NotificationReason { + kWhatError, + kWhatConnected, + kWhatClientConnected, + kWhatData, + kWhatDatagram, + kWhatBinaryData, + kWhatWebSocketMessage, + kWhatNetworkStall, + }; + +protected: + virtual ~ANetworkSession(); + +private: + struct NetworkThread; + struct Session; + + Mutex mLock; + sp<Thread> mThread; + + int32_t mNextSessionID; + + int mPipeFd[2]; + + KeyedVector<int32_t, sp<Session> > mSessions; + + enum Mode { + kModeCreateUDPSession, + kModeCreateTCPDatagramSessionPassive, + kModeCreateTCPDatagramSessionActive, + kModeCreateRTSPServer, + kModeCreateRTSPClient, + }; + status_t createClientOrServer( + Mode mode, + const struct in_addr *addr, + unsigned port, + const char *remoteHost, + unsigned remotePort, + const sp<AMessage> ¬ify, + int32_t *sessionID); + + void threadLoop(); + void interrupt(); + + static status_t MakeSocketNonBlocking(int s); + + DISALLOW_EVIL_CONSTRUCTORS(ANetworkSession); +}; + +} // namespace android + +#endif // A_NETWORK_SESSION_H_ diff --git a/include/media/stagefright/foundation/ParsedMessage.h b/include/media/stagefright/foundation/ParsedMessage.h new file mode 100644 index 0000000..9d43a93 --- /dev/null +++ b/include/media/stagefright/foundation/ParsedMessage.h @@ -0,0 +1,60 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <media/stagefright/foundation/ABase.h> +#include <media/stagefright/foundation/AString.h> +#include <utils/KeyedVector.h> +#include <utils/RefBase.h> + +namespace android { + +// Encapsulates an "HTTP/RTSP style" response, i.e. a status line, +// key/value pairs making up the headers and an optional body/content. +struct ParsedMessage : public RefBase { + static sp<ParsedMessage> Parse( + const char *data, size_t size, bool noMoreData, size_t *length); + + bool findString(const char *name, AString *value) const; + bool findInt32(const char *name, int32_t *value) const; + + const char *getContent() const; + + bool getRequestField(size_t index, AString *field) const; + bool getStatusCode(int32_t *statusCode) const; + + AString debugString() const; + + static bool GetAttribute(const char *s, const char *key, AString *value); + + static bool GetInt32Attribute( + const char *s, const char *key, int32_t *value); + + +protected: + virtual ~ParsedMessage(); + +private: + KeyedVector<AString, AString> mDict; + AString mContent; + + ParsedMessage(); + + ssize_t parse(const char *data, size_t size, bool noMoreData); + + DISALLOW_EVIL_CONSTRUCTORS(ParsedMessage); +}; + +} // namespace android
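Finally, a sketch of feeding the ParsedMessage helper above from a buffered socket read; the RTSP response text is fabricated for the example, and the lower-case header lookup is an assumption about how keys are stored:

    #include <media/stagefright/foundation/ParsedMessage.h>

    using namespace android;

    void parseExample() {
        const char data[] =
            "RTSP/1.0 200 OK\r\n"
            "CSeq: 2\r\n"
            "Session: 12345678\r\n"
            "\r\n";

        size_t length;
        sp<ParsedMessage> msg = ParsedMessage::Parse(
                data, sizeof(data) - 1, false /* noMoreData */, &length);
        if (msg == NULL) {
            return;   // headers incomplete so far; buffer more bytes and retry
        }

        int32_t statusCode;
        AString session;
        if (msg->getStatusCode(&statusCode) && msg->findString("session", &session)) {
            // statusCode == 200, session == "12345678"; 'length' is how many
            // input bytes this message consumed from the buffer.
        }
    }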