Diffstat (limited to 'include')
-rw-r--r--  include/gui/SurfaceTexture.h                       |  5
-rw-r--r--  include/media/AudioEffect.h                        | 31
-rw-r--r--  include/media/IAudioPolicyService.h                |  3
-rw-r--r--  include/media/IMediaRecorder.h                     |  1
-rw-r--r--  include/media/mediarecorder.h                      |  2
-rw-r--r--  include/media/stagefright/CameraSourceTimeLapse.h  | 76
-rw-r--r--  include/media/stagefright/MetadataBufferType.h     | 10
-rw-r--r--  include/media/stagefright/SurfaceMediaSource.h     |  6
-rw-r--r--  include/surfaceflinger/Surface.h                   |  7
9 files changed, 54 insertions(+), 87 deletions(-)
diff --git a/include/gui/SurfaceTexture.h b/include/gui/SurfaceTexture.h
index 134c208..2a8e725 100644
--- a/include/gui/SurfaceTexture.h
+++ b/include/gui/SurfaceTexture.h
@@ -211,7 +211,6 @@ protected:
// all slots.
void freeAllBuffers();
static bool isExternalFormat(uint32_t format);
- static GLenum getTextureTarget(uint32_t format);
private:
@@ -348,10 +347,6 @@ private:
// reset mCurrentTexture to INVALID_BUFFER_SLOT.
int mCurrentTexture;
- // mCurrentTextureTarget is the GLES texture target to be used with the
- // current texture.
- GLenum mCurrentTextureTarget;
-
// mCurrentTextureBuf is the graphic buffer of the current texture. It's
// possible that this buffer is not associated with any buffer slot, so we
// must track it separately in order to support the getCurrentBuffer method.
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 496b23e..1417416 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -21,6 +21,7 @@
#include <sys/types.h>
#include <media/IAudioFlinger.h>
+#include <media/IAudioPolicyService.h>
#include <media/IEffect.h>
#include <media/IEffectClient.h>
#include <hardware/audio_effect.h>
@@ -111,6 +112,36 @@ public:
/*
+ * Returns a list of descriptors corresponding to the pre processings enabled by default
+ * on an AudioRecord with the supplied audio session ID.
+ *
+ * Parameters:
+ * audioSession: audio session ID.
+ * descriptors: address where the effect descriptors should be returned.
+ * count: as input, the maximum number of descriptors that should be returned
+ * as output, the number of descriptors returned if status is NO_ERROR, or the actual
+ * number of enabled pre processings if status is NO_MEMORY
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * NO_MEMORY the number of descriptors to return is more than the maximum number
+ * indicated by count.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * NO_INIT effect library failed to initialize
+ * BAD_VALUE invalid audio session or descriptor pointers
+ *
+ * Returned value
+ * *descriptors updated with the descriptors of the pre processings enabled by default
+ * *count number of descriptors returned if the returned status is NO_ERROR,
+ * or the total number of pre processings enabled by default if the returned status is
+ * NO_MEMORY. This happens if the count passed as input is less than the number
+ * of descriptors to return
+ */
+ static status_t queryDefaultPreProcessing(int audioSession,
+ effect_descriptor_t *descriptors,
+ uint32_t *count);
+
+ /*
* Events used by callback function (effect_callback_t).
*/
enum event_type {
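
The contract documented for queryDefaultPreProcessing() above (NO_MEMORY reports the actual count) lends itself to a call-then-retry pattern. A minimal caller sketch follows; listDefaultPreProcessing is a hypothetical helper, not part of this change, and it assumes the standard libmedia and audio_effect headers are available.

```cpp
// Sketch only: list the pre processings enabled by default on a session.
// listDefaultPreProcessing is a hypothetical helper, not part of this patch.
#include <stdio.h>
#include <media/AudioEffect.h>
#include <utils/Errors.h>

using namespace android;

status_t listDefaultPreProcessing(int audioSession) {
    uint32_t count = 4;  // initial guess; NO_MEMORY reports the real count
    effect_descriptor_t *descriptors = new effect_descriptor_t[count];
    status_t status = AudioEffect::queryDefaultPreProcessing(audioSession,
                                                             descriptors, &count);
    if (status == NO_MEMORY) {
        // count now holds the actual number of enabled pre processings: retry.
        delete[] descriptors;
        descriptors = new effect_descriptor_t[count];
        status = AudioEffect::queryDefaultPreProcessing(audioSession,
                                                        descriptors, &count);
    }
    if (status == NO_ERROR) {
        for (uint32_t i = 0; i < count; i++) {
            printf("default pre processing: %s\n", descriptors[i].name);
        }
    }
    delete[] descriptors;
    return status;
}
```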
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 86b9f85..ed265e1 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -85,6 +85,9 @@ public:
int id) = 0;
virtual status_t unregisterEffect(int id) = 0;
virtual bool isStreamActive(int stream, uint32_t inPastMs = 0) const = 0;
+ virtual status_t queryDefaultPreProcessing(int audioSession,
+ effect_descriptor_t *descriptors,
+ uint32_t *count) = 0;
};
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 007aea6..ec84e25 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -43,7 +43,6 @@ public:
virtual status_t setAudioEncoder(int ae) = 0;
virtual status_t setOutputFile(const char* path) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setOutputFileAuxiliary(int fd) = 0;
virtual status_t setVideoSize(int width, int height) = 0;
virtual status_t setVideoFrameRate(int frames_per_second) = 0;
virtual status_t setParameters(const String8& params) = 0;
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 72d3736..30db642 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -215,7 +215,6 @@ public:
status_t setAudioEncoder(int ae);
status_t setOutputFile(const char* path);
status_t setOutputFile(int fd, int64_t offset, int64_t length);
- status_t setOutputFileAuxiliary(int fd);
status_t setVideoSize(int width, int height);
status_t setVideoFrameRate(int frames_per_second);
status_t setParameters(const String8& params);
@@ -249,7 +248,6 @@ private:
bool mIsAudioEncoderSet;
bool mIsVideoEncoderSet;
bool mIsOutputFileSet;
- bool mIsAuxiliaryOutputFileSet;
Mutex mLock;
Mutex mNotifyLock;
};
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index f07ebba..0e264c7 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -53,27 +53,10 @@ public:
void startQuickReadReturns();
private:
- // If true, will use still camera takePicture() for time lapse frames
- // If false, will use the videocamera frames instead.
- bool mUseStillCameraForTimeLapse;
-
- // Size of picture taken from still camera. This may be larger than the size
- // of the video, as still camera may not support the exact video resolution
- // demanded. See setPictureSizeToClosestSupported().
- int32_t mPictureWidth;
- int32_t mPictureHeight;
-
// size of the encoded video.
int32_t mVideoWidth;
int32_t mVideoHeight;
- // True if we need to crop the still camera image to get the video frame.
- bool mNeedCropping;
-
- // Start location of the cropping rectangle.
- int32_t mCropRectStartX;
- int32_t mCropRectStartY;
-
// Time between capture of two frames during time lapse recording
// Negative value indicates that timelapse is disabled.
int64_t mTimeBetweenTimeLapseFrameCaptureUs;
@@ -84,9 +67,6 @@ private:
// Real timestamp of the last encoded time lapse frame
int64_t mLastTimeLapseFrameRealTimestampUs;
- // Thread id of thread which takes still picture and sleeps in a loop.
- pthread_t mThreadTimeLapse;
-
// Variable set in dataCallbackTimestamp() to help skipCurrentFrame()
// to know if current frame needs to be skipped.
bool mSkipCurrentFrame;
@@ -111,9 +91,6 @@ private:
// Lock for accessing quick stop variables.
Mutex mQuickStopLock;
- // Condition variable to wake up still picture thread.
- Condition mTakePictureCondition;
-
// mQuickStop is set to true if we use quick read() returns, otherwise it is set
// to false. Once in this mode read() returns a copy of the last read frame
// with the same time stamp. See startQuickReadReturns().
@@ -148,32 +125,13 @@ private:
// Wrapper over CameraSource::read() to implement quick stop.
virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
- // For still camera case starts a thread which calls camera's takePicture()
- // in a loop. For video camera case, just starts the camera's video recording.
- virtual void startCameraRecording();
-
- // For still camera case joins the thread created in startCameraRecording().
// For video camera case, just stops the camera's video recording.
virtual void stopCameraRecording();
- // For still camera case don't need to do anything as memory is locally
- // allocated with refcounting.
- // For video camera case just tell the camera to release the frame.
- virtual void releaseRecordingFrame(const sp<IMemory>& frame);
-
// mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current
// frame needs to be skipped and this function just returns the value of mSkipCurrentFrame.
virtual bool skipCurrentFrame(int64_t timestampUs);
- // Handles the callback to handle raw frame data from the still camera.
- // Creates a copy of the frame data as the camera can reuse the frame memory
- // once this callback returns. The function also sets a new timestamp corresponding
- // to one frame time ahead of the last encoded frame's time stamp. It then
- // calls dataCallbackTimestamp() of the base class with the copied data and the
- // modified timestamp, which will think that it received the frame from a video
- // camera and proceed as usual.
- virtual void dataCallback(int32_t msgType, const sp<IMemory> &data);
-
// In the video camera case calls skipFrameAndModifyTimeStamp() to modify
// timestamp and set mSkipCurrentFrame.
// Then it calls the base CameraSource::dataCallbackTimestamp()
@@ -189,24 +147,6 @@ private:
// Otherwise returns false.
bool trySettingVideoSize(int32_t width, int32_t height);
- // The still camera may not support the demanded video width and height.
- // We look for the supported picture sizes from the still camera and
- // choose the smallest one with either dimensions higher than the corresponding
- // video dimensions. The still picture will be cropped to get the video frame.
- // The function returns true if the camera supports picture sizes greater than
- // or equal to the passed in width and height, and false otherwise.
- bool setPictureSizeToClosestSupported(int32_t width, int32_t height);
-
- // Computes the offset of the rectangle from where to start cropping the
- // still image into the video frame. We choose the center of the image to be
- // cropped. The offset is stored in (mCropRectStartX, mCropRectStartY).
- bool computeCropRectangleOffset();
-
- // Crops the source data into a smaller image starting at
- // (mCropRectStartX, mCropRectStartY) and of the size of the video frame.
- // The data is returned into a newly allocated IMemory.
- sp<IMemory> cropYUVImage(const sp<IMemory> &source_data);
-
// When video camera is used for time lapse capture, returns true
// until enough time has passed for the next time lapse frame. When
// the frame needs to be encoded, it returns false and also modifies
@@ -217,22 +157,6 @@ private:
// Wrapper to enter threadTimeLapseEntry()
static void *ThreadTimeLapseWrapper(void *me);
- // Runs a loop which sleeps until a still picture is required
- // and then calls mCamera->takePicture() to take the still picture.
- // Used only in the case mUseStillCameraForTimeLapse = true.
- void threadTimeLapseEntry();
-
- // Wrapper to enter threadStartPreview()
- static void *ThreadStartPreviewWrapper(void *me);
-
- // Starts the camera's preview.
- void threadStartPreview();
-
- // Starts thread ThreadStartPreviewWrapper() for restarting preview.
- // Needs to be done in a thread so that dataCallback() which calls this function
- // can return, and the camera can know that takePicture() is done.
- void restartPreview();
-
// Creates a copy of source_data into a new memory of final type MemoryBase.
sp<IMemory> createIMemoryCopy(const sp<IMemory> &source_data);
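
The members and methods kept above (mTimeBetweenTimeLapseFrameCaptureUs, mLastTimeLapseFrameRealTimestampUs, skipFrameAndModifyTimeStamp()) describe the video-camera time lapse path: frames are dropped until enough wall-clock time has elapsed, and each kept frame is re-stamped one frame duration after the previous one. A standalone sketch of that decision, using illustrative types and not the actual CameraSourceTimeLapse implementation:

```cpp
// Sketch of the time lapse skip/keep decision described by the comments above.
// Standalone types for illustration; not the actual CameraSourceTimeLapse code.
#include <stdint.h>

struct TimeLapseState {
    int64_t timeBetweenTimeLapseFrameCaptureUs;  // wall-clock gap between kept frames
    int64_t timeBetweenFrameCaptureUs;           // 1e6 / encoded video frame rate
    int64_t lastTimeLapseFrameRealTimestampUs;   // real time of the last kept frame
    int64_t lastFrameTimestampUs;                // timestamp handed to the encoder
};

// Returns true if the frame captured at realTimestampUs should be skipped.
// When a frame is kept, *encoderTimestampUs is set one frame duration past the
// previous kept frame, which is what makes the encoded clip play back sped up.
bool skipFrameAndModifyTimeStampSketch(TimeLapseState *s,
                                       int64_t realTimestampUs,
                                       int64_t *encoderTimestampUs) {
    if (realTimestampUs - s->lastTimeLapseFrameRealTimestampUs <
            s->timeBetweenTimeLapseFrameCaptureUs) {
        return true;  // not enough wall-clock time has passed yet
    }
    s->lastTimeLapseFrameRealTimestampUs = realTimestampUs;
    *encoderTimestampUs = s->lastFrameTimestampUs + s->timeBetweenFrameCaptureUs;
    s->lastFrameTimestampUs = *encoderTimestampUs;
    return false;
}
```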
diff --git a/include/media/stagefright/MetadataBufferType.h b/include/media/stagefright/MetadataBufferType.h
index 52a3257..4eaf8ac 100644
--- a/include/media/stagefright/MetadataBufferType.h
+++ b/include/media/stagefright/MetadataBufferType.h
@@ -69,6 +69,16 @@ typedef enum {
* kMetadataBufferTypeGrallocSource is used to indicate that
* the payload of the metadata buffers can be interpreted as
* a buffer_handle_t.
+ * So in this case, the metadata that the encoder receives
+ * will have a byte stream that consists of two parts:
+ * 1. First, there is an integer indicating that it is a
+ * gralloc source (kMetadataBufferTypeGrallocSource)
+ * 2. This is followed by the buffer_handle_t that is a handle to the
+ * gralloc buffer. The encoder needs to interpret this gralloc handle
+ * and encode the frames.
+ * --------------------------------------------------------------
+ * | kMetadataBufferTypeGrallocSource | sizeof(buffer_handle_t) |
+ * --------------------------------------------------------------
*/
kMetadataBufferTypeGrallocSource = 1,
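
Given the layout documented above (a type tag followed by a buffer_handle_t), an encoder-side consumer could validate and extract the handle roughly as follows. This is a sketch only; getGrallocHandle is a hypothetical helper, and it assumes the tag is stored as the MetadataBufferType enum and that buffer_handle_t comes from <hardware/gralloc.h>.

```cpp
// Sketch only: validate and extract the gralloc handle from a metadata buffer
// laid out as documented above. getGrallocHandle is a hypothetical helper.
#include <stdint.h>
#include <string.h>
#include <hardware/gralloc.h>                      // buffer_handle_t
#include <media/stagefright/MetadataBufferType.h>  // kMetadataBufferTypeGrallocSource

bool getGrallocHandle(const void *data, size_t size, buffer_handle_t *outHandle) {
    if (size < sizeof(MetadataBufferType) + sizeof(buffer_handle_t)) {
        return false;  // too small to hold the type tag plus the handle
    }
    MetadataBufferType type;
    memcpy(&type, data, sizeof(type));
    if (type != kMetadataBufferTypeGrallocSource) {
        return false;  // not a gralloc-backed metadata buffer
    }
    memcpy(outHandle, (const uint8_t *)data + sizeof(type), sizeof(*outHandle));
    return true;
}
```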
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 56bd9c3..fab258c 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -63,6 +63,10 @@ public:
MediaBuffer **buffer, const ReadOptions *options = NULL);
virtual sp<MetaData> getFormat();
+ // Pass the metadata over to the buffer, call when you have the lock
+ void passMetadataBufferLocked(MediaBuffer **buffer);
+ bool checkBufferMatchesSlot(int slot, MediaBuffer *buffer);
+
// Get / Set the frame rate used for encoding. Default fps = 30
status_t setFrameRate(int32_t fps) ;
int32_t getFrameRate( ) const;
@@ -152,7 +156,7 @@ public:
status_t setBufferCountServer(int bufferCount);
// getTimestamp retrieves the timestamp associated with the image
- // set by the most recent call to updateFrameInfoLocked().
+ // set by the most recent call to read()
//
// The timestamp is in nanoseconds, and is monotonically increasing. Its
// other semantics (zero point, etc) are source-dependent and should be
diff --git a/include/surfaceflinger/Surface.h b/include/surfaceflinger/Surface.h
index 9c352ad..0460bbd 100644
--- a/include/surfaceflinger/Surface.h
+++ b/include/surfaceflinger/Surface.h
@@ -122,7 +122,10 @@ public:
uint32_t reserved[2];
};
+ explicit Surface(const sp<ISurfaceTexture>& st);
+
static status_t writeToParcel(const sp<Surface>& control, Parcel* parcel);
+
static sp<Surface> readFromParcel(const Parcel& data);
static bool isValid(const sp<Surface>& surface) {
return (surface != 0) && surface->isValid();
@@ -147,14 +150,14 @@ private:
Surface& operator = (Surface& rhs);
Surface(const Surface& rhs);
- Surface(const sp<SurfaceControl>& control);
+ explicit Surface(const sp<SurfaceControl>& control);
Surface(const Parcel& data, const sp<IBinder>& ref);
~Surface();
/*
* private stuff...
*/
- void init();
+ void init(const sp<ISurfaceTexture>& surfaceTexture);
static void cleanCachedSurfacesLocked();
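
The new explicit Surface(const sp<ISurfaceTexture>&) constructor, together with init() now taking the producer interface, lets a Surface be built directly around a SurfaceTexture. A minimal sketch, assuming SurfaceTexture implements ISurfaceTexture via BnSurfaceTexture; makeSurfaceForTexture is a hypothetical helper, not part of this change:

```cpp
// Illustrative only: wrap a SurfaceTexture's producer interface in a Surface
// using the new explicit Surface(const sp<ISurfaceTexture>&) constructor.
#include <gui/SurfaceTexture.h>
#include <surfaceflinger/Surface.h>

using namespace android;

sp<Surface> makeSurfaceForTexture(GLuint texName) {
    // SurfaceTexture is assumed to implement ISurfaceTexture (via
    // BnSurfaceTexture), so it can be handed to the new constructor directly.
    sp<SurfaceTexture> texture = new SurfaceTexture(texName);
    sp<Surface> surface = new Surface(texture);
    return surface;  // usable wherever an ANativeWindow/Surface is expected
}
```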