Diffstat (limited to 'media')
47 files changed, 2382 insertions, 488 deletions
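One thread of this change set is visible in the first diff below: AudioRecord now allocates an audio session ID during native_setup() (returned through the one-element session[] array) and exposes it via a hidden getAudioSessionId() getter. A minimal usage sketch, assuming RECORD_AUDIO permission and a device-supported sample rate; the getter is @hide at this stage, so this is framework-internal code, and error handling is elided:

    import android.media.AudioFormat;
    import android.media.AudioRecord;
    import android.media.MediaRecorder;

    public class SessionIdSketch {
        public static int captureSessionId() {
            int rate = 8000; // assumption: any rate the device supports
            int minBuf = AudioRecord.getMinBufferSize(rate,
                    AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
            AudioRecord rec = new AudioRecord(MediaRecorder.AudioSource.MIC, rate,
                    AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);
            try {
                if (rec.getState() != AudioRecord.STATE_INITIALIZED) {
                    return 0; // native init failed; mSessionId was never assigned
                }
                return rec.getAudioSessionId(); // value reported via session[0]
            } finally {
                rec.release();
            }
        }
    }

Passing a session ID of 0 down to AudioRecord::set() (see the AudioRecord.cpp hunk below) makes the native side allocate a fresh ID via AudioSystem::newAudioSessionId(), which is why the Java constructor seeds session[0] with 0.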
diff --git a/media/java/android/media/AudioRecord.java b/media/java/android/media/AudioRecord.java index c567a6e..855e831 100644 --- a/media/java/android/media/AudioRecord.java +++ b/media/java/android/media/AudioRecord.java @@ -185,7 +185,10 @@ public class AudioRecord * Size of the native audio buffer. */ private int mNativeBufferSizeInBytes = 0; - + /** + * Audio session ID + */ + private int mSessionId = 0; //--------------------------------------------------------- // Constructor, Finalize @@ -227,15 +230,20 @@ public class AudioRecord audioBuffSizeCheck(bufferSizeInBytes); // native initialization + int[] session = new int[1]; + session[0] = 0; //TODO: update native initialization when information about hardware init failure // due to capture device already open is available. int initResult = native_setup( new WeakReference<AudioRecord>(this), - mRecordSource, mSampleRate, mChannels, mAudioFormat, mNativeBufferSizeInBytes); + mRecordSource, mSampleRate, mChannels, mAudioFormat, mNativeBufferSizeInBytes, + session); if (initResult != SUCCESS) { loge("Error code "+initResult+" when initializing native AudioRecord object."); return; // with mState == STATE_UNINITIALIZED } + mSessionId = session[0]; + mState = STATE_INITIALIZED; } @@ -485,6 +493,15 @@ public class AudioRecord } } + /** + * Returns the audio session ID. + * + * @return the ID of the audio session this AudioRecord belongs to. + * @hide + */ + public int getAudioSessionId() { + return mSessionId; + } //--------------------------------------------------------- // Transport control methods @@ -763,7 +780,8 @@ public class AudioRecord //-------------------- private native final int native_setup(Object audiorecord_this, - int recordSource, int sampleRate, int nbChannels, int audioFormat, int buffSizeInBytes); + int recordSource, int sampleRate, int nbChannels, int audioFormat, + int buffSizeInBytes, int[] sessionId); private native final void native_finalize(); diff --git a/media/java/android/media/AudioService.java b/media/java/android/media/AudioService.java index 2e9b64c..682560a 100644 --- a/media/java/android/media/AudioService.java +++ b/media/java/android/media/AudioService.java @@ -332,10 +332,10 @@ public class AudioService extends IAudioService.Stub { SOUND_EFFECT_DEFAULT_VOLUME_DB); mVolumePanel = new VolumePanel(context, this); - mSettingsObserver = new SettingsObserver(); mForcedUseForComm = AudioSystem.FORCE_NONE; createAudioSystemThread(); readPersistedSettings(); + mSettingsObserver = new SettingsObserver(); createStreamStates(); // Call setMode() to initialize mSetModeDeathHandlers mMode = AudioSystem.MODE_INVALID; @@ -433,15 +433,20 @@ public class AudioService extends IAudioService.Stub { mVibrateSetting = System.getInt(cr, System.VIBRATE_ON, 0); + // make sure settings for ringer mode are consistent with device type: non voice capable + // devices (tablets) include media stream in silent mode whereas phones don't. 
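At this point AudioService normalizes MODE_RINGER_STREAMS_AFFECTED: starting from the stored mask (or the new default of RING, NOTIFICATION, SYSTEM and SYSTEM_ENFORCED), the STREAM_MUSIC bit is cleared on voice-capable phones and set on tablets, and the result is written back so every observer reads a consistent value. The mask matters because ringer-mode muting is a per-stream bit test; a sketch of that consumer, assuming the method shape AudioService uses elsewhere:

    // Illustrative wrapper, not part of this patch; mirrors the per-stream bit
    // test applied when the ringer mode changes. mRingerModeAffectedStreams
    // holds the mask being normalized in the surrounding hunk.
    class RingerModeMask {
        int mRingerModeAffectedStreams;

        boolean isStreamAffectedByRingerMode(int streamType) {
            return (mRingerModeAffectedStreams & (1 << streamType)) != 0;
        }
    }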
mRingerModeAffectedStreams = Settings.System.getInt(cr, Settings.System.MODE_RINGER_STREAMS_AFFECTED, ((1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_NOTIFICATION)| - (1 << AudioSystem.STREAM_SYSTEM)|(1 << AudioSystem.STREAM_SYSTEM_ENFORCED)| - (1 << AudioSystem.STREAM_MUSIC))); - - if (!mVoiceCapable) { + (1 << AudioSystem.STREAM_SYSTEM)|(1 << AudioSystem.STREAM_SYSTEM_ENFORCED))); + if (mVoiceCapable) { + mRingerModeAffectedStreams &= ~(1 << AudioSystem.STREAM_MUSIC); + } else { mRingerModeAffectedStreams |= (1 << AudioSystem.STREAM_MUSIC); } + Settings.System.putInt(cr, + Settings.System.MODE_RINGER_STREAMS_AFFECTED, mRingerModeAffectedStreams); + mMuteAffectedStreams = System.getInt(cr, System.MUTE_STREAMS_AFFECTED, ((1 << AudioSystem.STREAM_MUSIC)|(1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_SYSTEM))); @@ -2172,12 +2177,14 @@ public class AudioService extends IAudioService.Stub { super.onChange(selfChange); synchronized (mSettingsLock) { int ringerModeAffectedStreams = Settings.System.getInt(mContentResolver, - Settings.System.MODE_RINGER_STREAMS_AFFECTED, - 0); - if (!mVoiceCapable) { + Settings.System.MODE_RINGER_STREAMS_AFFECTED, + ((1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_NOTIFICATION)| + (1 << AudioSystem.STREAM_SYSTEM)|(1 << AudioSystem.STREAM_SYSTEM_ENFORCED))); + if (mVoiceCapable) { + ringerModeAffectedStreams &= ~(1 << AudioSystem.STREAM_MUSIC); + } else { ringerModeAffectedStreams |= (1 << AudioSystem.STREAM_MUSIC); } - if (ringerModeAffectedStreams != mRingerModeAffectedStreams) { /* * Ensure all stream types that should be affected by ringer mode diff --git a/media/java/android/media/MediaRecorder.java b/media/java/android/media/MediaRecorder.java index e3cbd57..72069ac 100644 --- a/media/java/android/media/MediaRecorder.java +++ b/media/java/android/media/MediaRecorder.java @@ -81,9 +81,6 @@ public class MediaRecorder private String mPath; private FileDescriptor mFd; - private boolean mPrepareAuxiliaryFile = false; - private String mPathAux; - private FileDescriptor mFdAux; private EventHandler mEventHandler; private OnErrorListener mOnErrorListener; private OnInfoListener mOnInfoListener; @@ -557,84 +554,23 @@ public class MediaRecorder } /** - * Sets the auxiliary time lapse video's resolution and bitrate. - * - * The auxiliary video's resolution and bitrate are determined by the CamcorderProfile - * quality level {@link android.media.CamcorderProfile#QUALITY_HIGH}. - */ - private void setAuxVideoParameters() { - CamcorderProfile profile = CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH); - setParameter(String.format("video-aux-param-width=%d", profile.videoFrameWidth)); - setParameter(String.format("video-aux-param-height=%d", profile.videoFrameHeight)); - setParameter(String.format("video-aux-param-encoding-bitrate=%d", profile.videoBitRate)); - } - - /** - * Pass in the file descriptor for the auxiliary time lapse video. Call this before - * prepare(). - * - * Sets file descriptor and parameters for auxiliary time lapse video. Time lapse mode - * can capture video (using the still camera) at resolutions higher than that can be - * played back on the device. This function or - * {@link #setAuxiliaryOutputFile(String)} enable capture of a smaller video in - * parallel with the main time lapse video, which can be used to play back on the - * device. The smaller video is created by downsampling the main video. This call is - * optional and does not have to be called if parallel capture of a downsampled video - * is not desired. 
- * - * Note that while the main video resolution and bitrate is determined from the - * CamcorderProfile in {@link #setProfile(CamcorderProfile)}, the auxiliary video's - * resolution and bitrate are determined by the CamcorderProfile quality level - * {@link android.media.CamcorderProfile#QUALITY_HIGH}. All other encoding parameters - * remain the same for the main video and the auxiliary video. - * - * E.g. if the device supports the time lapse profile quality level - * {@link android.media.CamcorderProfile#QUALITY_TIME_LAPSE_1080P} but can playback at - * most 480p, the application might want to capture an auxiliary video of resolution - * 480p using this call. - * - * @param fd an open file descriptor to be written into. + * Currently not implemented. It does nothing. + * @deprecated Time lapse mode video recording using camera still image capture + * is not desirable, and will not be supported. */ public void setAuxiliaryOutputFile(FileDescriptor fd) { - mPrepareAuxiliaryFile = true; - mPathAux = null; - mFdAux = fd; - setAuxVideoParameters(); + Log.w(TAG, "setAuxiliaryOutputFile(FileDescriptor) is no longer supported."); } /** - * Pass in the file path for the auxiliary time lapse video. Call this before - * prepare(). - * - * Sets file path and parameters for auxiliary time lapse video. Time lapse mode can - * capture video (using the still camera) at resolutions higher than that can be - * played back on the device. This function or - * {@link #setAuxiliaryOutputFile(FileDescriptor)} enable capture of a smaller - * video in parallel with the main time lapse video, which can be used to play back on - * the device. The smaller video is created by downsampling the main video. This call - * is optional and does not have to be called if parallel capture of a downsampled - * video is not desired. - * - * Note that while the main video resolution and bitrate is determined from the - * CamcorderProfile in {@link #setProfile(CamcorderProfile)}, the auxiliary video's - * resolution and bitrate are determined by the CamcorderProfile quality level - * {@link android.media.CamcorderProfile#QUALITY_HIGH}. All other encoding parameters - * remain the same for the main video and the auxiliary video. - * - * E.g. if the device supports the time lapse profile quality level - * {@link android.media.CamcorderProfile#QUALITY_TIME_LAPSE_1080P} but can playback at - * most 480p, the application might want to capture an auxiliary video of resolution - * 480p using this call. - * - * @param path The pathname to use. + * Currently not implemented. It does nothing. + * @deprecated Time lapse mode video recording using camera still image capture + * is not desirable, and will not be supported. 
*/ public void setAuxiliaryOutputFile(String path) { - mPrepareAuxiliaryFile = true; - mFdAux = null; - mPathAux = path; - setAuxVideoParameters(); + Log.w(TAG, "setAuxiliaryOutputFile(String) is no longer supported."); } /** @@ -668,8 +604,6 @@ public class MediaRecorder // native implementation private native void _setOutputFile(FileDescriptor fd, long offset, long length) throws IllegalStateException, IOException; - private native void _setOutputFileAux(FileDescriptor fd) - throws IllegalStateException, IOException; private native void _prepare() throws IllegalStateException, IOException; /** @@ -696,21 +630,6 @@ public class MediaRecorder throw new IOException("No valid output file"); } - if (mPrepareAuxiliaryFile) { - if (mPathAux != null) { - FileOutputStream fos = new FileOutputStream(mPathAux); - try { - _setOutputFileAux(fos.getFD()); - } finally { - fos.close(); - } - } else if (mFdAux != null) { - _setOutputFileAux(mFdAux); - } else { - throw new IOException("No valid output file"); - } - } - _prepare(); } diff --git a/media/java/android/media/MediaScanner.java b/media/java/android/media/MediaScanner.java index 8c8569a..a8ac510 100644 --- a/media/java/android/media/MediaScanner.java +++ b/media/java/android/media/MediaScanner.java @@ -372,8 +372,14 @@ public class MediaScanner private class FileInserter { - ContentValues[] mValues = new ContentValues[1000]; - int mIndex = 0; + private final Uri mUri; + private final ContentValues[] mValues; + private int mIndex; + + public FileInserter(Uri uri, int count) { + mUri = uri; + mValues = new ContentValues[count]; + } public Uri insert(ContentValues values) { if (mIndex == mValues.length) { @@ -389,13 +395,17 @@ public class MediaScanner mValues[mIndex++] = null; } try { - mMediaProvider.bulkInsert(mFilesUri, mValues); + mMediaProvider.bulkInsert(mUri, mValues); } catch (RemoteException e) { Log.e(TAG, "RemoteException in FileInserter.flush()", e); } mIndex = 0; } } + + private FileInserter mAudioInserter; + private FileInserter mVideoInserter; + private FileInserter mImageInserter; private FileInserter mFileInserter; // hashes file path to FileCacheEntry. @@ -707,9 +717,7 @@ public class MediaScanner map.put(MediaStore.MediaColumns.MIME_TYPE, mMimeType); map.put(MediaStore.MediaColumns.IS_DRM, mIsDrm); - if (mNoMedia) { - map.put(MediaStore.MediaColumns.NO_MEDIA, true); - } else { + if (!mNoMedia) { if (MediaFile.isVideoFileType(mFileType)) { map.put(Video.Media.ARTIST, (mArtist != null && mArtist.length() > 0 ? mArtist : MediaStore.UNKNOWN_STRING)); @@ -837,23 +845,35 @@ public class MediaScanner } } - // For inserts we always use the file URI so we can insert in bulk. - // For updates we compute the URI based on the media type. 
Uri tableUri = mFilesUri; + FileInserter inserter = mFileInserter; + if (!mNoMedia) { + if (MediaFile.isVideoFileType(mFileType)) { + tableUri = mVideoUri; + inserter = mVideoInserter; + } else if (MediaFile.isImageFileType(mFileType)) { + tableUri = mImagesUri; + inserter = mImageInserter; + } else if (MediaFile.isAudioFileType(mFileType)) { + tableUri = mAudioUri; + inserter = mAudioInserter; + } + } Uri result = null; if (rowId == 0) { if (mMtpObjectHandle != 0) { values.put(MediaStore.MediaColumns.MEDIA_SCANNER_NEW_OBJECT_ID, mMtpObjectHandle); } - int format = entry.mFormat; - if (format == 0) { - format = MediaFile.getFormatCode(entry.mPath, mMimeType); + if (tableUri == mFilesUri) { + int format = entry.mFormat; + if (format == 0) { + format = MediaFile.getFormatCode(entry.mPath, mMimeType); + } + values.put(Files.FileColumns.FORMAT, format); } - values.put(Files.FileColumns.FORMAT, format); - // new file, insert it - if (mFileInserter != null) { - result = mFileInserter.insert(values); + if (inserter != null) { + result = inserter.insert(values); } else { result = mMediaProvider.insert(tableUri, values); } @@ -863,16 +883,6 @@ public class MediaScanner entry.mRowId = rowId; } } else { - if (!mNoMedia) { - if (MediaFile.isVideoFileType(mFileType)) { - tableUri = mVideoUri; - } else if (MediaFile.isImageFileType(mFileType)) { - tableUri = mImagesUri; - } else if (MediaFile.isAudioFileType(mFileType)) { - tableUri = mAudioUri; - } - } - // updated file result = ContentUris.withAppendedId(tableUri, rowId); // path should never change, and we want to avoid replacing mixed cased paths @@ -1200,12 +1210,25 @@ public class MediaScanner initialize(volumeName); prescan(null, true); long prescan = System.currentTimeMillis(); - mFileInserter = new FileInserter(); + + // create FileInserters for bulk inserts + mAudioInserter = new FileInserter(mAudioUri, 500); + mVideoInserter = new FileInserter(mVideoUri, 500); + mImageInserter = new FileInserter(mImagesUri, 500); + mFileInserter = new FileInserter(mFilesUri, 500); for (int i = 0; i < directories.length; i++) { processDirectory(directories[i], mClient); } + + // flush remaining inserts + mAudioInserter.flush(); + mVideoInserter.flush(); + mImageInserter.flush(); mFileInserter.flush(); + mAudioInserter = null; + mVideoInserter = null; + mImageInserter = null; mFileInserter = null; long scan = System.currentTimeMillis(); diff --git a/media/java/android/media/videoeditor/MediaArtistNativeHelper.java b/media/java/android/media/videoeditor/MediaArtistNativeHelper.java index 0d2bcd5..6b0fb12 100644 --- a/media/java/android/media/videoeditor/MediaArtistNativeHelper.java +++ b/media/java/android/media/videoeditor/MediaArtistNativeHelper.java @@ -912,11 +912,14 @@ class MediaArtistNativeHelper { /** 720p 1280 X 720 */ public static final int V720p = 10; - /** 1080 x 720 */ + /** W720p 1080 x 720 */ public static final int W720p = 11; - /** 1080 960 x 720 */ + /** S720p 960 x 720 */ public static final int S720p = 12; + + /** 1080p 1920 x 1080 */ + public static final int V1080p = 13; } /** @@ -3548,6 +3551,8 @@ class MediaArtistNativeHelper { retValue = VideoFrameSize.WVGA16x9; else if (height == MediaProperties.HEIGHT_720) retValue = VideoFrameSize.V720p; + else if (height == MediaProperties.HEIGHT_1080) + retValue = VideoFrameSize.V1080p; break; case MediaProperties.ASPECT_RATIO_4_3: if (height == MediaProperties.HEIGHT_480) diff --git a/media/java/android/media/videoeditor/MediaImageItem.java 
b/media/java/android/media/videoeditor/MediaImageItem.java index 4faa83a..73cc7e2 100755 --- a/media/java/android/media/videoeditor/MediaImageItem.java +++ b/media/java/android/media/videoeditor/MediaImageItem.java @@ -503,51 +503,75 @@ public class MediaImageItem extends MediaItem { return adjustedOverlays; } - - /** - * This function sets the Ken Burn effect generated clip - * name. + * This function gets the proper width for a given aspect ratio + * and height. * - * @param generatedFilePath The name of the generated clip + * @param aspectRatio Given aspect ratio + * @param height Given height */ - @Override - void setGeneratedImageClip(String generatedFilePath) { - super.setGeneratedImageClip(generatedFilePath); + private int getWidthByAspectRatioAndHeight(int aspectRatio, int height) { + int width = 0; - - // set the Kenburns clip width and height - mGeneratedClipHeight = getScaledHeight(); - switch (mVideoEditor.getAspectRatio()) { + switch (aspectRatio) { case MediaProperties.ASPECT_RATIO_3_2: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 720; - else if (mGeneratedClipHeight == MediaProperties.HEIGHT_720) - mGeneratedClipWidth = 1080; + if (height == MediaProperties.HEIGHT_480) + width = 720; + else if (height == MediaProperties.HEIGHT_720) + width = 1080; break; + case MediaProperties.ASPECT_RATIO_16_9: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_360) - mGeneratedClipWidth = 640; - else if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 854; - else if (mGeneratedClipHeight == MediaProperties.HEIGHT_720) - mGeneratedClipWidth = 1280; + if (height == MediaProperties.HEIGHT_360) + width = 640; + else if (height == MediaProperties.HEIGHT_480) + width = 854; + else if (height == MediaProperties.HEIGHT_720) + width = 1280; + else if (height == MediaProperties.HEIGHT_1080) + width = 1920; break; + case MediaProperties.ASPECT_RATIO_4_3: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 640; - if (mGeneratedClipHeight == MediaProperties.HEIGHT_720) - mGeneratedClipWidth = 960; + if (height == MediaProperties.HEIGHT_480) + width = 640; + if (height == MediaProperties.HEIGHT_720) + width = 960; break; + case MediaProperties.ASPECT_RATIO_5_3: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 800; + if (height == MediaProperties.HEIGHT_480) + width = 800; break; + case MediaProperties.ASPECT_RATIO_11_9: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_144) - mGeneratedClipWidth = 176; + if (height == MediaProperties.HEIGHT_144) + width = 176; break; + + default : { + throw new IllegalArgumentException( + "Illegal arguments for aspectRatio"); + } } + + return width; + } + + /** + * This function sets the Ken Burns effect generated clip + * name.
+ * + * @param generatedFilePath The name of the generated clip + */ + @Override + void setGeneratedImageClip(String generatedFilePath) { + super.setGeneratedImageClip(generatedFilePath); + + // set the Kenburns clip width and height + mGeneratedClipHeight = getScaledHeight(); + mGeneratedClipWidth = getWidthByAspectRatioAndHeight( + mVideoEditor.getAspectRatio(), mGeneratedClipHeight); } /** @@ -841,37 +865,8 @@ public class MediaImageItem extends MediaItem { clipSettings.fileType = FileType.THREE_GPP; mGeneratedClipHeight = getScaledHeight(); - switch (mVideoEditor.getAspectRatio()) { - case MediaProperties.ASPECT_RATIO_3_2: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 720; - else if (mGeneratedClipHeight == MediaProperties.HEIGHT_720) - mGeneratedClipWidth = 1080; - break; - case MediaProperties.ASPECT_RATIO_16_9: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_360) - mGeneratedClipWidth = 640; - else if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 854; - else if (mGeneratedClipHeight == MediaProperties.HEIGHT_720) - mGeneratedClipWidth = 1280; - break; - case MediaProperties.ASPECT_RATIO_4_3: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 640; - if (mGeneratedClipHeight == MediaProperties.HEIGHT_720) - mGeneratedClipWidth = 960; - break; - case MediaProperties.ASPECT_RATIO_5_3: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_480) - mGeneratedClipWidth = 800; - break; - case MediaProperties.ASPECT_RATIO_11_9: - if (mGeneratedClipHeight == MediaProperties.HEIGHT_144) - mGeneratedClipWidth = 176; - break; - } - + mGeneratedClipWidth = getWidthByAspectRatioAndHeight( + mVideoEditor.getAspectRatio(), mGeneratedClipHeight); } else { if (getGeneratedImageClip() == null) { clipSettings.clipPath = getDecodedImageFileName(); diff --git a/media/java/android/media/videoeditor/MediaProperties.java b/media/java/android/media/videoeditor/MediaProperties.java index 0225807..ff13e5d 100755 --- a/media/java/android/media/videoeditor/MediaProperties.java +++ b/media/java/android/media/videoeditor/MediaProperties.java @@ -17,8 +17,9 @@ package android.media.videoeditor; +import android.media.videoeditor.VideoEditorProfile; import android.util.Pair; - +import java.lang.System; /** * This class defines all properties of a media file such as supported height, * aspect ratio, bitrate for export function. 
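MediaProperties below renames HEIGHT_1088 to HEIGHT_1080 (dropping the tmpLSA workaround pair), adds 1920x1080 to the 16:9 resolution table, and filters getSupportedResolutions() against the platform cap read from the new VideoEditorProfile. A hedged caller-side sketch; the helper class and method names are invented for illustration:

    import android.media.videoeditor.VideoEditorProfile;

    final class ExportCapCheck {
        // True when the device cap admits 1920x1080 output, i.e. when the
        // (1920, HEIGHT_1080) pair survives the filtering that
        // MediaProperties.getSupportedResolutions() now applies.
        static boolean supports1080pExport() {
            VideoEditorProfile profile = VideoEditorProfile.get();
            return profile != null
                    && profile.maxOutputVideoFrameWidth >= 1920
                    && profile.maxOutputVideoFrameHeight >= 1080;
        }
    }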
@@ -33,7 +34,7 @@ public class MediaProperties { public static final int HEIGHT_360 = 360; public static final int HEIGHT_480 = 480; public static final int HEIGHT_720 = 720; - public static final int HEIGHT_1088 = 1088; + public static final int HEIGHT_1080 = 1080; /** * Supported aspect ratios @@ -63,8 +64,7 @@ public class MediaProperties { private static final Pair<Integer, Integer>[] ASPECT_RATIO_3_2_RESOLUTIONS = new Pair[] { new Pair<Integer, Integer>(720, HEIGHT_480), -//*tmpLSA*/ new Pair<Integer, Integer>(1080, HEIGHT_720) -/*tmpLSA*/ new Pair<Integer, Integer>(1088, HEIGHT_720) + new Pair<Integer, Integer>(1080, HEIGHT_720) }; @SuppressWarnings({"unchecked"}) @@ -92,6 +92,7 @@ public class MediaProperties { new Pair[] { new Pair<Integer, Integer>(848, HEIGHT_480), new Pair<Integer, Integer>(1280, HEIGHT_720), + new Pair<Integer, Integer>(1920, HEIGHT_1080), }; /** @@ -345,7 +346,31 @@ public class MediaProperties { } } - return resolutions; + /** Check the platform specific maximum export resolution */ + VideoEditorProfile veProfile = VideoEditorProfile.get(); + if (veProfile == null) { + throw new RuntimeException("Can't get the video editor profile"); + } + final int maxWidth = veProfile.maxOutputVideoFrameWidth; + final int maxHeight = veProfile.maxOutputVideoFrameHeight; + Pair<Integer, Integer>[] tmpResolutions = new Pair[resolutions.length]; + int numSupportedResolution = 0; + int i = 0; + + /** Get supported resolution list */ + for (i = 0; i < resolutions.length; i++) { + if ((resolutions[i].first <= maxWidth) && + (resolutions[i].second <= maxHeight)) { + tmpResolutions[numSupportedResolution] = resolutions[i]; + numSupportedResolution++; + } + } + final Pair<Integer, Integer>[] supportedResolutions = + new Pair[numSupportedResolution]; + System.arraycopy(tmpResolutions, 0, + supportedResolutions, 0, numSupportedResolution); + + return supportedResolutions; } /** diff --git a/media/java/android/media/videoeditor/MediaVideoItem.java b/media/java/android/media/videoeditor/MediaVideoItem.java index 4758de6..6248651 100755 --- a/media/java/android/media/videoeditor/MediaVideoItem.java +++ b/media/java/android/media/videoeditor/MediaVideoItem.java @@ -23,6 +23,7 @@ import java.lang.ref.SoftReference; import android.graphics.Bitmap; import android.media.videoeditor.MediaArtistNativeHelper.ClipSettings; import android.media.videoeditor.MediaArtistNativeHelper.Properties; +import android.media.videoeditor.VideoEditorProfile; import android.view.Surface; import android.view.SurfaceHolder; @@ -118,6 +119,21 @@ public class MediaVideoItem extends MediaItem { throw new IllegalArgumentException(e.getMessage() + " : " + filename); } + /** Check the platform specific maximum import resolution */ + VideoEditorProfile veProfile = VideoEditorProfile.get(); + if (veProfile == null) { + throw new RuntimeException("Can't get the video editor profile"); + } + final int maxInputWidth = veProfile.maxInputVideoFrameWidth; + final int maxInputHeight = veProfile.maxInputVideoFrameHeight; + if ((properties.width > maxInputWidth) || + (properties.height > maxInputHeight)) { + throw new IllegalArgumentException( + "Unsupported import resolution. 
Supported maximum width:" + + maxInputWidth + " height:" + maxInputHeight + + ", current width:" + properties.width + + " height:" + properties.height); + } switch (mMANativeHelper.getFileType(properties.fileType)) { case MediaProperties.FILE_3GP: case MediaProperties.FILE_MP4: diff --git a/media/java/android/media/videoeditor/VideoEditor.java b/media/java/android/media/videoeditor/VideoEditor.java index 59e4540..720e8022 100755 --- a/media/java/android/media/videoeditor/VideoEditor.java +++ b/media/java/android/media/videoeditor/VideoEditor.java @@ -370,7 +370,7 @@ public interface VideoEditor { */ public void export(String filename, int height, int bitrate, ExportProgressListener listener) - throws IOException; + throws IOException; /** * Create the output movie based on all media items added and the applied @@ -413,7 +413,7 @@ public interface VideoEditor { */ public void export(String filename, int height, int bitrate, int audioCodec, int videoCodec, ExportProgressListener listener) - throws IOException; + throws IOException; /** * Cancel the running export operation. This method blocks until the export diff --git a/media/java/android/media/videoeditor/VideoEditorImpl.java b/media/java/android/media/videoeditor/VideoEditorImpl.java index 649b98a..ea7fe63 100755 --- a/media/java/android/media/videoeditor/VideoEditorImpl.java +++ b/media/java/android/media/videoeditor/VideoEditorImpl.java @@ -337,7 +337,8 @@ public class VideoEditorImpl implements VideoEditor { */ public void export(String filename, int height, int bitrate, int audioCodec, int videoCodec, - ExportProgressListener listener) throws IOException { + ExportProgressListener listener) + throws IOException { switch (audioCodec) { case MediaProperties.ACODEC_AAC_LC: @@ -372,7 +373,8 @@ public class VideoEditorImpl implements VideoEditor { * {@inheritDoc} */ public void export(String filename, int height, int bitrate, - ExportProgressListener listener) throws IOException { + ExportProgressListener listener) + throws IOException { if (filename == null) { throw new IllegalArgumentException("export: filename is null"); } @@ -386,6 +388,20 @@ public class VideoEditorImpl implements VideoEditor { throw new IllegalStateException("No MediaItems added"); } + /** Check the platform specific maximum export resolution */ + VideoEditorProfile veProfile = VideoEditorProfile.get(); + if (veProfile == null) { + throw new RuntimeException("Can't get the video editor profile"); + } + final int maxOutputHeight = veProfile.maxOutputVideoFrameHeight; + final int maxOutputWidth = veProfile.maxOutputVideoFrameWidth; + if (height > maxOutputHeight) { + throw new IllegalArgumentException( + "Unsupported export resolution. 
Supported maximum width:" + + maxOutputWidth + " height:" + maxOutputHeight + + " current height:" + height); + } + switch (height) { case MediaProperties.HEIGHT_144: break; @@ -397,6 +413,8 @@ public class VideoEditorImpl implements VideoEditor { break; case MediaProperties.HEIGHT_720: break; + case MediaProperties.HEIGHT_1080: + break; default: { String message = "Unsupported height value " + height; diff --git a/media/java/android/media/videoeditor/VideoEditorProfile.java b/media/java/android/media/videoeditor/VideoEditorProfile.java new file mode 100755 index 0000000..7d9fc8f --- /dev/null +++ b/media/java/android/media/videoeditor/VideoEditorProfile.java @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package android.media.videoeditor; + +/** + * The VideoEditorProfile class is used to retrieve the + * predefined videoeditor profile settings for videoeditor applications. + * These settings are read-only. + * + * <p>The videoeditor profile specifies the following set of parameters: + * <ul> + * <li> max input video frame width + * <li> max input video frame height + * <li> max output video frame width + * <li> max output video frame height + * </ul> + * {@hide} + */ +public class VideoEditorProfile +{ + /** + * The max input video frame width + */ + public int maxInputVideoFrameWidth; + + /** + * The max input video frame height + */ + public int maxInputVideoFrameHeight; + + /** + * The max output video frame width + */ + public int maxOutputVideoFrameWidth; + + /** + * The max output video frame height + */ + public int maxOutputVideoFrameHeight; + + /** + * Returns the videoeditor profile + */ + public static VideoEditorProfile get() { + return native_get_videoeditor_profile(); + } + + static { + System.loadLibrary("media_jni"); + native_init(); + } + + // Private constructor called by JNI + private VideoEditorProfile(int inputWidth, + int inputHeight, + int outputWidth, + int outputHeight) { + + this.maxInputVideoFrameWidth = inputWidth; + this.maxInputVideoFrameHeight = inputHeight; + this.maxOutputVideoFrameWidth = outputWidth; + this.maxOutputVideoFrameHeight = outputHeight; + } + + // Methods implemented by JNI + private static native final void native_init(); + private static native final VideoEditorProfile + native_get_videoeditor_profile(); +} diff --git a/media/jni/android_media_MediaProfiles.cpp b/media/jni/android_media_MediaProfiles.cpp index 08a6de1..2b8dfe4 100644 --- a/media/jni/android_media_MediaProfiles.cpp +++ b/media/jni/android_media_MediaProfiles.cpp @@ -286,6 +286,44 @@ android_media_MediaProfiles_native_get_image_encoding_quality_level(JNIEnv *env, } return static_cast<jint>(levels[index]); } +static jobject +android_media_MediaProfiles_native_get_videoeditor_profile(JNIEnv *env, jobject thiz) +{ + LOGV("native_get_videoeditor_profile"); + + int maxInputFrameWidth = + sProfiles->getVideoEditorCapParamByName("videoeditor.input.width.max"); + int maxInputFrameHeight = +
sProfiles->getVideoEditorCapParamByName("videoeditor.input.height.max"); + int maxOutputFrameWidth = + sProfiles->getVideoEditorCapParamByName("videoeditor.output.width.max"); + int maxOutputFrameHeight = + sProfiles->getVideoEditorCapParamByName("videoeditor.output.height.max"); + + // Check on the values retrieved + if (maxInputFrameWidth == -1 || maxInputFrameHeight == -1 || + maxOutputFrameWidth == -1 || maxOutputFrameHeight == -1) { + + jniThrowException(env, "java/lang/RuntimeException",\ + "Error retrieving videoeditor profile params"); + return NULL; + } + LOGV("native_get_videoeditor_profile \ + inWidth:%d inHeight:%d,outWidth:%d, outHeight:%d",\ + maxInputFrameWidth,maxInputFrameHeight,\ + maxOutputFrameWidth,maxOutputFrameHeight); + + jclass VideoEditorProfileClazz = + env->FindClass("android/media/videoeditor/VideoEditorProfile"); + jmethodID VideoEditorProfileConstructorMethodID = + env->GetMethodID(VideoEditorProfileClazz, "<init>", "(IIII)V"); + return env->NewObject(VideoEditorProfileClazz, + VideoEditorProfileConstructorMethodID, + maxInputFrameWidth, + maxInputFrameHeight, + maxOutputFrameWidth, + maxOutputFrameHeight); +} static JNINativeMethod gMethodsForEncoderCapabilitiesClass[] = { {"native_init", "()V", (void *)android_media_MediaProfiles_native_init}, @@ -324,10 +362,17 @@ static JNINativeMethod gMethodsForCameraProfileClass[] = { {"native_get_image_encoding_quality_level","(II)I", (void *)android_media_MediaProfiles_native_get_image_encoding_quality_level}, }; +static JNINativeMethod gMethodsForVideoEditorProfileClass[] = { + {"native_init", "()V", (void *)android_media_MediaProfiles_native_init}, + {"native_get_videoeditor_profile", "()Landroid/media/videoeditor/VideoEditorProfile;", + (void *)android_media_MediaProfiles_native_get_videoeditor_profile}, +}; + static const char* const kEncoderCapabilitiesClassPathName = "android/media/EncoderCapabilities"; static const char* const kDecoderCapabilitiesClassPathName = "android/media/DecoderCapabilities"; static const char* const kCamcorderProfileClassPathName = "android/media/CamcorderProfile"; static const char* const kCameraProfileClassPathName = "android/media/CameraProfile"; +static const char* const kVideoEditorProfileClassPathName = "android/media/videoeditor/VideoEditorProfile"; // This function only registers the native methods, and is called from // JNI_OnLoad in android_media_MediaPlayer.cpp @@ -353,6 +398,11 @@ int register_android_media_MediaProfiles(JNIEnv *env) gMethodsForCameraProfileClass, NELEM(gMethodsForCameraProfileClass)); + int ret5 = AndroidRuntime::registerNativeMethods(env, + kVideoEditorProfileClassPathName, + gMethodsForVideoEditorProfileClass, + NELEM(gMethodsForVideoEditorProfileClass)); + // Success if all return values from above are 0 - return (ret1 || ret2 || ret3 || ret4); + return (ret1 || ret2 || ret3 || ret4 || ret5); } diff --git a/media/jni/android_media_MediaRecorder.cpp b/media/jni/android_media_MediaRecorder.cpp index 12391c8..922f7ed 100644 --- a/media/jni/android_media_MediaRecorder.cpp +++ b/media/jni/android_media_MediaRecorder.cpp @@ -127,7 +127,7 @@ static bool process_media_recorder_call(JNIEnv *env, status_t opStatus, const ch return false; } -static sp<MediaRecorder> getMediaRecorder(JNIEnv* env, jobject thiz) +sp<MediaRecorder> getMediaRecorder(JNIEnv* env, jobject thiz) { Mutex::Autolock l(sLock); MediaRecorder* const p = (MediaRecorder*)env->GetIntField(thiz, fields.context); @@ -261,20 +261,6 @@ android_media_MediaRecorder_setOutputFileFD(JNIEnv *env, jobject 
thiz, jobject f } static void -android_media_MediaRecorder_setOutputFileAuxFD(JNIEnv *env, jobject thiz, jobject fileDescriptor) -{ - LOGV("setOutputFile"); - if (fileDescriptor == NULL) { - jniThrowException(env, "java/lang/IllegalArgumentException", NULL); - return; - } - int fd = jniGetFDFromFileDescriptor(env, fileDescriptor); - sp<MediaRecorder> mr = getMediaRecorder(env, thiz); - status_t opStatus = mr->setOutputFileAuxiliary(fd); - process_media_recorder_call(env, opStatus, "java/io/IOException", "setOutputFile failed."); -} - -static void android_media_MediaRecorder_setVideoSize(JNIEnv *env, jobject thiz, jint width, jint height) { LOGV("setVideoSize(%d, %d)", width, height); @@ -475,7 +461,6 @@ static JNINativeMethod gMethods[] = { {"setAudioEncoder", "(I)V", (void *)android_media_MediaRecorder_setAudioEncoder}, {"setParameter", "(Ljava/lang/String;)V", (void *)android_media_MediaRecorder_setParameter}, {"_setOutputFile", "(Ljava/io/FileDescriptor;JJ)V", (void *)android_media_MediaRecorder_setOutputFileFD}, - {"_setOutputFileAux", "(Ljava/io/FileDescriptor;)V", (void *)android_media_MediaRecorder_setOutputFileAuxFD}, {"setVideoSize", "(II)V", (void *)android_media_MediaRecorder_setVideoSize}, {"setVideoFrameRate", "(I)V", (void *)android_media_MediaRecorder_setVideoFrameRate}, {"setMaxDuration", "(I)V", (void *)android_media_MediaRecorder_setMaxDuration}, diff --git a/media/jni/android_media_MediaScanner.cpp b/media/jni/android_media_MediaScanner.cpp index d0d2d1e..b88296f 100644 --- a/media/jni/android_media_MediaScanner.cpp +++ b/media/jni/android_media_MediaScanner.cpp @@ -46,6 +46,16 @@ struct fields_t { }; static fields_t fields; +static status_t checkAndClearExceptionFromCallback(JNIEnv* env, const char* methodName) { + if (env->ExceptionCheck()) { + LOGE("An exception was thrown by callback '%s'.", methodName); + LOGE_EX(env); + env->ExceptionClear(); + return UNKNOWN_ERROR; + } + return OK; +} + class MyMediaScannerClient : public MediaScannerClient { public: @@ -86,9 +96,7 @@ public: mEnv->DeleteGlobalRef(mClient); } - // Returns true if it succeeded, false if an exception occured - // in the Java code - virtual bool scanFile(const char* path, long long lastModified, + virtual status_t scanFile(const char* path, long long lastModified, long long fileSize, bool isDirectory, bool noMedia) { LOGV("scanFile: path(%s), time(%lld), size(%lld) and isDir(%d)", @@ -96,27 +104,29 @@ public: jstring pathStr; if ((pathStr = mEnv->NewStringUTF(path)) == NULL) { - return false; + mEnv->ExceptionClear(); + return NO_MEMORY; } mEnv->CallVoidMethod(mClient, mScanFileMethodID, pathStr, lastModified, fileSize, isDirectory, noMedia); mEnv->DeleteLocalRef(pathStr); - return (!mEnv->ExceptionCheck()); + return checkAndClearExceptionFromCallback(mEnv, "scanFile"); } - // Returns true if it succeeded, false if an exception occured - // in the Java code - virtual bool handleStringTag(const char* name, const char* value) + virtual status_t handleStringTag(const char* name, const char* value) { LOGV("handleStringTag: name(%s) and value(%s)", name, value); jstring nameStr, valueStr; if ((nameStr = mEnv->NewStringUTF(name)) == NULL) { - return false; + mEnv->ExceptionClear(); + return NO_MEMORY; } if ((valueStr = mEnv->NewStringUTF(value)) == NULL) { - return false; + mEnv->DeleteLocalRef(nameStr); + mEnv->ExceptionClear(); + return NO_MEMORY; } mEnv->CallVoidMethod( @@ -124,23 +134,22 @@ public: mEnv->DeleteLocalRef(nameStr); mEnv->DeleteLocalRef(valueStr); - return (!mEnv->ExceptionCheck()); + 
return checkAndClearExceptionFromCallback(mEnv, "handleStringTag"); } - // Returns true if it succeeded, false if an exception occured - // in the Java code - virtual bool setMimeType(const char* mimeType) + virtual status_t setMimeType(const char* mimeType) { LOGV("setMimeType: %s", mimeType); jstring mimeTypeStr; if ((mimeTypeStr = mEnv->NewStringUTF(mimeType)) == NULL) { - return false; + mEnv->ExceptionClear(); + return NO_MEMORY; } mEnv->CallVoidMethod(mClient, mSetMimeTypeMethodID, mimeTypeStr); mEnv->DeleteLocalRef(mimeTypeStr); - return (!mEnv->ExceptionCheck()); + return checkAndClearExceptionFromCallback(mEnv, "setMimeType"); } private: @@ -152,12 +161,6 @@ private: }; -static bool ExceptionCheck(void* env) -{ - LOGV("ExceptionCheck"); - return ((JNIEnv *)env)->ExceptionCheck(); -} - static MediaScanner *getNativeScanner_l(JNIEnv* env, jobject thiz) { return (MediaScanner *) env->GetIntField(thiz, fields.context); @@ -190,7 +193,10 @@ android_media_MediaScanner_processDirectory( } MyMediaScannerClient myClient(env, client); - mp->processDirectory(pathStr, myClient, ExceptionCheck, env); + MediaScanResult result = mp->processDirectory(pathStr, myClient); + if (result == MEDIA_SCAN_RESULT_ERROR) { + LOGE("An error occurred while scanning directory '%s'.", pathStr); + } env->ReleaseStringUTFChars(path, pathStr); } @@ -227,7 +233,10 @@ android_media_MediaScanner_processFile( } MyMediaScannerClient myClient(env, client); - mp->processFile(pathStr, mimeTypeStr, myClient); + MediaScanResult result = mp->processFile(pathStr, mimeTypeStr, myClient); + if (result == MEDIA_SCAN_RESULT_ERROR) { + LOGE("An error occurred while scanning file '%s'.", pathStr); + } env->ReleaseStringUTFChars(path, pathStr); if (mimeType) { env->ReleaseStringUTFChars(mimeType, mimeTypeStr); diff --git a/media/jni/mediaeditor/VideoEditorClasses.cpp b/media/jni/mediaeditor/VideoEditorClasses.cpp index 277e16c..4c0e731 100755 --- a/media/jni/mediaeditor/VideoEditorClasses.cpp +++ b/media/jni/mediaeditor/VideoEditorClasses.cpp @@ -439,9 +439,10 @@ VIDEOEDIT_JAVA_DEFINE_CONSTANTS(VideoFrameSize) VIDEOEDIT_JAVA_CONSTANT_INIT("NTSC", M4VIDEOEDITING_kNTSC), VIDEOEDIT_JAVA_CONSTANT_INIT("nHD", M4VIDEOEDITING_k640_360), VIDEOEDIT_JAVA_CONSTANT_INIT("WVGA16x9", M4VIDEOEDITING_k854_480), - VIDEOEDIT_JAVA_CONSTANT_INIT("V720p", M4VIDEOEDITING_kHD1280), - VIDEOEDIT_JAVA_CONSTANT_INIT("W720p", M4VIDEOEDITING_kHD1080), - VIDEOEDIT_JAVA_CONSTANT_INIT("S720p", M4VIDEOEDITING_kHD960) + VIDEOEDIT_JAVA_CONSTANT_INIT("V720p", M4VIDEOEDITING_k1280_720), + VIDEOEDIT_JAVA_CONSTANT_INIT("W720p", M4VIDEOEDITING_k1080_720), + VIDEOEDIT_JAVA_CONSTANT_INIT("S720p", M4VIDEOEDITING_k960_720), + VIDEOEDIT_JAVA_CONSTANT_INIT("V1080p", M4VIDEOEDITING_k1920_1080) }; VIDEOEDIT_JAVA_DEFINE_CONSTANT_CLASS(VideoFrameSize, VIDEO_FRAME_SIZE_CLASS_NAME, diff --git a/media/jni/mediaeditor/VideoEditorPropertiesMain.cpp b/media/jni/mediaeditor/VideoEditorPropertiesMain.cpp index 9de7207..93fe702 100755 --- a/media/jni/mediaeditor/VideoEditorPropertiesMain.cpp +++ b/media/jni/mediaeditor/VideoEditorPropertiesMain.cpp @@ -214,18 +214,6 @@ jobject videoEditProp_getProperties( "Invalid File or File not found "); } - /** - * Max resolution supported is 1280 x 720. 
- */ - if ( (pClipProperties->uiVideoWidth > 1280) - || (pClipProperties->uiVideoHeight > 720) ) - { - result = M4MCS_ERR_INVALID_INPUT_VIDEO_FRAME_SIZE; - videoEditJava_checkAndThrowIllegalArgumentException( - &gotten, pEnv, (M4NO_ERROR != result), - "Unsupported input video frame size"); - } - #ifdef USE_SOFTWARE_DECODER /** * Input clip with non-multiples of 16 is not supported. diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf index e6a7b37..b8fa487 100644 --- a/media/libeffects/data/audio_effects.conf +++ b/media/libeffects/data/audio_effects.conf @@ -1,5 +1,10 @@ # List of effect libraries to load. Each library element must contain a "path" element # giving the full path of the library .so file. +# libraries { +# <lib name> { +# path <lib path> +# } +# } libraries { bundle { path /system/lib/soundfx/libbundlewrapper.so @@ -10,6 +15,9 @@ libraries { visualizer { path /system/lib/soundfx/libvisualizer.so } + pre_processing { + path /system/lib/soundfx/libaudiopreprocessing.so + } } # list of effects to load. Each effect element must contain a "library" and a "uuid" element. @@ -17,6 +25,16 @@ libraries { # "libraries" element. # The name of the effect element is indicative, only the value of the "uuid" element # designates the effect. +# The uuid is the implementation specific UUID as specified by the effect vendor. This is not the +# generic effect type UUID. +# effects { +# <fx name> { +# library <lib name> +# uuid <effect uuid> +# } +# ... +# } + effects { bassboost { library bundle @@ -54,4 +72,55 @@ effects { library visualizer uuid d069d9e0-8329-11df-9168-0002a5d5c51b } + agc { + library pre_processing + uuid aa8130e0-66fc-11e0-bad0-0002a5d5c51b + } + aec { + library pre_processing + uuid bb392ec0-8d4d-11e0-a896-0002a5d5c51b + } + ns { + library pre_processing + uuid c06c8400-8e06-11e0-9cb6-0002a5d5c51b + } } +# Audio preprocessor configurations. +# The pre processor configuration consists in a list of elements each describing +# pre processor settings for a given input source. Valid input source names are: +# "mic", "camcorder", "voice_recognition", "voice_communication" +# Each input source element contains a list of effects elements. The name of the effect +# element must be the name of one of the effects in the "effects" list of the file. +# Each effect element may optionally contain a list of parameters and their +# default value to apply when the pre processor effect is created. +# A parameter is defined by a "param" element and a "value" element. Each of these elements +# consists in one or more elements specifying a type followed by a value. +# The types defined are: "int", "short", "float", "bool" and "string" +# When both "param" and "value" are a single int, a simple form is allowed where just +# the param and value pair is present in the parameter description +# pre_processing { +# <input source name> { +# <fx name> { +# <param 1 name> { +# param { +# int|short|float|bool|string <value> +# [ int|short|float|bool|string <value> ] +# ... +# } +# value { +# int|short|float|bool|string <value> +# [ int|short|float|bool|string <value> ] +# ... +# } +# } +# <param 2 name > {<param> <value>} +# ... +# } +# ... +# } +# ... 
+# } + +# +# TODO: add default audio pre processor configurations after debug and tuning phase +# diff --git a/media/libeffects/factory/Android.mk b/media/libeffects/factory/Android.mk index 26265ae..2f2b974 100644 --- a/media/libeffects/factory/Android.mk +++ b/media/libeffects/factory/Android.mk @@ -14,4 +14,7 @@ LOCAL_MODULE:= libeffects LOCAL_SHARED_LIBRARIES += libdl +LOCAL_C_INCLUDES := \ + system/media/audio_effects/include + include $(BUILD_SHARED_LIBRARY) diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c index a9689bc..d333510 100644 --- a/media/libeffects/factory/EffectsFactory.c +++ b/media/libeffects/factory/EffectsFactory.c @@ -24,6 +24,7 @@ #include <cutils/misc.h> #include <cutils/config_utils.h> +#include <audio_effects/audio_effects_conf.h> static list_elem_t *gEffectList; // list of effect_entry_t: all currently created effects static list_elem_t *gLibraryList; // list of lib_entry_t: all currently loaded libraries diff --git a/media/libeffects/factory/EffectsFactory.h b/media/libeffects/factory/EffectsFactory.h index fcc0dba..c1d4319 100644 --- a/media/libeffects/factory/EffectsFactory.h +++ b/media/libeffects/factory/EffectsFactory.h @@ -26,13 +26,6 @@ extern "C" { #endif -#define AUDIO_EFFECT_DEFAULT_CONFIG_FILE "/system/etc/audio_effects.conf" -#define AUDIO_EFFECT_VENDOR_CONFIG_FILE "/vendor/etc/audio_effects.conf" -#define EFFECTS_TAG "effects" -#define LIBRARIES_TAG "libraries" -#define PATH_TAG "path" -#define LIBRARY_TAG "library" -#define UUID_TAG "uuid" typedef struct list_elem_s { void *object; diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp index 8d98900..3919551 100644 --- a/media/libmedia/AudioEffect.cpp +++ b/media/libmedia/AudioEffect.cpp @@ -47,11 +47,11 @@ AudioEffect::AudioEffect(const effect_uuid_t *type, effect_callback_t cbf, void* user, int sessionId, - audio_io_handle_t output + audio_io_handle_t io ) : mStatus(NO_INIT) { - mStatus = set(type, uuid, priority, cbf, user, sessionId, output); + mStatus = set(type, uuid, priority, cbf, user, sessionId, io); } AudioEffect::AudioEffect(const char *typeStr, @@ -60,7 +60,7 @@ AudioEffect::AudioEffect(const char *typeStr, effect_callback_t cbf, void* user, int sessionId, - audio_io_handle_t output + audio_io_handle_t io ) : mStatus(NO_INIT) { @@ -83,7 +83,7 @@ AudioEffect::AudioEffect(const char *typeStr, } } - mStatus = set(pType, pUuid, priority, cbf, user, sessionId, output); + mStatus = set(pType, pUuid, priority, cbf, user, sessionId, io); } status_t AudioEffect::set(const effect_uuid_t *type, @@ -92,13 +92,13 @@ status_t AudioEffect::set(const effect_uuid_t *type, effect_callback_t cbf, void* user, int sessionId, - audio_io_handle_t output) + audio_io_handle_t io) { sp<IEffect> iEffect; sp<IMemory> cblk; int enabled; - LOGV("set %p mUserData: %p", this, user); + LOGV("set %p mUserData: %p uuid: %p timeLow %08x", this, user, type, type ? 
type->timeLow : 0); if (mIEffect != 0) { LOGW("Effect already in use"); @@ -135,7 +135,7 @@ status_t AudioEffect::set(const effect_uuid_t *type, mIEffectClient = new EffectClient(this); iEffect = audioFlinger->createEffect(getpid(), (effect_descriptor_t *)&mDescriptor, - mIEffectClient, priority, output, mSessionId, &mStatus, &mId, &enabled); + mIEffectClient, priority, io, mSessionId, &mStatus, &mId, &enabled); if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) { LOGE("set(): AudioFlinger could not create effect, status: %d", mStatus); diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp index 4c4aad0..1ec596e 100644 --- a/media/libmedia/AudioRecord.cpp +++ b/media/libmedia/AudioRecord.cpp @@ -162,8 +162,19 @@ status_t AudioRecord::set( int channelCount = popcount(channelMask); + if (sessionId == 0 ) { + mSessionId = AudioSystem::newAudioSessionId(); + } else { + mSessionId = sessionId; + } + LOGV("set(): mSessionId %d", mSessionId); + audio_io_handle_t input = AudioSystem::getInput(inputSource, - sampleRate, format, channelMask, (audio_in_acoustics_t)flags); + sampleRate, + format, + channelMask, + (audio_in_acoustics_t)flags, + mSessionId); if (input == 0) { LOGE("Could not get audio input for record source %d", inputSource); return BAD_VALUE; @@ -187,8 +198,6 @@ status_t AudioRecord::set( notificationFrames = frameCount/2; } - mSessionId = sessionId; - // create the IAudioRecord status = openRecord_l(sampleRate, format, channelMask, frameCount, flags, input); @@ -589,8 +598,10 @@ audio_io_handle_t AudioRecord::getInput_l() { mInput = AudioSystem::getInput(mInputSource, mCblk->sampleRate, - mFormat, mChannelMask, - (audio_in_acoustics_t)mFlags); + mFormat, + mChannelMask, + (audio_in_acoustics_t)mFlags, + mSessionId); return mInput; } diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp index 6cb3847..5009957 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -605,11 +605,12 @@ audio_io_handle_t AudioSystem::getInput(int inputSource, uint32_t samplingRate, uint32_t format, uint32_t channels, - audio_in_acoustics_t acoustics) + audio_in_acoustics_t acoustics, + int sessionId) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return 0; - return aps->getInput(inputSource, samplingRate, format, channels, acoustics); + return aps->getInput(inputSource, samplingRate, format, channels, acoustics, sessionId); } status_t AudioSystem::startInput(audio_io_handle_t input) @@ -678,14 +679,14 @@ audio_io_handle_t AudioSystem::getOutputForEffect(effect_descriptor_t *desc) } status_t AudioSystem::registerEffect(effect_descriptor_t *desc, - audio_io_handle_t output, + audio_io_handle_t io, uint32_t strategy, int session, int id) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; - return aps->registerEffect(desc, output, strategy, session, id); + return aps->registerEffect(desc, io, strategy, session, id); } status_t AudioSystem::unregisterEffect(int id) @@ -695,9 +696,11 @@ status_t AudioSystem::unregisterEffect(int id) return aps->unregisterEffect(id); } -status_t AudioSystem::isStreamActive(int stream, bool* state, uint32_t inPastMs) { +status_t AudioSystem::isStreamActive(int stream, bool* state, uint32_t inPastMs) +{ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; + if (state == NULL) return BAD_VALUE; *state = 
aps->isStreamActive(stream, inPastMs); return NO_ERROR; } diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp index 9fbcee0..49d410f 100644 --- a/media/libmedia/IAudioPolicyService.cpp +++ b/media/libmedia/IAudioPolicyService.cpp @@ -184,7 +184,8 @@ public: uint32_t samplingRate, uint32_t format, uint32_t channels, - audio_in_acoustics_t acoustics) + audio_in_acoustics_t acoustics, + int audioSession) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); @@ -193,6 +194,7 @@ public: data.writeInt32(static_cast <uint32_t>(format)); data.writeInt32(channels); data.writeInt32(static_cast <uint32_t>(acoustics)); + data.writeInt32(audioSession); remote()->transact(GET_INPUT, data, &reply); return static_cast <audio_io_handle_t> (reply.readInt32()); } @@ -285,7 +287,7 @@ public: } virtual status_t registerEffect(effect_descriptor_t *desc, - audio_io_handle_t output, + audio_io_handle_t io, uint32_t strategy, int session, int id) @@ -293,7 +295,7 @@ public: Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.write(desc, sizeof(effect_descriptor_t)); - data.writeInt32(output); + data.writeInt32(io); data.writeInt32(strategy); data.writeInt32(session); data.writeInt32(id); @@ -439,11 +441,13 @@ status_t BnAudioPolicyService::onTransact( uint32_t channels = data.readInt32(); audio_in_acoustics_t acoustics = static_cast <audio_in_acoustics_t>(data.readInt32()); + int audioSession = data.readInt32(); audio_io_handle_t input = getInput(inputSource, samplingRate, format, channels, - acoustics); + acoustics, + audioSession); reply->writeInt32(static_cast <int>(input)); return NO_ERROR; } break; @@ -528,12 +532,12 @@ status_t BnAudioPolicyService::onTransact( CHECK_INTERFACE(IAudioPolicyService, data, reply); effect_descriptor_t desc; data.read(&desc, sizeof(effect_descriptor_t)); - audio_io_handle_t output = data.readInt32(); + audio_io_handle_t io = data.readInt32(); uint32_t strategy = data.readInt32(); int session = data.readInt32(); int id = data.readInt32(); reply->writeInt32(static_cast <int32_t>(registerEffect(&desc, - output, + io, strategy, session, id))); diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp index a44ef5a..7e44c29 100644 --- a/media/libmedia/IMediaRecorder.cpp +++ b/media/libmedia/IMediaRecorder.cpp @@ -23,14 +23,17 @@ #include <camera/ICamera.h> #include <media/IMediaRecorderClient.h> #include <media/IMediaRecorder.h> +#include <gui/ISurfaceTexture.h> #include <unistd.h> + namespace android { enum { RELEASE = IBinder::FIRST_CALL_TRANSACTION, INIT, CLOSE, + QUERY_SURFACE_MEDIASOURCE, RESET, STOP, START, @@ -71,6 +74,19 @@ public: return reply.readInt32(); } + sp<ISurfaceTexture> querySurfaceMediaSource() + { + LOGV("Query SurfaceMediaSource"); + Parcel data, reply; + data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor()); + remote()->transact(QUERY_SURFACE_MEDIASOURCE, data, &reply); + int returnedNull = reply.readInt32(); + if (returnedNull) { + return NULL; + } + return interface_cast<ISurfaceTexture>(reply.readStrongBinder()); + } + status_t setPreviewSurface(const sp<Surface>& surface) { LOGV("setPreviewSurface(%p)", surface.get()); @@ -440,6 +456,20 @@ status_t BnMediaRecorder::onTransact( reply->writeInt32(setCamera(camera, proxy)); return NO_ERROR; } break; + case QUERY_SURFACE_MEDIASOURCE: { + LOGV("QUERY_SURFACE_MEDIASOURCE"); + CHECK_INTERFACE(IMediaRecorder, data, reply); + // call the mediaserver side 
to create + // a surfacemediasource + sp<ISurfaceTexture> surfaceMediaSource = querySurfaceMediaSource(); + // The mediaserver might have failed to create a source + int returnedNull= (surfaceMediaSource == NULL) ? 1 : 0 ; + reply->writeInt32(returnedNull); + if (!returnedNull) { + reply->writeStrongBinder(surfaceMediaSource->asBinder()); + } + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp index 069bbb7..f0f07a2 100644 --- a/media/libmedia/MediaProfiles.cpp +++ b/media/libmedia/MediaProfiles.cpp @@ -132,6 +132,16 @@ MediaProfiles::logAudioDecoderCap(const MediaProfiles::AudioDecoderCap& cap) LOGV("codec = %d", cap.mCodec); } +/*static*/ void +MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap) +{ + LOGV("videoeditor cap:"); + LOGV("mMaxInputFrameWidth = %d", cap.mMaxInputFrameWidth); + LOGV("mMaxInputFrameHeight = %d", cap.mMaxInputFrameHeight); + LOGV("mMaxOutputFrameWidth = %d", cap.mMaxOutputFrameWidth); + LOGV("mMaxOutputFrameHeight = %d", cap.mMaxOutputFrameHeight); +} + /*static*/ int MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings, const char *name) { @@ -368,6 +378,24 @@ void MediaProfiles::addStartTimeOffset(int cameraId, const char** atts) mStartTimeOffsets.replaceValueFor(cameraId, offsetTimeMs); } +/*static*/ MediaProfiles::VideoEditorCap* +MediaProfiles::createVideoEditorCap(const char **atts, MediaProfiles *profiles) +{ + CHECK(!strcmp("maxInputFrameWidth", atts[0]) && + !strcmp("maxInputFrameHeight", atts[2]) && + !strcmp("maxOutputFrameWidth", atts[4]) && + !strcmp("maxOutputFrameHeight", atts[6])); + + MediaProfiles::VideoEditorCap *pVideoEditorCap = + new MediaProfiles::VideoEditorCap(atoi(atts[1]), atoi(atts[3]), + atoi(atts[5]), atoi(atts[7])); + + logVideoEditorCap(*pVideoEditorCap); + profiles->mVideoEditorCap = pVideoEditorCap; + + return pVideoEditorCap; +} + /*static*/ void MediaProfiles::startElementHandler(void *userData, const char *name, const char **atts) { @@ -398,6 +426,8 @@ MediaProfiles::startElementHandler(void *userData, const char *name, const char createCamcorderProfile(profiles->mCurrentCameraId, atts, profiles->mCameraIds)); } else if (strcmp("ImageEncoding", name) == 0) { profiles->addImageEncodingQualityLevel(profiles->mCurrentCameraId, atts); + } else if (strcmp("VideoEditorCap", name) == 0) { + createVideoEditorCap(atts, profiles); } } @@ -790,6 +820,17 @@ MediaProfiles::createDefaultImageEncodingQualityLevels(MediaProfiles *profiles) profiles->mImageEncodingQualityLevels.add(levels); } +/*static*/ void +MediaProfiles::createDefaultVideoEditorCap(MediaProfiles *profiles) +{ + profiles->mVideoEditorCap = + new MediaProfiles::VideoEditorCap( + VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH, + VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT, + VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH, + VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT); +} + /*static*/ MediaProfiles* MediaProfiles::createDefaultInstance() { @@ -801,6 +842,7 @@ MediaProfiles::createDefaultInstance() createDefaultAudioDecoders(profiles); createDefaultEncoderOutputFileFormats(profiles); createDefaultImageEncodingQualityLevels(profiles); + createDefaultVideoEditorCap(profiles); return profiles; } @@ -899,6 +941,28 @@ int MediaProfiles::getVideoEncoderParamByName(const char *name, video_encoder co return -1; } +int MediaProfiles::getVideoEditorCapParamByName(const char *name) const +{ + 
LOGV("getVideoEditorCapParamByName: %s", name); + + if (mVideoEditorCap == NULL) { + LOGE("The mVideoEditorCap is not created, then create default cap."); + createDefaultVideoEditorCap(sInstance); + } + + if (!strcmp("videoeditor.input.width.max", name)) + return mVideoEditorCap->mMaxInputFrameWidth; + if (!strcmp("videoeditor.input.height.max", name)) + return mVideoEditorCap->mMaxInputFrameHeight; + if (!strcmp("videoeditor.output.width.max", name)) + return mVideoEditorCap->mMaxOutputFrameWidth; + if (!strcmp("videoeditor.output.height.max", name)) + return mVideoEditorCap->mMaxOutputFrameHeight; + + LOGE("The given video editor param name %s is not found", name); + return -1; +} + Vector<audio_encoder> MediaProfiles::getAudioEncoders() const { Vector<audio_encoder> encoders; diff --git a/media/libmedia/MediaScanner.cpp b/media/libmedia/MediaScanner.cpp index 45bdff4..41f8593 100644 --- a/media/libmedia/MediaScanner.cpp +++ b/media/libmedia/MediaScanner.cpp @@ -47,16 +47,15 @@ const char *MediaScanner::locale() const { return mLocale; } -status_t MediaScanner::processDirectory( - const char *path, MediaScannerClient &client, - ExceptionCheck exceptionCheck, void *exceptionEnv) { +MediaScanResult MediaScanner::processDirectory( + const char *path, MediaScannerClient &client) { int pathLength = strlen(path); if (pathLength >= PATH_MAX) { - return UNKNOWN_ERROR; + return MEDIA_SCAN_RESULT_SKIPPED; } char* pathBuffer = (char *)malloc(PATH_MAX + 1); if (!pathBuffer) { - return UNKNOWN_ERROR; + return MEDIA_SCAN_RESULT_ERROR; } int pathRemaining = PATH_MAX - pathLength; @@ -69,21 +68,18 @@ status_t MediaScanner::processDirectory( client.setLocale(locale()); - status_t result = - doProcessDirectory(pathBuffer, pathRemaining, client, false, exceptionCheck, exceptionEnv); + MediaScanResult result = doProcessDirectory(pathBuffer, pathRemaining, client, false); free(pathBuffer); return result; } -status_t MediaScanner::doProcessDirectory( - char *path, int pathRemaining, MediaScannerClient &client, - bool noMedia, ExceptionCheck exceptionCheck, void *exceptionEnv) { +MediaScanResult MediaScanner::doProcessDirectory( + char *path, int pathRemaining, MediaScannerClient &client, bool noMedia) { // place to copy file or directory name char* fileSpot = path + strlen(path); struct dirent* entry; - struct stat statbuf; // Treat all files as non-media in directories that contain a ".nomedia" file if (pathRemaining >= 8 /* strlen(".nomedia") */ ) { @@ -99,76 +95,88 @@ status_t MediaScanner::doProcessDirectory( DIR* dir = opendir(path); if (!dir) { - LOGD("opendir %s failed, errno: %d", path, errno); - return UNKNOWN_ERROR; + LOGW("Error opening directory '%s', skipping: %s.", path, strerror(errno)); + return MEDIA_SCAN_RESULT_SKIPPED; } + MediaScanResult result = MEDIA_SCAN_RESULT_OK; while ((entry = readdir(dir))) { - const char* name = entry->d_name; - - // ignore "." and ".." - if (name[0] == '.' && (name[1] == 0 || (name[1] == '.' && name[2] == 0))) { - continue; + if (doProcessDirectoryEntry(path, pathRemaining, client, noMedia, entry, fileSpot) + == MEDIA_SCAN_RESULT_ERROR) { + result = MEDIA_SCAN_RESULT_ERROR; + break; } + } + closedir(dir); + return result; +} - int nameLength = strlen(name); - if (nameLength + 1 > pathRemaining) { - // path too long! - continue; - } - strcpy(fileSpot, name); - - int type = entry->d_type; - if (type == DT_UNKNOWN) { - // If the type is unknown, stat() the file instead. 
- // This is sometimes necessary when accessing NFS mounted filesystems, but - // could be needed in other cases well. - if (stat(path, &statbuf) == 0) { - if (S_ISREG(statbuf.st_mode)) { - type = DT_REG; - } else if (S_ISDIR(statbuf.st_mode)) { - type = DT_DIR; - } - } else { - LOGD("stat() failed for %s: %s", path, strerror(errno) ); +MediaScanResult MediaScanner::doProcessDirectoryEntry( + char *path, int pathRemaining, MediaScannerClient &client, bool noMedia, + struct dirent* entry, char* fileSpot) { + struct stat statbuf; + const char* name = entry->d_name; + + // ignore "." and ".." + if (name[0] == '.' && (name[1] == 0 || (name[1] == '.' && name[2] == 0))) { + return MEDIA_SCAN_RESULT_SKIPPED; + } + + int nameLength = strlen(name); + if (nameLength + 1 > pathRemaining) { + // path too long! + return MEDIA_SCAN_RESULT_SKIPPED; + } + strcpy(fileSpot, name); + + int type = entry->d_type; + if (type == DT_UNKNOWN) { + // If the type is unknown, stat() the file instead. + // This is sometimes necessary when accessing NFS mounted filesystems, but + // could be needed in other cases well. + if (stat(path, &statbuf) == 0) { + if (S_ISREG(statbuf.st_mode)) { + type = DT_REG; + } else if (S_ISDIR(statbuf.st_mode)) { + type = DT_DIR; } + } else { + LOGD("stat() failed for %s: %s", path, strerror(errno) ); } - if (type == DT_REG || type == DT_DIR) { - if (type == DT_DIR) { - bool childNoMedia = noMedia; - // set noMedia flag on directories with a name that starts with '.' - // for example, the Mac ".Trashes" directory - if (name[0] == '.') - childNoMedia = true; - - // report the directory to the client - if (stat(path, &statbuf) == 0) { - client.scanFile(path, statbuf.st_mtime, 0, true, childNoMedia); - } - - // and now process its contents - strcat(fileSpot, "/"); - int err = doProcessDirectory(path, pathRemaining - nameLength - 1, client, - childNoMedia, exceptionCheck, exceptionEnv); - if (err) { - // pass exceptions up - ignore other errors - if (exceptionCheck && exceptionCheck(exceptionEnv)) goto failure; - LOGE("Error processing '%s' - skipping\n", path); - continue; - } - } else { - stat(path, &statbuf); - client.scanFile(path, statbuf.st_mtime, statbuf.st_size, false, noMedia); - if (exceptionCheck && exceptionCheck(exceptionEnv)) goto failure; + } + if (type == DT_DIR) { + bool childNoMedia = noMedia; + // set noMedia flag on directories with a name that starts with '.' 
+ // for example, the Mac ".Trashes" directory + if (name[0] == '.') + childNoMedia = true; + + // report the directory to the client + if (stat(path, &statbuf) == 0) { + status_t status = client.scanFile(path, statbuf.st_mtime, 0, + true /*isDirectory*/, childNoMedia); + if (status) { + return MEDIA_SCAN_RESULT_ERROR; } } + + // and now process its contents + strcat(fileSpot, "/"); + MediaScanResult result = doProcessDirectory(path, pathRemaining - nameLength - 1, + client, childNoMedia); + if (result == MEDIA_SCAN_RESULT_ERROR) { + return MEDIA_SCAN_RESULT_ERROR; + } + } else if (type == DT_REG) { + stat(path, &statbuf); + status_t status = client.scanFile(path, statbuf.st_mtime, statbuf.st_size, + false /*isDirectory*/, noMedia); + if (status) { + return MEDIA_SCAN_RESULT_ERROR; + } } - closedir(dir); - return OK; -failure: - closedir(dir); - return -1; + return MEDIA_SCAN_RESULT_OK; } } // namespace android diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp index bd3596e..7a7aeb6 100644 --- a/media/libmedia/MediaScannerClient.cpp +++ b/media/libmedia/MediaScannerClient.cpp @@ -62,7 +62,7 @@ void MediaScannerClient::beginFile() mValues = new StringArray; } -bool MediaScannerClient::addStringTag(const char* name, const char* value) +status_t MediaScannerClient::addStringTag(const char* name, const char* value) { if (mLocaleEncoding != kEncodingNone) { // don't bother caching strings that are all ASCII. @@ -212,8 +212,10 @@ void MediaScannerClient::endFile() // finally, push all name/value pairs to the client for (int i = 0; i < mNames->size(); i++) { - if (!handleStringTag(mNames->getEntry(i), mValues->getEntry(i))) + status_t status = handleStringTag(mNames->getEntry(i), mValues->getEntry(i)); + if (status) { break; + } } } // else addStringTag() has done all the work so we have nothing to do diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp index 178039c..a11fb80 100644 --- a/media/libmedia/mediaplayer.cpp +++ b/media/libmedia/mediaplayer.cpp @@ -84,6 +84,8 @@ void MediaPlayer::disconnect() if (p != 0) { p->disconnect(); } + + disconnectNativeWindow(); } // always call with lock held @@ -226,6 +228,7 @@ status_t MediaPlayer::setVideoSurface(const sp<Surface>& surface) NATIVE_WINDOW_API_MEDIA); if (err != OK) { + LOGE("setVideoSurface failed: %d", err); // Note that we must do the reset before disconnecting from the ANW. // Otherwise queue/dequeue calls could be made on the disconnected // ANW, which may result in errors. @@ -275,6 +278,7 @@ status_t MediaPlayer::setVideoSurfaceTexture( NATIVE_WINDOW_API_MEDIA); if (err != OK) { + LOGE("setVideoSurfaceTexture failed: %d", err); // Note that we must do the reset before disconnecting from the ANW. // Otherwise queue/dequeue calls could be made on the disconnected // ANW, which may result in errors. 
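The MediaScanner changes above replace status_t with a tri-state MediaScanResult, so that per-entry problems (a path that is too long, an unreadable directory) come back as MEDIA_SCAN_RESULT_SKIPPED and no longer abort a whole directory walk, while client-side failures still stop it as MEDIA_SCAN_RESULT_ERROR. A minimal sketch of how a caller might branch on the new result; the wrapper name scanAndReport is illustrative and not part of this change:

    // Hypothetical caller of the new API; only the enum values and the
    // processDirectory() signature are taken from this patch.
    static MediaScanResult scanAndReport(MediaScanner &scanner,
                                         MediaScannerClient &client,
                                         const char *dir) {
        MediaScanResult result = scanner.processDirectory(dir, client);
        switch (result) {
        case MEDIA_SCAN_RESULT_OK:       // everything was scanned
            break;
        case MEDIA_SCAN_RESULT_SKIPPED:  // non-fatal, e.g. an unreadable subtree
            break;
        case MEDIA_SCAN_RESULT_ERROR:    // fatal, e.g. the client rejected data
            break;
        }
        return result;
    }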
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp index 9e4edd0..fab674c 100644 --- a/media/libmedia/mediarecorder.cpp +++ b/media/libmedia/mediarecorder.cpp @@ -25,6 +25,7 @@ #include <media/IMediaPlayerService.h> #include <media/IMediaRecorder.h> #include <media/mediaplayer.h> // for MEDIA_ERROR_SERVER_DIED +#include <gui/ISurfaceTexture.h> namespace android { @@ -127,7 +128,9 @@ status_t MediaRecorder::setVideoSource(int vs) return INVALID_OPERATION; } + // the following call is made over the Binder interface status_t ret = mMediaRecorder->setVideoSource(vs); + if (OK != ret) { LOGV("setVideoSource failed: %d", ret); mCurrentState = MEDIA_RECORDER_ERROR; @@ -357,7 +360,7 @@ status_t MediaRecorder::setVideoSize(int width, int height) return INVALID_OPERATION; } if (!mIsVideoSourceSet) { - LOGE("try to set video size without setting video source first"); + LOGE("Cannot set video size without setting video source first"); return INVALID_OPERATION; } @@ -367,9 +370,27 @@ status_t MediaRecorder::setVideoSize(int width, int height) mCurrentState = MEDIA_RECORDER_ERROR; return ret; } + return ret; } +// Query a SurfaceMediaSource through the mediaserver, over the +// binder interface. This is used by the Filter Framework (MediaEncoder) +// to get an <ISurfaceTexture> object to hook up to an ANativeWindow. +sp<ISurfaceTexture> MediaRecorder:: + querySurfaceMediaSourceFromMediaServer() +{ + Mutex::Autolock _l(mLock); + mSurfaceMediaSource = + mMediaRecorder->querySurfaceMediaSource(); + if (mSurfaceMediaSource == NULL) { + LOGE("SurfaceMediaSource could not be initialized!"); + } + return mSurfaceMediaSource; +} + + + status_t MediaRecorder::setVideoFrameRate(int frames_per_second) { LOGV("setVideoFrameRate(%d)", frames_per_second); @@ -382,7 +403,7 @@ status_t MediaRecorder::setVideoFrameRate(int frames_per_second) return INVALID_OPERATION; } if (!mIsVideoSourceSet) { - LOGE("try to set video frame rate without setting video source first"); + LOGE("Cannot set video frame rate without setting video source first"); return INVALID_OPERATION; } @@ -621,7 +642,7 @@ status_t MediaRecorder::release() return INVALID_OPERATION; } -MediaRecorder::MediaRecorder() +MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL) { LOGV("constructor"); @@ -632,6 +653,8 @@ MediaRecorder::MediaRecorder() if (mMediaRecorder != NULL) { mCurrentState = MEDIA_RECORDER_IDLE; } + + doCleanUp(); } @@ -646,6 +669,10 @@ MediaRecorder::~MediaRecorder() if (mMediaRecorder != NULL) { mMediaRecorder.clear(); } + + if (mSurfaceMediaSource != NULL) { + mSurfaceMediaSource.clear(); + } } status_t MediaRecorder::setListener(const sp<MediaRecorderListener>& listener) diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp index 115db1a..905b885 100644 --- a/media/libmediaplayerservice/MediaRecorderClient.cpp +++ b/media/libmediaplayerservice/MediaRecorderClient.cpp @@ -41,6 +41,7 @@ #include "MediaPlayerService.h" #include "StagefrightRecorder.h" +#include <gui/ISurfaceTexture.h> namespace android { @@ -57,6 +58,20 @@ static bool checkPermission(const char* permissionString) { return ok; } + +sp<ISurfaceTexture> MediaRecorderClient::querySurfaceMediaSource() +{ + LOGV("Query SurfaceMediaSource"); + Mutex::Autolock lock(mLock); + if (mRecorder == NULL) { + LOGE("recorder is not initialized"); + return NULL; + } + return mRecorder->querySurfaceMediaSource(); +} + + + status_t MediaRecorderClient::setCamera(const sp<ICamera>& camera, const
sp<ICameraRecordingProxy>& proxy) { diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h index bbca529..c87a3c0 100644 --- a/media/libmediaplayerservice/MediaRecorderClient.h +++ b/media/libmediaplayerservice/MediaRecorderClient.h @@ -25,45 +25,51 @@ namespace android { class MediaRecorderBase; class MediaPlayerService; class ICameraRecordingProxy; +class ISurfaceTexture; class MediaRecorderClient : public BnMediaRecorder { public: - virtual status_t setCamera(const sp<ICamera>& camera, - const sp<ICameraRecordingProxy>& proxy); - virtual status_t setPreviewSurface(const sp<Surface>& surface); - virtual status_t setVideoSource(int vs); - virtual status_t setAudioSource(int as); - virtual status_t setOutputFormat(int of); - virtual status_t setVideoEncoder(int ve); - virtual status_t setAudioEncoder(int ae); - virtual status_t setOutputFile(const char* path); - virtual status_t setOutputFile(int fd, int64_t offset, int64_t length); - virtual status_t setOutputFileAuxiliary(int fd); - virtual status_t setVideoSize(int width, int height); - virtual status_t setVideoFrameRate(int frames_per_second); - virtual status_t setParameters(const String8& params); - virtual status_t setListener(const sp<IMediaRecorderClient>& listener); - virtual status_t prepare(); - virtual status_t getMaxAmplitude(int* max); - virtual status_t start(); - virtual status_t stop(); - virtual status_t reset(); - virtual status_t init(); - virtual status_t close(); - virtual status_t release(); + virtual status_t setCamera(const sp<ICamera>& camera, + const sp<ICameraRecordingProxy>& proxy); + virtual status_t setPreviewSurface(const sp<Surface>& surface); + virtual status_t setVideoSource(int vs); + virtual status_t setAudioSource(int as); + virtual status_t setOutputFormat(int of); + virtual status_t setVideoEncoder(int ve); + virtual status_t setAudioEncoder(int ae); + virtual status_t setOutputFile(const char* path); + virtual status_t setOutputFile(int fd, int64_t offset, + int64_t length); + virtual status_t setOutputFileAuxiliary(int fd); + virtual status_t setVideoSize(int width, int height); + virtual status_t setVideoFrameRate(int frames_per_second); + virtual status_t setParameters(const String8& params); + virtual status_t setListener( + const sp<IMediaRecorderClient>& listener); + virtual status_t prepare(); + virtual status_t getMaxAmplitude(int* max); + virtual status_t start(); + virtual status_t stop(); + virtual status_t reset(); + virtual status_t init(); + virtual status_t close(); + virtual status_t release(); + virtual status_t dump(int fd, const Vector<String16>& args) const; + virtual sp<ISurfaceTexture> querySurfaceMediaSource(); - virtual status_t dump(int fd, const Vector<String16>& args) const; private: - friend class MediaPlayerService; // for accessing private constructor + friend class MediaPlayerService; // for accessing private constructor - MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid); - virtual ~MediaRecorderClient(); + MediaRecorderClient( + const sp<MediaPlayerService>& service, + pid_t pid); + virtual ~MediaRecorderClient(); - pid_t mPid; - Mutex mLock; - MediaRecorderBase *mRecorder; - sp<MediaPlayerService> mMediaPlayerService; + pid_t mPid; + Mutex mLock; + MediaRecorderBase *mRecorder; + sp<MediaPlayerService> mMediaPlayerService; }; }; // namespace android diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index 
223e0be..6427bb7 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -38,10 +38,12 @@ #include <media/stagefright/MetaData.h> #include <media/stagefright/OMXClient.h> #include <media/stagefright/OMXCodec.h> +#include <media/stagefright/SurfaceMediaSource.h> #include <media/MediaProfiles.h> #include <camera/ICamera.h> #include <camera/CameraParameters.h> #include <surfaceflinger/Surface.h> + #include <utils/Errors.h> #include <sys/types.h> #include <ctype.h> @@ -69,7 +71,7 @@ StagefrightRecorder::StagefrightRecorder() mOutputFd(-1), mOutputFdAux(-1), mAudioSource(AUDIO_SOURCE_CNT), mVideoSource(VIDEO_SOURCE_LIST_END), - mStarted(false) { + mStarted(false), mSurfaceMediaSource(NULL) { LOGV("Constructor"); reset(); @@ -85,6 +87,14 @@ status_t StagefrightRecorder::init() { return OK; } +// The client side of mediaserver asks it to create a SurfaceMediaSource +// and return an interface reference. The client side will use that +// while encoding GL frames +sp<ISurfaceTexture> StagefrightRecorder::querySurfaceMediaSource() const { + LOGV("Get SurfaceMediaSource"); + return mSurfaceMediaSource; +} + status_t StagefrightRecorder::setAudioSource(audio_source_t as) { LOGV("setAudioSource: %d", as); if (as < AUDIO_SOURCE_DEFAULT || @@ -1006,13 +1016,13 @@ status_t StagefrightRecorder::startRTPRecording() { source = createAudioSource(); } else { - sp<CameraSource> cameraSource; - status_t err = setupCameraSource(&cameraSource); + sp<MediaSource> mediaSource; + status_t err = setupMediaSource(&mediaSource); if (err != OK) { return err; } - err = setupVideoEncoder(cameraSource, mVideoBitRate, &source); + err = setupVideoEncoder(mediaSource, mVideoBitRate, &source); if (err != OK) { return err; } @@ -1042,20 +1052,19 @@ status_t StagefrightRecorder::startMPEG2TSRecording() { } } - if (mVideoSource == VIDEO_SOURCE_DEFAULT - || mVideoSource == VIDEO_SOURCE_CAMERA) { + if (mVideoSource < VIDEO_SOURCE_LIST_END) { if (mVideoEncoder != VIDEO_ENCODER_H264) { return ERROR_UNSUPPORTED; } - sp<CameraSource> cameraSource; - status_t err = setupCameraSource(&cameraSource); + sp<MediaSource> mediaSource; + status_t err = setupMediaSource(&mediaSource); if (err != OK) { return err; } sp<MediaSource> encoder; - err = setupVideoEncoder(cameraSource, mVideoBitRate, &encoder); + err = setupVideoEncoder(mediaSource, mVideoBitRate, &encoder); if (err != OK) { return err; @@ -1289,6 +1298,60 @@ void StagefrightRecorder::clipVideoFrameHeight() { } } +// Set up the appropriate MediaSource depending on the chosen option +status_t StagefrightRecorder::setupMediaSource( + sp<MediaSource> *mediaSource) { + if (mVideoSource == VIDEO_SOURCE_DEFAULT + || mVideoSource == VIDEO_SOURCE_CAMERA) { + sp<CameraSource> cameraSource; + status_t err = setupCameraSource(&cameraSource); + if (err != OK) { + return err; + } + *mediaSource = cameraSource; + } else if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) { + // If using gralloc buffers, set up a SurfaceMediaSource. + // A handle to it will later be passed + // to the client side when queried + status_t err = setupSurfaceMediaSource(); + if (err != OK) { + return err; + } + *mediaSource = mSurfaceMediaSource; + } else { + return INVALID_OPERATION; + } + return OK; +} + +// setupSurfaceMediaSource creates a source with the given +// width, height, and frame rate.
+// TODO: This could go in a static function inside SurfaceMediaSource +// similar to that in CameraSource +status_t StagefrightRecorder::setupSurfaceMediaSource() { + status_t err = OK; + mSurfaceMediaSource = new SurfaceMediaSource(mVideoWidth, mVideoHeight); + if (mSurfaceMediaSource == NULL) { + return NO_INIT; + } + + if (mFrameRate == -1) { + int32_t frameRate = 0; + CHECK (mSurfaceMediaSource->getFormat()->findInt32( + kKeyFrameRate, &frameRate)); + LOGI("Frame rate is not explicitly set. Use the current frame " + "rate (%d fps)", frameRate); + mFrameRate = frameRate; + } else { + err = mSurfaceMediaSource->setFrameRate(mFrameRate); + } + CHECK(mFrameRate != -1); + + mIsMetaDataStoredInVideoBuffers = + mSurfaceMediaSource->isMetaDataStoredInVideoBuffers(); + return err; +} + status_t StagefrightRecorder::setupCameraSource( sp<CameraSource> *cameraSource) { status_t err = OK; @@ -1465,29 +1528,37 @@ status_t StagefrightRecorder::setupMPEG4Recording( status_t err = OK; sp<MediaWriter> writer = new MPEG4Writer(outputFd); - if (mVideoSource == VIDEO_SOURCE_DEFAULT - || mVideoSource == VIDEO_SOURCE_CAMERA) { + if (mVideoSource < VIDEO_SOURCE_LIST_END) { - sp<MediaSource> cameraMediaSource; + sp<MediaSource> mediaSource; if (useSplitCameraSource) { + // TODO: Check if there is a better way to handle this + if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) { + LOGE("Cannot use split camera when encoding frames"); + return INVALID_OPERATION; + } LOGV("Using Split camera source"); - cameraMediaSource = mCameraSourceSplitter->createClient(); + mediaSource = mCameraSourceSplitter->createClient(); } else { - sp<CameraSource> cameraSource; - err = setupCameraSource(&cameraSource); - cameraMediaSource = cameraSource; + err = setupMediaSource(&mediaSource); } + if ((videoWidth != mVideoWidth) || (videoHeight != mVideoHeight)) { + // TODO: Might be able to handle downsampling even if using GRAlloc + if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) { + LOGE("Cannot change size or Downsample when encoding frames"); + return INVALID_OPERATION; + } // Use downsampling from the original source. 
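Taken together with MediaRecorder::querySurfaceMediaSourceFromMediaServer() above, the intended flow for the VIDEO_SOURCE_GRALLOC_BUFFER path looks roughly as follows. This is a sketch under assumptions: the exact ordering around prepare() and the ANativeWindow hookup are not spelled out in this patch.

    // Client-side sketch; error handling omitted.
    sp<MediaRecorder> recorder = new MediaRecorder();
    recorder->setVideoSource(VIDEO_SOURCE_GRALLOC_BUFFER);
    // ... setOutputFormat(), setVideoEncoder(), setOutputFile(), prepare() ...
    sp<ISurfaceTexture> st =
            recorder->querySurfaceMediaSourceFromMediaServer();
    // Hook the returned ISurfaceTexture up to an ANativeWindow and render
    // GL frames into it; each queued buffer becomes a frame that
    // SurfaceMediaSource::read() hands to the video encoder.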
- cameraMediaSource = - new VideoSourceDownSampler(cameraMediaSource, videoWidth, videoHeight); + mediaSource = + new VideoSourceDownSampler(mediaSource, videoWidth, videoHeight); } if (err != OK) { return err; } sp<MediaSource> encoder; - err = setupVideoEncoder(cameraMediaSource, videoBitRate, &encoder); + err = setupVideoEncoder(mediaSource, videoBitRate, &encoder); if (err != OK) { return err; } diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h index 034b373..1618b92 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.h +++ b/media/libmediaplayerservice/StagefrightRecorder.h @@ -36,6 +36,8 @@ struct MediaWriter; class MetaData; struct AudioSource; class MediaProfiles; +class ISurfaceTexture; +class SurfaceMediaSource; struct StagefrightRecorder : public MediaRecorderBase { StagefrightRecorder(); @@ -64,6 +66,8 @@ struct StagefrightRecorder : public MediaRecorderBase { virtual status_t reset(); virtual status_t getMaxAmplitude(int *max); virtual status_t dump(int fd, const Vector<String16>& args) const; + // Querying a SurfaceMediaSource + virtual sp<ISurfaceTexture> querySurfaceMediaSource() const; private: sp<ICamera> mCamera; @@ -109,12 +113,18 @@ private: sp<MediaSourceSplitter> mCameraSourceSplitter; sp<CameraSourceTimeLapse> mCameraSourceTimeLapse; + String8 mParams; bool mIsMetaDataStoredInVideoBuffers; MediaProfiles *mEncoderProfiles; bool mStarted; + // Needed when GL frames are encoded. + // An <ISurfaceTexture> pointer + // will be sent to the client side, through which + // frame buffers will be queued and dequeued + sp<SurfaceMediaSource> mSurfaceMediaSource; status_t setupMPEG4Recording( bool useSplitCameraSource, @@ -134,7 +144,14 @@ private: sp<MediaSource> createAudioSource(); status_t checkVideoEncoderCapabilities(); status_t checkAudioEncoderCapabilities(); + // Generic MediaSource set-up. Returns the appropriate + // source (CameraSource or SurfaceMediaSource) + // depending on the video source type + status_t setupMediaSource(sp<MediaSource> *mediaSource); status_t setupCameraSource(sp<CameraSource> *cameraSource); + // set up the SurfaceMediaSource for the encoder + status_t setupSurfaceMediaSource(); + status_t setupAudioEncoder(const sp<MediaWriter>& writer); status_t setupVideoEncoder( sp<MediaSource> cameraSource, @@ -176,6 +193,7 @@ private: void clipNumberOfAudioChannels(); void setDefaultProfileIfNecessary(); + StagefrightRecorder(const StagefrightRecorder &); StagefrightRecorder &operator=(const StagefrightRecorder &); }; diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk index e17e1e8..3a3c082 100644 --- a/media/libstagefright/Android.mk +++ b/media/libstagefright/Android.mk @@ -42,6 +42,7 @@ LOCAL_SRC_FILES:= \ SampleTable.cpp \ StagefrightMediaScanner.cpp \ StagefrightMetadataRetriever.cpp \ + SurfaceMediaSource.cpp \ ThrottledSource.cpp \ TimeSource.cpp \ TimedEventQueue.cpp \ diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index 1bc2fb9..de66d99 100755 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -179,9 +179,6 @@ status_t CameraSource::isCameraAvailable( if (camera == 0) { mCamera = Camera::connect(cameraId); if (mCamera == 0) return -EBUSY; - // If proxy is not passed in by applications, still use the proxy of - // our own Camera to simplify the code.
- mCameraRecordingProxy = mCamera->getRecordingProxy(); mCameraFlags &= ~FLAGS_HOT_CAMERA; } else { // We get the proxy from Camera, not ICamera. We need to get the proxy @@ -192,12 +189,12 @@ status_t CameraSource::isCameraAvailable( if (mCamera == 0) return -EBUSY; mCameraRecordingProxy = proxy; mCameraFlags |= FLAGS_HOT_CAMERA; + mDeathNotifier = new DeathNotifier(); + // isBinderAlive needs linkToDeath to work. + mCameraRecordingProxy->asBinder()->linkToDeath(mDeathNotifier); } mCamera->lock(); - mDeathNotifier = new DeathNotifier(); - // isBinderAlive needs linkToDeath to work. - mCameraRecordingProxy->asBinder()->linkToDeath(mDeathNotifier); return OK; } @@ -292,7 +289,7 @@ status_t CameraSource::configureCamera( CameraParameters* params, int32_t width, int32_t height, int32_t frameRate) { - + LOGV("configureCamera"); Vector<Size> sizes; bool isSetVideoSizeSupportedByCamera = true; getSupportedVideoSizes(*params, &isSetVideoSizeSupportedByCamera, sizes); @@ -368,6 +365,7 @@ status_t CameraSource::checkVideoSize( const CameraParameters& params, int32_t width, int32_t height) { + LOGV("checkVideoSize"); // The actual video size is the same as the preview size // if the camera hal does not support separate video and // preview output. In this case, we retrieve the video @@ -419,6 +417,7 @@ status_t CameraSource::checkFrameRate( const CameraParameters& params, int32_t frameRate) { + LOGV("checkFrameRate"); int32_t frameRateActual = params.getPreviewFrameRate(); if (frameRateActual < 0) { LOGE("Failed to retrieve preview frame rate (%d)", frameRateActual); @@ -464,6 +463,7 @@ status_t CameraSource::init( int32_t frameRate, bool storeMetaDataInVideoBuffers) { + LOGV("init"); status_t err = OK; int64_t token = IPCThreadState::self()->clearCallingIdentity(); err = initWithCameraAccess(camera, proxy, cameraId, @@ -480,6 +480,7 @@ status_t CameraSource::initWithCameraAccess( Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers) { + LOGV("initWithCameraAccess"); status_t err = OK; if ((err = isCameraAvailable(camera, proxy, cameraId)) != OK) { @@ -552,17 +553,25 @@ CameraSource::~CameraSource() { } void CameraSource::startCameraRecording() { + LOGV("startCameraRecording"); // Reset the identity to the current thread because media server owns the // camera and recording is started by the applications. The applications // will connect to the camera in ICameraRecordingProxy::startRecording. 
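The clearCallingIdentity()/restoreCallingIdentity() bracket used here (and again in releaseRecordingFrame() below) is easy to leave unbalanced on early returns. A small scoped guard would make the pairing automatic; this type is an illustration only and is not part of this change:

    // Hypothetical RAII guard around the Binder calling identity.
    struct AutoCallingIdentity {
        AutoCallingIdentity()
            : mToken(IPCThreadState::self()->clearCallingIdentity()) {}
        ~AutoCallingIdentity() {
            IPCThreadState::self()->restoreCallingIdentity(mToken);
        }
      private:
        const int64_t mToken;
    };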
int64_t token = IPCThreadState::self()->clearCallingIdentity(); - mCamera->unlock(); - mCamera.clear(); + if (mCameraFlags & FLAGS_HOT_CAMERA) { + mCamera->unlock(); + mCamera.clear(); + CHECK_EQ(OK, mCameraRecordingProxy->startRecording(new ProxyListener(this))); + } else { + mCamera->setListener(new CameraSourceListener(this)); + mCamera->startRecording(); + CHECK(mCamera->recordingEnabled()); + } IPCThreadState::self()->restoreCallingIdentity(token); - CHECK_EQ(OK, mCameraRecordingProxy->startRecording(new ProxyListener(this))); } status_t CameraSource::start(MetaData *meta) { + LOGV("start"); CHECK(!mStarted); if (mInitCheck != OK) { LOGE("CameraSource is not initialized yet"); @@ -588,7 +597,13 @@ status_t CameraSource::start(MetaData *meta) { } void CameraSource::stopCameraRecording() { - mCameraRecordingProxy->stopRecording(); + LOGV("stopCameraRecording"); + if (mCameraFlags & FLAGS_HOT_CAMERA) { + mCameraRecordingProxy->stopRecording(); + } else { + mCamera->setListener(NULL); + mCamera->stopRecording(); + } } void CameraSource::releaseCamera() { @@ -599,11 +614,10 @@ void CameraSource::releaseCamera() { LOGV("Camera was cold when we started, stopping preview"); mCamera->stopPreview(); mCamera->disconnect(); - } else { - // Unlock the camera so the application can lock it back. - mCamera->unlock(); } + mCamera->unlock(); mCamera.clear(); + mCamera = 0; IPCThreadState::self()->restoreCallingIdentity(token); } if (mCameraRecordingProxy != 0) { @@ -646,8 +660,13 @@ status_t CameraSource::stop() { } void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) { + LOGV("releaseRecordingFrame"); if (mCameraRecordingProxy != NULL) { mCameraRecordingProxy->releaseRecordingFrame(frame); + } else { + int64_t token = IPCThreadState::self()->clearCallingIdentity(); + mCamera->releaseRecordingFrame(frame); + IPCThreadState::self()->restoreCallingIdentity(token); } } @@ -707,7 +726,8 @@ status_t CameraSource::read( while (mStarted && mFramesReceived.empty()) { if (NO_ERROR != mFrameAvailableCondition.waitRelative(mLock, 1000000000LL)) { - if (!mCameraRecordingProxy->asBinder()->isBinderAlive()) { + if (mCameraRecordingProxy != 0 && + !mCameraRecordingProxy->asBinder()->isBinderAlive()) { LOGW("camera recording proxy is gone"); return ERROR_END_OF_STREAM; } diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp index 77a6602..4edb613 100644 --- a/media/libstagefright/NuCachedSource2.cpp +++ b/media/libstagefright/NuCachedSource2.cpp @@ -185,7 +185,8 @@ NuCachedSource2::NuCachedSource2(const sp<DataSource> &source) mFinalStatus(OK), mLastAccessPos(0), mFetching(true), - mLastFetchTimeUs(-1) { + mLastFetchTimeUs(-1), + mNumRetriesLeft(kMaxNumRetries) { mLooper->setName("NuCachedSource2"); mLooper->registerHandler(mReflector); mLooper->start(); @@ -254,7 +255,27 @@ void NuCachedSource2::onMessageReceived(const sp<AMessage> &msg) { void NuCachedSource2::fetchInternal() { LOGV("fetchInternal"); - CHECK_EQ(mFinalStatus, (status_t)OK); + { + Mutex::Autolock autoLock(mLock); + CHECK(mFinalStatus == OK || mNumRetriesLeft > 0); + + if (mFinalStatus != OK) { + --mNumRetriesLeft; + + status_t err = + mSource->reconnectAtOffset(mCacheOffset + mCache->totalSize()); + + if (err == ERROR_UNSUPPORTED) { + mNumRetriesLeft = 0; + return; + } else if (err != OK) { + LOGI("The attempt to reconnect failed, %d retries remaining", + mNumRetriesLeft); + + return; + } + } + } PageCache::Page *page = mCache->acquirePage(); @@ -264,14 +285,23 @@ void 
NuCachedSource2::fetchInternal() { Mutex::Autolock autoLock(mLock); if (n < 0) { - LOGE("source returned error %ld", n); + LOGE("source returned error %ld, %d retries left", n, mNumRetriesLeft); mFinalStatus = n; mCache->releasePage(page); } else if (n == 0) { LOGI("ERROR_END_OF_STREAM"); + + mNumRetriesLeft = 0; mFinalStatus = ERROR_END_OF_STREAM; + mCache->releasePage(page); } else { + if (mFinalStatus != OK) { + LOGI("retrying a previously failed read succeeded."); + } + mNumRetriesLeft = kMaxNumRetries; + mFinalStatus = OK; + page->mSize = n; mCache->appendPage(page); } @@ -280,7 +310,7 @@ void NuCachedSource2::fetchInternal() { void NuCachedSource2::onFetch() { LOGV("onFetch"); - if (mFinalStatus != OK) { + if (mFinalStatus != OK && mNumRetriesLeft == 0) { LOGV("EOS reached, done prefetching for now"); mFetching = false; } @@ -308,8 +338,19 @@ void NuCachedSource2::onFetch() { restartPrefetcherIfNecessary_l(); } - (new AMessage(kWhatFetchMore, mReflector->id()))->post( - mFetching ? 0 : 100000ll); + int64_t delayUs; + if (mFetching) { + if (mFinalStatus != OK && mNumRetriesLeft > 0) { + // We failed this time and will try again in 3 seconds. + delayUs = 3000000ll; + } else { + delayUs = 0; + } + } else { + delayUs = 100000ll; + } + + (new AMessage(kWhatFetchMore, mReflector->id()))->post(delayUs); } void NuCachedSource2::onRead(const sp<AMessage> &msg) { @@ -345,7 +386,7 @@ void NuCachedSource2::restartPrefetcherIfNecessary_l( bool ignoreLowWaterThreshold, bool force) { static const size_t kGrayArea = 1024 * 1024; - if (mFetching || mFinalStatus != OK) { + if (mFetching || (mFinalStatus != OK && mNumRetriesLeft == 0)) { return; } @@ -427,6 +468,12 @@ size_t NuCachedSource2::approxDataRemaining(status_t *finalStatus) { size_t NuCachedSource2::approxDataRemaining_l(status_t *finalStatus) { *finalStatus = mFinalStatus; + + if (mFinalStatus != OK && mNumRetriesLeft > 0) { + // Pretend that everything is fine until we're out of retries. 
+ *finalStatus = OK; + } + off64_t lastBytePosCached = mCacheOffset + mCache->totalSize(); if (mLastAccessPos < lastBytePosCached) { return lastBytePosCached - mLastAccessPos; diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp index 89faff7..571e8be 100644 --- a/media/libstagefright/StagefrightMediaScanner.cpp +++ b/media/libstagefright/StagefrightMediaScanner.cpp @@ -52,13 +52,13 @@ static bool FileHasAcceptableExtension(const char *extension) { return false; } -static status_t HandleMIDI( +static MediaScanResult HandleMIDI( const char *filename, MediaScannerClient *client) { // get the library configuration and do sanity check const S_EAS_LIB_CONFIG* pLibConfig = EAS_Config(); if ((pLibConfig == NULL) || (LIB_VERSION != pLibConfig->libVersion)) { LOGE("EAS library/header mismatch\n"); - return UNKNOWN_ERROR; + return MEDIA_SCAN_RESULT_ERROR; } EAS_I32 temp; @@ -88,34 +88,41 @@ static status_t HandleMIDI( } if (result != EAS_SUCCESS) { - return UNKNOWN_ERROR; + return MEDIA_SCAN_RESULT_SKIPPED; } char buffer[20]; sprintf(buffer, "%ld", temp); - if (!client->addStringTag("duration", buffer)) return UNKNOWN_ERROR; - - return OK; + status_t status = client->addStringTag("duration", buffer); + if (status) { + return MEDIA_SCAN_RESULT_ERROR; + } + return MEDIA_SCAN_RESULT_OK; } -status_t StagefrightMediaScanner::processFile( +MediaScanResult StagefrightMediaScanner::processFile( const char *path, const char *mimeType, MediaScannerClient &client) { LOGV("processFile '%s'.", path); client.setLocale(locale()); client.beginFile(); + MediaScanResult result = processFileInternal(path, mimeType, client); + client.endFile(); + return result; +} +MediaScanResult StagefrightMediaScanner::processFileInternal( + const char *path, const char *mimeType, + MediaScannerClient &client) { const char *extension = strrchr(path, '.'); if (!extension) { - return UNKNOWN_ERROR; + return MEDIA_SCAN_RESULT_SKIPPED; } if (!FileHasAcceptableExtension(extension)) { - client.endFile(); - - return UNKNOWN_ERROR; + return MEDIA_SCAN_RESULT_SKIPPED; } if (!strcasecmp(extension, ".mid") @@ -127,53 +134,57 @@ status_t StagefrightMediaScanner::processFile( || !strcasecmp(extension, ".rtx") || !strcasecmp(extension, ".ota") || !strcasecmp(extension, ".mxmf")) { - status_t status = HandleMIDI(path, &client); - if (status != OK) { - return status; + return HandleMIDI(path, &client); + } + + sp<MediaMetadataRetriever> mRetriever(new MediaMetadataRetriever); + + status_t status = mRetriever->setDataSource(path); + if (status) { + return MEDIA_SCAN_RESULT_ERROR; + } + + const char *value; + if ((value = mRetriever->extractMetadata( + METADATA_KEY_MIMETYPE)) != NULL) { + status = client.setMimeType(value); + if (status) { + return MEDIA_SCAN_RESULT_ERROR; } - } else { - sp<MediaMetadataRetriever> mRetriever(new MediaMetadataRetriever); - - if (mRetriever->setDataSource(path) == OK) { - const char *value; - if ((value = mRetriever->extractMetadata( - METADATA_KEY_MIMETYPE)) != NULL) { - client.setMimeType(value); - } + } - struct KeyMap { - const char *tag; - int key; - }; - static const KeyMap kKeyMap[] = { - { "tracknumber", METADATA_KEY_CD_TRACK_NUMBER }, - { "discnumber", METADATA_KEY_DISC_NUMBER }, - { "album", METADATA_KEY_ALBUM }, - { "artist", METADATA_KEY_ARTIST }, - { "albumartist", METADATA_KEY_ALBUMARTIST }, - { "composer", METADATA_KEY_COMPOSER }, - { "genre", METADATA_KEY_GENRE }, - { "title", METADATA_KEY_TITLE }, - { "year", METADATA_KEY_YEAR }, - { 
"duration", METADATA_KEY_DURATION }, - { "writer", METADATA_KEY_WRITER }, - { "compilation", METADATA_KEY_COMPILATION }, - { "isdrm", METADATA_KEY_IS_DRM }, - }; - static const size_t kNumEntries = sizeof(kKeyMap) / sizeof(kKeyMap[0]); - - for (size_t i = 0; i < kNumEntries; ++i) { - const char *value; - if ((value = mRetriever->extractMetadata(kKeyMap[i].key)) != NULL) { - client.addStringTag(kKeyMap[i].tag, value); - } + struct KeyMap { + const char *tag; + int key; + }; + static const KeyMap kKeyMap[] = { + { "tracknumber", METADATA_KEY_CD_TRACK_NUMBER }, + { "discnumber", METADATA_KEY_DISC_NUMBER }, + { "album", METADATA_KEY_ALBUM }, + { "artist", METADATA_KEY_ARTIST }, + { "albumartist", METADATA_KEY_ALBUMARTIST }, + { "composer", METADATA_KEY_COMPOSER }, + { "genre", METADATA_KEY_GENRE }, + { "title", METADATA_KEY_TITLE }, + { "year", METADATA_KEY_YEAR }, + { "duration", METADATA_KEY_DURATION }, + { "writer", METADATA_KEY_WRITER }, + { "compilation", METADATA_KEY_COMPILATION }, + { "isdrm", METADATA_KEY_IS_DRM }, + }; + static const size_t kNumEntries = sizeof(kKeyMap) / sizeof(kKeyMap[0]); + + for (size_t i = 0; i < kNumEntries; ++i) { + const char *value; + if ((value = mRetriever->extractMetadata(kKeyMap[i].key)) != NULL) { + status = client.addStringTag(kKeyMap[i].tag, value); + if (status) { + return MEDIA_SCAN_RESULT_ERROR; } } } - client.endFile(); - - return OK; + return MEDIA_SCAN_RESULT_OK; } char *StagefrightMediaScanner::extractAlbumArt(int fd) { diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp new file mode 100644 index 0000000..ff4b08f --- /dev/null +++ b/media/libstagefright/SurfaceMediaSource.cpp @@ -0,0 +1,756 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// #define LOG_NDEBUG 0 +#define LOG_TAG "SurfaceMediaSource" + +#include <media/stagefright/SurfaceMediaSource.h> +#include <ui/GraphicBuffer.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/openmax/OMX_IVCommon.h> + +#include <surfaceflinger/ISurfaceComposer.h> +#include <surfaceflinger/SurfaceComposerClient.h> +#include <surfaceflinger/IGraphicBufferAlloc.h> +#include <OMX_Component.h> + +#include <utils/Log.h> +#include <utils/String8.h> + +namespace android { + +SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) : + mDefaultWidth(bufW), + mDefaultHeight(bufH), + mPixelFormat(0), + mBufferCount(MIN_ASYNC_BUFFER_SLOTS), + mClientBufferCount(0), + mServerBufferCount(MIN_ASYNC_BUFFER_SLOTS), + mCurrentSlot(INVALID_BUFFER_SLOT), + mCurrentTimestamp(0), + mSynchronousMode(true), + mConnectedApi(NO_CONNECTED_API), + mFrameRate(30), + mStarted(false) { + LOGV("SurfaceMediaSource::SurfaceMediaSource"); + sp<ISurfaceComposer> composer(ComposerService::getComposerService()); + mGraphicBufferAlloc = composer->createGraphicBufferAlloc(); +} + +SurfaceMediaSource::~SurfaceMediaSource() { + LOGV("SurfaceMediaSource::~SurfaceMediaSource"); + if (mStarted) { + stop(); + } + freeAllBuffers(); +} + +size_t SurfaceMediaSource::getQueuedCount() const { + Mutex::Autolock lock(mMutex); + return mQueue.size(); +} + +status_t SurfaceMediaSource::setBufferCountServerLocked(int bufferCount) { + if (bufferCount > NUM_BUFFER_SLOTS) + return BAD_VALUE; + + // special-case, nothing to do + if (bufferCount == mBufferCount) + return OK; + + if (!mClientBufferCount && + bufferCount >= mBufferCount) { + // easy, we just have more buffers + mBufferCount = bufferCount; + mServerBufferCount = bufferCount; + mDequeueCondition.signal(); + } else { + // we're here because we're either + // - reducing the number of available buffers + // - or there is a client-buffer-count in effect + + // less than 2 buffers is never allowed + if (bufferCount < 2) + return BAD_VALUE; + + // when there is non client-buffer-count in effect, the client is not + // allowed to dequeue more than one buffer at a time, + // so the next time they dequeue a buffer, we know that they don't + // own one. the actual resizing will happen during the next + // dequeueBuffer. + + mServerBufferCount = bufferCount; + } + return OK; +} + +// Called from the consumer side +status_t SurfaceMediaSource::setBufferCountServer(int bufferCount) { + Mutex::Autolock lock(mMutex); + return setBufferCountServerLocked(bufferCount); +} + +status_t SurfaceMediaSource::setBufferCount(int bufferCount) { + LOGV("SurfaceMediaSource::setBufferCount"); + if (bufferCount > NUM_BUFFER_SLOTS) { + LOGE("setBufferCount: bufferCount is larger than the number of buffer slots"); + return BAD_VALUE; + } + + Mutex::Autolock lock(mMutex); + // Error out if the user has dequeued buffers + for (int i = 0 ; i < mBufferCount ; i++) { + if (mSlots[i].mBufferState == BufferSlot::DEQUEUED) { + LOGE("setBufferCount: client owns some buffers"); + return INVALID_OPERATION; + } + } + + if (bufferCount == 0) { + const int minBufferSlots = mSynchronousMode ? + MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS; + mClientBufferCount = 0; + bufferCount = (mServerBufferCount >= minBufferSlots) ? 
+ mServerBufferCount : minBufferSlots; + return setBufferCountServerLocked(bufferCount); + } + + // We don't allow the client to set a buffer-count less than + // MIN_ASYNC_BUFFER_SLOTS (3), there is no reason for it. + if (bufferCount < MIN_ASYNC_BUFFER_SLOTS) { + return BAD_VALUE; + } + + // here we're guaranteed that the client doesn't have dequeued buffers + // and will release all of its buffer references. + freeAllBuffers(); + mBufferCount = bufferCount; + mClientBufferCount = bufferCount; + mCurrentSlot = INVALID_BUFFER_SLOT; + mQueue.clear(); + mDequeueCondition.signal(); + return OK; +} + +status_t SurfaceMediaSource::requestBuffer(int slot, sp<GraphicBuffer>* buf) { + LOGV("SurfaceMediaSource::requestBuffer"); + Mutex::Autolock lock(mMutex); + if (slot < 0 || mBufferCount <= slot) { + LOGE("requestBuffer: slot index out of range [0, %d]: %d", + mBufferCount, slot); + return BAD_VALUE; + } + mSlots[slot].mRequestBufferCalled = true; + *buf = mSlots[slot].mGraphicBuffer; + return NO_ERROR; +} + +status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h, + uint32_t format, uint32_t usage) { + LOGV("dequeueBuffer"); + + + // Check for the buffer size- the client should just use the + // default width and height, and not try to set those. + // This is needed since + // the getFormat() returns mDefaultWidth/ Height for the OMX. It is + // queried by OMX in the beginning and not every time a frame comes. + // Not sure if there is a way to update the + // frame size while recording. So as of now, the client side + // sets the default values via the constructor, and the encoder is + // setup to encode frames of that size + // The design might need to change in the future. + // TODO: Currently just uses mDefaultWidth/Height. In the future + // we might declare mHeight and mWidth and check against those here. + if ((w != 0) || (h != 0)) { + LOGE("dequeuebuffer: invalid buffer size! Req: %dx%d, Found: %dx%d", + mDefaultWidth, mDefaultHeight, w, h); + return BAD_VALUE; + } + + Mutex::Autolock lock(mMutex); + + status_t returnFlags(OK); + + int found, foundSync; + int dequeuedCount = 0; + bool tryAgain = true; + while (tryAgain) { + // We need to wait for the FIFO to drain if the number of buffer + // needs to change. + // + // The condition "number of buffer needs to change" is true if + // - the client doesn't care about how many buffers there are + // - AND the actual number of buffer is different from what was + // set in the last setBufferCountServer() + // - OR - + // setBufferCountServer() was set to a value incompatible with + // the synchronization mode (for instance because the sync mode + // changed since) + // + // As long as this condition is true AND the FIFO is not empty, we + // wait on mDequeueCondition. + + int minBufferCountNeeded = mSynchronousMode ? + MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS; + + if (!mClientBufferCount && + ((mServerBufferCount != mBufferCount) || + (mServerBufferCount < minBufferCountNeeded))) { + // wait for the FIFO to drain + while (!mQueue.isEmpty()) { + LOGV("Waiting for the FIFO to drain"); + mDequeueCondition.wait(mMutex); + } + // need to check again since the mode could have changed + // while we were waiting + minBufferCountNeeded = mSynchronousMode ? 
+ MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS; + } + + if (!mClientBufferCount && + ((mServerBufferCount != mBufferCount) || + (mServerBufferCount < minBufferCountNeeded))) { + // here we're guaranteed that mQueue is empty + freeAllBuffers(); + mBufferCount = mServerBufferCount; + if (mBufferCount < minBufferCountNeeded) + mBufferCount = minBufferCountNeeded; + mCurrentSlot = INVALID_BUFFER_SLOT; + returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS; + } + + // look for a free buffer to give to the client + found = INVALID_BUFFER_SLOT; + foundSync = INVALID_BUFFER_SLOT; + dequeuedCount = 0; + for (int i = 0; i < mBufferCount; i++) { + const int state = mSlots[i].mBufferState; + if (state == BufferSlot::DEQUEUED) { + dequeuedCount++; + continue; // unlike SurfaceTexture, we will not + // dequeue a current slot that is not + // in the 'FREE' state + } + // When encoding, we do not dequeue the mCurrentSlot buffer, + // since we follow synchronous mode (unlike SurfaceTexture, + // which may be using asynchronous mode + // or has some mechanism in GL to wait until the + // current slot is done with the data) + // Here, we have to wait for the MPEG4Writer (or equivalent) + // to tell us when it's done using the current buffer + if (state == BufferSlot::FREE) { + foundSync = i; + // Unlike in SurfaceTexture, + // we don't need to worry whether it is the + // current slot or not, as it is in state FREE + found = i; + break; + } + } + + // clients are not allowed to dequeue more than one buffer + // if they didn't set a buffer count. + if (!mClientBufferCount && dequeuedCount) { + return -EINVAL; + } + + // See whether a buffer has been queued since the last setBufferCount so + // we know whether to perform the MIN_UNDEQUEUED_BUFFERS check below. + bool bufferHasBeenQueued = mCurrentSlot != INVALID_BUFFER_SLOT; + if (bufferHasBeenQueued) { + // make sure the client is not trying to dequeue more buffers + // than allowed.
+ const int avail = mBufferCount - (dequeuedCount+1); + if (avail < (MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode))) { + LOGE("dequeueBuffer: MIN_UNDEQUEUED_BUFFERS=%d exceeded (dequeued=%d)", + MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode), + dequeuedCount); + return -EBUSY; + } + } + + // We're in synchronous mode and didn't find a buffer; we need to wait + // for some buffers to be consumed + tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT); + if (tryAgain) { + LOGW("Waiting... in synchronous mode and no buffer to dequeue"); + mDequeueCondition.wait(mMutex); + } + } + + if (mSynchronousMode && found == INVALID_BUFFER_SLOT) { + // foundSync guaranteed to be != INVALID_BUFFER_SLOT + found = foundSync; + } + + if (found == INVALID_BUFFER_SLOT) { + return -EBUSY; + } + + const int buf = found; + *outBuf = found; + + const bool useDefaultSize = !w && !h; + if (useDefaultSize) { + // use the default size + w = mDefaultWidth; + h = mDefaultHeight; + } + + const bool updateFormat = (format != 0); + if (!updateFormat) { + // keep the current (or default) format + format = mPixelFormat; + } + + // buffer is now in DEQUEUED (but can also be current at the same time, + // if we're in synchronous mode) + mSlots[buf].mBufferState = BufferSlot::DEQUEUED; + + const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer); + if ((buffer == NULL) || + (uint32_t(buffer->width) != w) || + (uint32_t(buffer->height) != h) || + (uint32_t(buffer->format) != format) || + ((uint32_t(buffer->usage) & usage) != usage)) { + usage |= GraphicBuffer::USAGE_HW_TEXTURE; + status_t error; + sp<GraphicBuffer> graphicBuffer( + mGraphicBufferAlloc->createGraphicBuffer( + w, h, format, usage, &error)); + if (graphicBuffer == 0) { + LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer failed"); + return error; + } + if (updateFormat) { + mPixelFormat = format; + } + mSlots[buf].mGraphicBuffer = graphicBuffer; + mSlots[buf].mRequestBufferCalled = false; + returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION; + } + return returnFlags; +} + +status_t SurfaceMediaSource::setSynchronousMode(bool enabled) { + Mutex::Autolock lock(mMutex); + + status_t err = OK; + if (!enabled) { + // going to asynchronous mode, drain the queue + while (mSynchronousMode != enabled && !mQueue.isEmpty()) { + mDequeueCondition.wait(mMutex); + } + } + + if (mSynchronousMode != enabled) { + // - if we're going to asynchronous mode, the queue is guaranteed to be + // empty here + // - if the client set the number of buffers, we're guaranteed that + // we have at least 3 (because we don't allow less) + mSynchronousMode = enabled; + mDequeueCondition.signal(); + } + return err; +} + +status_t SurfaceMediaSource::connect(int api) { + LOGV("SurfaceMediaSource::connect"); + Mutex::Autolock lock(mMutex); + status_t err = NO_ERROR; + switch (api) { + case NATIVE_WINDOW_API_EGL: + case NATIVE_WINDOW_API_CPU: + case NATIVE_WINDOW_API_MEDIA: + case NATIVE_WINDOW_API_CAMERA: + if (mConnectedApi != NO_CONNECTED_API) { + err = -EINVAL; + } else { + mConnectedApi = api; + } + break; + default: + err = -EINVAL; + break; + } + return err; +} + +status_t SurfaceMediaSource::disconnect(int api) { + LOGV("SurfaceMediaSource::disconnect"); + Mutex::Autolock lock(mMutex); + status_t err = NO_ERROR; + switch (api) { + case NATIVE_WINDOW_API_EGL: + case NATIVE_WINDOW_API_CPU: + case NATIVE_WINDOW_API_MEDIA: + case NATIVE_WINDOW_API_CAMERA: + if (mConnectedApi == api) { + mConnectedApi = NO_CONNECTED_API; + } else { + err = -EINVAL; + } + break; + default:
err = -EINVAL; + break; + } + return err; +} + +status_t SurfaceMediaSource::queueBuffer(int buf, int64_t timestamp, + uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) { + LOGV("queueBuffer"); + + Mutex::Autolock lock(mMutex); + if (buf < 0 || buf >= mBufferCount) { + LOGE("queueBuffer: slot index out of range [0, %d]: %d", + mBufferCount, buf); + return -EINVAL; + } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) { + LOGE("queueBuffer: slot %d is not owned by the client (state=%d)", + buf, mSlots[buf].mBufferState); + return -EINVAL; + } else if (!mSlots[buf].mRequestBufferCalled) { + LOGE("queueBuffer: slot %d was enqueued without requesting a " + "buffer", buf); + return -EINVAL; + } + + if (mSynchronousMode) { + // in synchronous mode we queue all buffers in a FIFO + mQueue.push_back(buf); + LOGV("Client queued buffer on slot: %d, Q size = %d", + buf, mQueue.size()); + } else { + // in asynchronous mode we only keep the most recent buffer + if (mQueue.empty()) { + mQueue.push_back(buf); + } else { + Fifo::iterator front(mQueue.begin()); + // buffer currently queued is freed + mSlots[*front].mBufferState = BufferSlot::FREE; + // and we record the new buffer index in the queued list + *front = buf; + } + } + + mSlots[buf].mBufferState = BufferSlot::QUEUED; + mSlots[buf].mTimestamp = timestamp; + // TODO: (Confirm) Don't want to signal dequeue here. + // Maybe just in asynchronous mode? + // mDequeueCondition.signal(); + + // Once the queuing is done, we need to let the listener know, + // and signal the buffer consumer (encoder), that a + // buffer is available + onFrameReceivedLocked(); + + *outWidth = mDefaultWidth; + *outHeight = mDefaultHeight; + *outTransform = 0; + + return OK; +} + + +// onFrameReceivedLocked informs the buffer consumers (StagefrightRecorder) +// or listeners that a frame has been received. +// It is supposed to be called only from queueBuffer(). +// The buffer is NOT made available for dequeueing immediately. We need to +// wait to hear from StagefrightRecorder before setting the buffer FREE. +// Make sure this is called when the mutex is locked +status_t SurfaceMediaSource::onFrameReceivedLocked() { + LOGV("On Frame Received"); + // Signal the encoder that a new frame has arrived + mFrameAvailableCondition.signal(); + + // call back the listener + // TODO: The listener may not be needed in SurfaceMediaSource at all.
+ // This can be made a SurfaceTexture specific thing + sp<FrameAvailableListener> listener; + if (mSynchronousMode || mQueue.empty()) { + listener = mFrameAvailableListener; + } + + if (listener != 0) { + listener->onFrameAvailable(); + } + return OK; +} + + +void SurfaceMediaSource::cancelBuffer(int buf) { + LOGV("SurfaceMediaSource::cancelBuffer"); + Mutex::Autolock lock(mMutex); + if (buf < 0 || buf >= mBufferCount) { + LOGE("cancelBuffer: slot index out of range [0, %d]: %d", + mBufferCount, buf); + return; + } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) { + LOGE("cancelBuffer: slot %d is not owned by the client (state=%d)", + buf, mSlots[buf].mBufferState); + return; + } + mSlots[buf].mBufferState = BufferSlot::FREE; + mDequeueCondition.signal(); +} + +nsecs_t SurfaceMediaSource::getTimestamp() { + LOGV("SurfaceMediaSource::getTimestamp"); + Mutex::Autolock lock(mMutex); + return mCurrentTimestamp; +} + + +void SurfaceMediaSource::setFrameAvailableListener( + const sp<FrameAvailableListener>& listener) { + LOGV("SurfaceMediaSource::setFrameAvailableListener"); + Mutex::Autolock lock(mMutex); + mFrameAvailableListener = listener; +} + +void SurfaceMediaSource::freeAllBuffers() { + LOGV("freeAllBuffers"); + for (int i = 0; i < NUM_BUFFER_SLOTS; i++) { + mSlots[i].mGraphicBuffer = 0; + mSlots[i].mBufferState = BufferSlot::FREE; + } +} + +sp<GraphicBuffer> SurfaceMediaSource::getCurrentBuffer() const { + Mutex::Autolock lock(mMutex); + return mCurrentBuf; +} + +int SurfaceMediaSource::query(int what, int* outValue) +{ + LOGV("query"); + Mutex::Autolock lock(mMutex); + int value; + switch (what) { + case NATIVE_WINDOW_WIDTH: + value = mDefaultWidth; + if (!mDefaultWidth && !mDefaultHeight && mCurrentBuf != 0) + value = mCurrentBuf->width; + break; + case NATIVE_WINDOW_HEIGHT: + value = mDefaultHeight; + if (!mDefaultWidth && !mDefaultHeight && mCurrentBuf != 0) + value = mCurrentBuf->height; + break; + case NATIVE_WINDOW_FORMAT: + value = mPixelFormat; + break; + case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS: + value = mSynchronousMode ? 
+ (MIN_UNDEQUEUED_BUFFERS-1) : MIN_UNDEQUEUED_BUFFERS; + break; + default: + return BAD_VALUE; + } + outValue[0] = value; + return NO_ERROR; +} + +void SurfaceMediaSource::dump(String8& result) const +{ + char buffer[1024]; + dump(result, "", buffer, 1024); +} + +void SurfaceMediaSource::dump(String8& result, const char* prefix, + char* buffer, size_t SIZE) const +{ + Mutex::Autolock _l(mMutex); + snprintf(buffer, SIZE, + "%smBufferCount=%d, mSynchronousMode=%d, default-size=[%dx%d], " + "mPixelFormat=%d, \n", + prefix, mBufferCount, mSynchronousMode, mDefaultWidth, mDefaultHeight, + mPixelFormat); + result.append(buffer); + + String8 fifo; + int fifoSize = 0; + Fifo::const_iterator i(mQueue.begin()); + while (i != mQueue.end()) { + snprintf(buffer, SIZE, "%02d ", *i++); + fifoSize++; + fifo.append(buffer); + } + + result.append(buffer); + + struct { + const char * operator()(int state) const { + switch (state) { + case BufferSlot::DEQUEUED: return "DEQUEUED"; + case BufferSlot::QUEUED: return "QUEUED"; + case BufferSlot::FREE: return "FREE"; + default: return "Unknown"; + } + } + } stateName; + + for (int i = 0; i < mBufferCount; i++) { + const BufferSlot& slot(mSlots[i]); + snprintf(buffer, SIZE, + "%s%s[%02d] state=%-8s, " + "timestamp=%lld\n", + prefix, (i==mCurrentSlot)?">":" ", i, stateName(slot.mBufferState), + slot.mTimestamp + ); + result.append(buffer); + } +} + +status_t SurfaceMediaSource::setFrameRate(int32_t fps) +{ + Mutex::Autolock lock(mMutex); + const int MAX_FRAME_RATE = 60; + if (fps < 0 || fps > MAX_FRAME_RATE) { + return BAD_VALUE; + } + mFrameRate = fps; + return OK; +} + +bool SurfaceMediaSource::isMetaDataStoredInVideoBuffers() const { + LOGV("isMetaDataStoredInVideoBuffers"); + return true; +} + +int32_t SurfaceMediaSource::getFrameRate( ) const { + Mutex::Autolock lock(mMutex); + return mFrameRate; +} + +status_t SurfaceMediaSource::start(MetaData *params) +{ + LOGV("start"); + Mutex::Autolock lock(mMutex); + CHECK(!mStarted); + mStarted = true; + return OK; +} + + +status_t SurfaceMediaSource::stop() +{ + LOGV("Stop"); + + Mutex::Autolock lock(mMutex); + // TODO: Add waiting on mFrameCompletedCondition here? + mStarted = false; + mFrameAvailableCondition.signal(); + + return OK; +} + +sp<MetaData> SurfaceMediaSource::getFormat() +{ + LOGV("getFormat"); + Mutex::Autolock autoLock(mMutex); + sp<MetaData> meta = new MetaData; + + meta->setInt32(kKeyWidth, mDefaultWidth); + meta->setInt32(kKeyHeight, mDefaultHeight); + // The encoder format is set as an opaque colorformat + // The encoder will later find out the actual colorformat + // from the GL Frames itself. + meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatAndroidOpaque); + meta->setInt32(kKeyStride, mDefaultWidth); + meta->setInt32(kKeySliceHeight, mDefaultHeight); + meta->setInt32(kKeyFrameRate, mFrameRate); + meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW); + return meta; +} + +status_t SurfaceMediaSource::read( MediaBuffer **buffer, + const ReadOptions *options) +{ + LOGV("Read. Size of queued buffer: %d", mQueue.size()); + *buffer = NULL; + + Mutex::Autolock autoLock(mMutex) ; + // If the recording has started and the queue is empty, then just + // wait here till the frames come in from the client side + while (mStarted && mQueue.empty()) { + LOGV("NO FRAMES! 
Recorder waiting for FrameAvailableCondition"); + mFrameAvailableCondition.wait(mMutex); + } + + // If the loop was exited as a result of stopping the recording, + // it is OK + if (!mStarted) { + return OK; + } + + // Update the current buffer info + // TODO: mCurrentSlot can be made a bufferstate since there + // can be more than one "current" slots. + Fifo::iterator front(mQueue.begin()); + mCurrentSlot = *front; + mCurrentBuf = mSlots[mCurrentSlot].mGraphicBuffer; + mCurrentTimestamp = mSlots[mCurrentSlot].mTimestamp; + + // Pass the data to the MediaBuffer + // TODO: Change later to pass in only the metadata + *buffer = new MediaBuffer(mCurrentBuf); + (*buffer)->setObserver(this); + (*buffer)->add_ref(); + (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp); + + return OK; +} + +void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) { + LOGV("signalBufferReturned"); + + bool foundBuffer = false; + Mutex::Autolock autoLock(mMutex); + + if (!mStarted) { + LOGV("started = false. Nothing to do"); + return; + } + + for (Fifo::iterator it = mQueue.begin(); it != mQueue.end(); ++it) { + if (mSlots[*it].mGraphicBuffer == buffer->graphicBuffer()) { + LOGV("Buffer %d returned. Setting it 'FREE'. New Queue size = %d", + *it, mQueue.size()-1); + mSlots[*it].mBufferState = BufferSlot::FREE; + mQueue.erase(it); + buffer->setObserver(0); + buffer->release(); + mDequeueCondition.signal(); + mFrameCompleteCondition.signal(); + foundBuffer = true; + break; + } + } + + if (!foundBuffer) { + CHECK_EQ(0, "signalBufferReturned: bogus buffer"); + } +} + + + +} // end of namespace android diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp index bf978d7..c406964 100644 --- a/media/libstagefright/WAVExtractor.cpp +++ b/media/libstagefright/WAVExtractor.cpp @@ -370,7 +370,9 @@ status_t WAVSource::read( int16_t *dst = (int16_t *)tmp->data(); const uint8_t *src = (const uint8_t *)buffer->data(); - while (n-- > 0) { + ssize_t numBytes = n; + + while (numBytes-- > 0) { *dst++ = ((int16_t)(*src) - 128) * 256; ++src; } diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp index 588a74d..07a9eb8 100644 --- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp +++ b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp @@ -25,6 +25,8 @@ #include "support.h" +#include <cutils/properties.h> // for property_get + namespace android { ChromiumHTTPDataSource::ChromiumHTTPDataSource(uint32_t flags) @@ -111,7 +113,7 @@ void ChromiumHTTPDataSource::onConnectionFailed(status_t err) { mState = DISCONNECTED; mCondition.broadcast(); - mURI.clear(); + // mURI.clear(); mIOResult = err; @@ -150,8 +152,18 @@ ssize_t ChromiumHTTPDataSource::readAt(off64_t offset, void *data, size_t size) Mutex::Autolock autoLock(mLock); if (mState != CONNECTED) { - return ERROR_NOT_CONNECTED; + return INVALID_OPERATION; + } + +#if 0 + char value[PROPERTY_VALUE_MAX]; + if (property_get("media.stagefright.disable-net", value, 0) + && (!strcasecmp(value, "true") || !strcmp(value, "1"))) { + LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Simulating that the network is down."); + disconnect_l(); + return ERROR_IO; } +#endif if (offset != mCurrentOffset) { AString tmp = mURI; @@ -236,7 +248,7 @@ void ChromiumHTTPDataSource::onDisconnectComplete() { CHECK_EQ((int)mState, (int)DISCONNECTING); mState = DISCONNECTED; - mURI.clear(); + // mURI.clear(); mCondition.broadcast(); @@ -299,5 +311,21 @@ void 
ChromiumHTTPDataSource::clearDRMState_l() { } } +status_t ChromiumHTTPDataSource::reconnectAtOffset(off64_t offset) { + Mutex::Autolock autoLock(mLock); + + if (mURI.empty()) { + return INVALID_OPERATION; + } + + LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnecting..."); + status_t err = connect_l(mURI.c_str(), &mHeaders, offset); + if (err != OK) { + LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnect failed w/ err 0x%08x", err); + } + + return err; +} + } // namespace android diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h index d833e2e..18f8913 100644 --- a/media/libstagefright/include/ChromiumHTTPDataSource.h +++ b/media/libstagefright/include/ChromiumHTTPDataSource.h @@ -51,6 +51,8 @@ struct ChromiumHTTPDataSource : public HTTPBase { virtual String8 getMIMEType() const; + virtual status_t reconnectAtOffset(off64_t offset); + protected: virtual ~ChromiumHTTPDataSource(); diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h index 2d6cb84..22b2855 100644 --- a/media/libstagefright/include/NuCachedSource2.h +++ b/media/libstagefright/include/NuCachedSource2.h @@ -77,6 +77,10 @@ private: kWhatRead = 'read', }; + enum { + kMaxNumRetries = 10, + }; + sp<DataSource> mSource; sp<AHandlerReflector<NuCachedSource2> > mReflector; sp<ALooper> mLooper; @@ -93,6 +97,8 @@ private: bool mFetching; int64_t mLastFetchTimeUs; + int32_t mNumRetriesLeft; + void onMessageReceived(const sp<AMessage> &msg); void onFetch(); void onRead(const sp<AMessage> &msg); diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk new file mode 100644 index 0000000..3ea8f39 --- /dev/null +++ b/media/libstagefright/tests/Android.mk @@ -0,0 +1,53 @@ +# Build the unit tests. +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +ifneq ($(TARGET_SIMULATOR),true) + +LOCAL_MODULE := SurfaceMediaSource_test + +LOCAL_MODULE_TAGS := tests + +LOCAL_SRC_FILES := \ + SurfaceMediaSource_test.cpp \ + DummyRecorder.cpp \ + +LOCAL_SHARED_LIBRARIES := \ + libEGL \ + libGLESv2 \ + libandroid \ + libbinder \ + libcutils \ + libgui \ + libstlport \ + libui \ + libutils \ + libstagefright \ + libstagefright_omx \ + libstagefright_foundation \ + +LOCAL_STATIC_LIBRARIES := \ + libgtest \ + libgtest_main \ + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ + frameworks/base/media/libstagefright \ + frameworks/base/media/libstagefright/include \ + $(TOP)/frameworks/base/include/media/stagefright/openmax \ + +include $(BUILD_EXECUTABLE) + +endif + +# Include subdirectory makefiles +# ============================================================ + +# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework +# team really wants is to build the stuff defined by this makefile. +ifeq (,$(ONE_SHOT_MAKEFILE)) +include $(call first-makefiles-under,$(LOCAL_PATH)) +endif diff --git a/media/libstagefright/tests/DummyRecorder.cpp b/media/libstagefright/tests/DummyRecorder.cpp new file mode 100644 index 0000000..8d75d6b --- /dev/null +++ b/media/libstagefright/tests/DummyRecorder.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "DummyRecorder" +// #define LOG_NDEBUG 0 + +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaSource.h> +#include "DummyRecorder.h" + +#include <utils/Log.h> + +namespace android { + +// static +void *DummyRecorder::threadWrapper(void *pthis) { + LOGV("ThreadWrapper: %p", pthis); + DummyRecorder *writer = static_cast<DummyRecorder *>(pthis); + writer->readFromSource(); + return NULL; +} + + +status_t DummyRecorder::start() { + LOGV("Start"); + mStarted = true; + + mSource->start(); + + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + int err = pthread_create(&mThread, &attr, threadWrapper, this); + pthread_attr_destroy(&attr); + + if (err) { + LOGE("Error creating thread!"); + return -ENODEV; + } + return OK; +} + + +status_t DummyRecorder::stop() { + LOGV("Stop"); + mStarted = false; + + mSource->stop(); + void *dummy; + pthread_join(mThread, &dummy); + status_t err = (status_t) dummy; + + LOGV("Ending the reading thread"); + return err; +} + +// pretend to read the source buffers +void DummyRecorder::readFromSource() { + LOGV("ReadFromSource"); + if (!mStarted) { + return; + } + + status_t err = OK; + MediaBuffer *buffer; + LOGV("A fake writer accessing the frames"); + while (mStarted && (err = mSource->read(&buffer)) == OK){ + // if not getting a valid buffer from source, then exit + if (buffer == NULL) { + return; + } + buffer->release(); + buffer = NULL; + } +} + + +} // end of namespace android diff --git a/media/libstagefright/tests/DummyRecorder.h b/media/libstagefright/tests/DummyRecorder.h new file mode 100644 index 0000000..1cbea1b --- /dev/null +++ b/media/libstagefright/tests/DummyRecorder.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef DUMMY_RECORDER_H_ +#define DUMMY_RECORDER_H_ + +#include <pthread.h> +#include <utils/String8.h> +#include <media/stagefright/foundation/ABase.h> + + +namespace android { + +class MediaSource; +class MediaBuffer; + +class DummyRecorder { + public: + // The media source from which this will receive frames + sp<MediaSource> mSource; + bool mStarted; + pthread_t mThread; + + status_t start(); + status_t stop(); + + // actual entry point for the thread + void readFromSource(); + + // static function to wrap the actual thread entry point + static void *threadWrapper(void *pthis); + + DummyRecorder(const sp<MediaSource> &source) : mSource(source) + , mStarted(false) {} + ~DummyRecorder( ) {} + + private: + + DISALLOW_EVIL_CONSTRUCTORS(DummyRecorder); +}; + +} // end of namespace android +#endif + + diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp new file mode 100644 index 0000000..ce10812 --- /dev/null +++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp @@ -0,0 +1,349 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "SurfaceMediaSource_test" +// #define LOG_NDEBUG 0 + +#include <gtest/gtest.h> +#include <utils/String8.h> +#include <utils/Errors.h> + +#include <media/stagefright/SurfaceMediaSource.h> + +#include <gui/SurfaceTextureClient.h> +#include <ui/GraphicBuffer.h> +#include <surfaceflinger/ISurfaceComposer.h> +#include <surfaceflinger/Surface.h> +#include <surfaceflinger/SurfaceComposerClient.h> + +#include <binder/ProcessState.h> +#include <ui/FramebufferNativeWindow.h> + +#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/MPEG4Writer.h> +#include <media/stagefright/OMXClient.h> +#include <media/stagefright/OMXCodec.h> +#include <OMX_Component.h> + +#include "DummyRecorder.h" + +namespace android { + + +class SurfaceMediaSourceTest : public ::testing::Test { +public: + + SurfaceMediaSourceTest( ): mYuvTexWidth(64), mYuvTexHeight(66) { } + sp<MPEG4Writer> setUpWriter(OMXClient &client ); + void oneBufferPass(int width, int height ); + static void fillYV12Buffer(uint8_t* buf, int w, int h, int stride) ; + static void fillYV12BufferRect(uint8_t* buf, int w, int h, + int stride, const android_native_rect_t& rect) ; +protected: + + virtual void SetUp() { + mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight); + mSMS->setSynchronousMode(true); + mSTC = new SurfaceTextureClient(mSMS); + mANW = mSTC; + + } + + + virtual void TearDown() { + mSMS.clear(); + mSTC.clear(); + mANW.clear(); + } + + const int mYuvTexWidth;// = 64; + const int mYuvTexHeight;// = 66; + + sp<SurfaceMediaSource> mSMS; + sp<SurfaceTextureClient> mSTC; + sp<ANativeWindow> mANW; + +}; + +void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) { + LOGV("One Buffer Pass"); + ANativeWindowBuffer* anb; + 
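+    // One producer-side pass through the window: dequeue a buffer slot, lock
+    // it for CPU writes, fill it with the YV12 checkerboard pattern, then
+    // queue it back so that SurfaceMediaSource::read() can pick it up.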
+    ASSERT_EQ(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
+    ASSERT_TRUE(anb != NULL);
+
+    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+    ASSERT_EQ(NO_ERROR, mANW->lockBuffer(mANW.get(), buf->getNativeBuffer()));
+
+    // Fill the buffer with a checkerboard pattern
+    uint8_t* img = NULL;
+    buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
+    SurfaceMediaSourceTest::fillYV12Buffer(img, width, height, buf->getStride());
+    buf->unlock();
+
+    ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer()));
+}
+
+sp<MPEG4Writer> SurfaceMediaSourceTest::setUpWriter(OMXClient &client) {
+    // Writing to a file
+    const char *fileName = "/sdcard/outputSurfEnc.mp4";
+    sp<MetaData> enc_meta = new MetaData;
+    enc_meta->setInt32(kKeyBitRate, 300000);
+    enc_meta->setInt32(kKeyFrameRate, 30);
+
+    enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+
+    sp<MetaData> meta = mSMS->getFormat();
+
+    int32_t width, height, stride, sliceHeight, colorFormat;
+    CHECK(meta->findInt32(kKeyWidth, &width));
+    CHECK(meta->findInt32(kKeyHeight, &height));
+    CHECK(meta->findInt32(kKeyStride, &stride));
+    CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
+    CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
+
+    enc_meta->setInt32(kKeyWidth, width);
+    enc_meta->setInt32(kKeyHeight, height);
+    enc_meta->setInt32(kKeyIFramesInterval, 1);
+    enc_meta->setInt32(kKeyStride, stride);
+    enc_meta->setInt32(kKeySliceHeight, sliceHeight);
+    // TODO: overwriting the colorformat since the format set by GRAlloc
+    // could be wrong or not be read by OMX
+    enc_meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar);
+    // colorFormat);
+
+
+    sp<MediaSource> encoder =
+        OMXCodec::Create(
+                client.interface(), enc_meta, true /* createEncoder */, mSMS);
+
+    sp<MPEG4Writer> writer = new MPEG4Writer(fileName);
+    writer->addSource(encoder);
+
+    return writer;
+}
+
+// Fill a YV12 buffer with a multi-colored checkerboard pattern
+void SurfaceMediaSourceTest::fillYV12Buffer(uint8_t* buf, int w, int h, int stride) {
+    const int blockWidth = w > 16 ? w / 16 : 1;
+    const int blockHeight = h > 16 ? h / 16 : 1;
+    const int yuvTexOffsetY = 0;
+    int yuvTexStrideY = stride;
+    int yuvTexOffsetV = yuvTexStrideY * h;
+    int yuvTexStrideV = (yuvTexStrideY/2 + 0xf) & ~0xf;
+    int yuvTexOffsetU = yuvTexOffsetV + yuvTexStrideV * h/2;
+    int yuvTexStrideU = yuvTexStrideV;
+    for (int x = 0; x < w; x++) {
+        for (int y = 0; y < h; y++) {
+            int parityX = (x / blockWidth) & 1;
+            int parityY = (y / blockHeight) & 1;
+            unsigned char intensity = (parityX ^ parityY) ? 63 : 191;
+            buf[yuvTexOffsetY + (y * yuvTexStrideY) + x] = intensity;
+            if (x < w / 2 && y < h / 2) {
+                buf[yuvTexOffsetU + (y * yuvTexStrideU) + x] = intensity;
+                if (x * 2 < w / 2 && y * 2 < h / 2) {
+                    buf[yuvTexOffsetV + (y*2 * yuvTexStrideV) + x*2 + 0] =
+                    buf[yuvTexOffsetV + (y*2 * yuvTexStrideV) + x*2 + 1] =
+                    buf[yuvTexOffsetV + ((y*2+1) * yuvTexStrideV) + x*2 + 0] =
+                    buf[yuvTexOffsetV + ((y*2+1) * yuvTexStrideV) + x*2 + 1] =
+                        intensity;
+                }
+            }
+        }
+    }
+}
+
+// Fill a YV12 buffer with red outside a given rectangle and green inside it.
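+// Y is written as 240 inside / 64 outside; U stays constant at 16, and only
+// V (16 inside, 255 outside) carries the chroma difference between regions.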
+void SurfaceMediaSourceTest::fillYV12BufferRect(uint8_t* buf, int w,
+        int h, int stride, const android_native_rect_t& rect) {
+    const int yuvTexOffsetY = 0;
+    int yuvTexStrideY = stride;
+    int yuvTexOffsetV = yuvTexStrideY * h;
+    int yuvTexStrideV = (yuvTexStrideY/2 + 0xf) & ~0xf;
+    int yuvTexOffsetU = yuvTexOffsetV + yuvTexStrideV * h/2;
+    int yuvTexStrideU = yuvTexStrideV;
+    for (int x = 0; x < w; x++) {
+        for (int y = 0; y < h; y++) {
+            bool inside = rect.left <= x && x < rect.right &&
+                    rect.top <= y && y < rect.bottom;
+            buf[yuvTexOffsetY + (y * yuvTexStrideY) + x] = inside ? 240 : 64;
+            if (x < w / 2 && y < h / 2) {
+                bool inside = rect.left <= 2*x && 2*x < rect.right &&
+                        rect.top <= 2*y && 2*y < rect.bottom;
+                buf[yuvTexOffsetU + (y * yuvTexStrideU) + x] = 16;
+                buf[yuvTexOffsetV + (y * yuvTexStrideV) + x] =
+                        inside ? 16 : 255;
+            }
+        }
+    }
+} ///////// End of class SurfaceMediaSourceTest
+
+///////////////////////////////////////////////////////////////////
+// Class to imitate the recording /////////////////////////////
+// ////////////////////////////////////////////////////////////////
+struct SimpleDummyRecorder {
+    sp<MediaSource> mSource;
+
+    SimpleDummyRecorder(const sp<MediaSource> &source) : mSource(source) {}
+
+    status_t start() { return mSource->start(); }
+    status_t stop()  { return mSource->stop(); }
+
+    // fakes reading from a media source
+    status_t readFromSource() {
+        MediaBuffer *buffer;
+        status_t err = mSource->read(&buffer);
+        if (err != OK) {
+            return err;
+        }
+        buffer->release();
+        buffer = NULL;
+        return OK;
+    }
+};
+
+///////////////////////////////////////////////////////////////////
+// TESTS
+// Just pass one buffer from the native_window to the SurfaceMediaSource
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotOneBufferPass) {
+    LOGV("Testing OneBufferPass ******************************");
+
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+            0, 0, HAL_PIXEL_FORMAT_YV12));
+    // OMX_COLOR_FormatYUV420Planar)); // ));
+    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+    oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+}
+
+// Passing a buffer with the wrong width and height should not be accepted
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) {
+    LOGV("Testing Wrong size BufferPass ******************************");
+
+    // setting the client side buffer size different from the server size
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+            10, 10, HAL_PIXEL_FORMAT_YV12));
+    // OMX_COLOR_FormatYUV420Planar)); // ));
+    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+    ANativeWindowBuffer* anb;
+
+    // make sure we get an error back when dequeuing!
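+    // SetUp() created the SurfaceMediaSource at 64x66, while the window was
+    // just configured as 10x10 above, so the mismatched geometry should be
+    // rejected at dequeue time.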
+    ASSERT_NE(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
+}
+
+
+// Pass multiple buffers from the native_window to the SurfaceMediaSource.
+// A dummy writer is used to simulate the actual MPEG4Writer.
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
+    LOGV("Testing MultiBufferPass, Dummy Recorder *********************");
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+            0, 0, HAL_PIXEL_FORMAT_YV12));
+    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+    SimpleDummyRecorder writer(mSMS);
+    writer.start();
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount < 300) {
+        oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+
+        ASSERT_EQ(NO_ERROR, writer.readFromSource());
+
+        nFramesCount++;
+    }
+    writer.stop();
+}
+
+// Delayed pass of multiple buffers from the native_window to the SurfaceMediaSource.
+// A dummy writer is used to simulate the actual MPEG4Writer.
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPassLag) {
+    LOGV("Testing MultiBufferPass, Dummy Recorder Lagging **************");
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+            0, 0, HAL_PIXEL_FORMAT_YV12));
+    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+    SimpleDummyRecorder writer(mSMS);
+    writer.start();
+
+    int32_t nFramesCount = 1;
+    const int FRAMES_LAG = mSMS->getBufferCount() - 1;
+    while (nFramesCount <= 300) {
+        oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+        // Forcing the writer to lag behind a few frames
+        if (nFramesCount > FRAMES_LAG) {
+            ASSERT_EQ(NO_ERROR, writer.readFromSource());
+        }
+        nFramesCount++;
+    }
+    writer.stop();
+}
+
+// Pass multiple buffers from the native_window to the SurfaceMediaSource.
+// A dummy writer (MULTITHREADED) is used to simulate the actual MPEG4Writer.
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPassThreaded) {
+    LOGV("Testing MultiBufferPass, Dummy Recorder Multi-Threaded **********");
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+            0, 0, HAL_PIXEL_FORMAT_YV12));
+    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+    DummyRecorder writer(mSMS);
+    writer.start();
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+
+        nFramesCount++;
+    }
+    writer.stop();
+}
+
+// Test to examine the actual encoding. Temporarily disabled until the
+// colorformat and encoding from GRAlloc data are resolved.
+TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuFilledYV12BufferNpotWrite) {
+    LOGV("Testing the whole pipeline with actual Recorder");
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+            0, 0, HAL_PIXEL_FORMAT_YV12)); // OMX_COLOR_FormatYUV420Planar)); // ));
+    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+    OMXClient client;
+    CHECK_EQ(OK, client.connect());
+
+    sp<MPEG4Writer> writer = setUpWriter(client);
+    int64_t start = systemTime();
+    CHECK_EQ(OK, writer->start());
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+        nFramesCount++;
+    }
+
+    CHECK_EQ(OK, writer->stop());
+    writer.clear();
+    int64_t end = systemTime();
+    client.disconnect();
+}
+
+
+} // namespace android
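The NuCachedSource2.cpp hunk that consumes the new kMaxNumRetries constant and mNumRetriesLeft field from the header change above, and that calls the reconnectAtOffset() hook added to ChromiumHTTPDataSource, is not part of this listing. Below is a minimal sketch of how those pieces presumably fit together; fetchInternal(), mFinalStatus, mCacheOffset and mCache are names assumed from context, not taken from the shown hunks:

    // Sketch only -- not the actual NuCachedSource2.cpp change.
    void NuCachedSource2::fetchInternal() {
        // If the last fetch failed, spend one retry reconnecting the HTTP
        // source at the point where the cached data ends.
        if (mFinalStatus != OK) {
            if (mNumRetriesLeft == 0) {
                return;                 // retries exhausted; stay in error state
            }
            --mNumRetriesLeft;

            // reconnectAtOffset() is the hook ChromiumHTTPDataSource gained
            // above; mCacheOffset and mCache are assumed cache bookkeeping.
            status_t err = mSource->reconnectAtOffset(
                    mCacheOffset + mCache->totalSize());
            if (err != OK) {
                mFinalStatus = err;     // retry again on the next fetch pass
                return;
            }
            mFinalStatus = OK;          // reconnected; resume filling the cache
        }

        // ... read the next chunk via mSource->readAt() as before ...
    }

mNumRetriesLeft would presumably be reset to kMaxNumRetries whenever a connection is (re)established, so a transient network drop costs at most kMaxNumRetries reconnect attempts before the error becomes permanent.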