-rw-r--r--  camera/Android.mk | 16
-rw-r--r--  camera/CameraParameters.cpp | 18
-rw-r--r--  camera/ICameraServiceProxy.cpp | 23
-rw-r--r--  camera/camera2/ICameraDeviceUser.cpp | 27
-rw-r--r--  drm/common/IDrmManagerService.cpp | 16
-rw-r--r--  include/camera/CameraParameters.h | 8
-rw-r--r--  include/camera/CameraParametersExtra.h | 35
-rw-r--r--  include/camera/ICameraServiceProxy.h | 17
-rw-r--r--  include/camera/camera2/ICameraDeviceUser.h | 5
-rw-r--r--  include/media/IMediaPlayer.h | 10
-rw-r--r--  include/media/IMediaRecorder.h | 1
-rw-r--r--  include/media/IOMX.h | 6
-rw-r--r--  include/media/MediaPlayerInterface.h | 8
-rwxr-xr-x[-rw-r--r--]  include/media/MediaProfiles.h | 16
-rw-r--r--  include/media/MediaRecorderBase.h | 1
-rw-r--r--  include/media/Visualizer.h | 1
-rw-r--r--  include/media/mediaplayer.h | 5
-rwxr-xr-x[-rw-r--r--]  include/media/mediarecorder.h | 7
-rw-r--r--  include/media/stagefright/ACodec.h | 11
-rw-r--r--  include/media/stagefright/AudioPlayer.h | 2
-rw-r--r--  include/media/stagefright/AudioSource.h | 4
-rw-r--r--  include/media/stagefright/CameraSource.h | 7
-rw-r--r--  include/media/stagefright/CameraSourceTimeLapse.h | 3
-rw-r--r--  include/media/stagefright/DataSource.h | 42
-rw-r--r--  include/media/stagefright/FFMPEGSoftCodec.h | 133
-rw-r--r--  include/media/stagefright/FileSource.h | 6
-rw-r--r--  include/media/stagefright/MediaDefs.h | 35
-rw-r--r--  include/media/stagefright/MediaExtractor.h | 15
-rw-r--r--  include/media/stagefright/MetaData.h | 50
-rw-r--r--  include/media/stagefright/OMXCodec.h | 4
-rw-r--r--  media/img_utils/include/img_utils/DngUtils.h | 31
-rw-r--r--  media/img_utils/src/DngUtils.cpp | 85
-rw-r--r--  media/libavextensions/Android.mk | 6
-rw-r--r--  media/libavextensions/mediaplayerservice/AVNuExtensions.h | 1
-rw-r--r--  media/libavextensions/mediaplayerservice/AVNuUtils.cpp | 64
-rw-r--r--  media/libavextensions/stagefright/AVFactory.cpp | 2
-rw-r--r--  media/libavextensions/stagefright/AVUtils.cpp | 6
-rw-r--r--  media/libmedia/AudioTrack.cpp | 21
-rw-r--r--  media/libmedia/AudioTrackShared.cpp | 4
-rw-r--r--  media/libmedia/IAudioFlinger.cpp | 54
-rw-r--r--  media/libmedia/IAudioPolicyService.cpp | 26
-rw-r--r--  media/libmedia/ICrypto.cpp | 23
-rw-r--r--  media/libmedia/IEffect.cpp | 27
-rw-r--r--  media/libmedia/IMediaPlayer.cpp | 34
-rw-r--r--  media/libmedia/IMediaRecorder.cpp | 16
-rw-r--r--  media/libmedia/IOMX.cpp | 6
-rwxr-xr-x[-rw-r--r--]  media/libmedia/MediaProfiles.cpp | 33
-rw-r--r--  media/libmedia/Visualizer.cpp | 8
-rw-r--r--  media/libmedia/mediaplayer.cpp | 64
-rwxr-xr-x[-rw-r--r--]  media/libmedia/mediarecorder.cpp | 28
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 16
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.h | 3
-rw-r--r--  media/libmediaplayerservice/MediaRecorderClient.cpp | 11
-rw-r--r--  media/libmediaplayerservice/MediaRecorderClient.h | 1
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 113
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.h | 5
-rw-r--r--  media/libmediaplayerservice/nuplayer/Android.mk | 3
-rw-r--r--  media/libmediaplayerservice/nuplayer/GenericSource.cpp | 15
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayer.cpp | 9
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayer.h | 3
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp | 11
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp | 3
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp | 53
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h | 1
-rw-r--r--  media/libmediaplayerservice/nuplayer/RTSPSource.cpp | 4
-rw-r--r--  media/libstagefright/AACExtractor.cpp | 27
-rw-r--r--  media/libstagefright/ACodec.cpp | 181
-rw-r--r--  media/libstagefright/APE.cpp | 125
-rw-r--r--  media/libstagefright/Android.mk | 41
-rw-r--r--  media/libstagefright/AudioPlayer.cpp | 30
-rw-r--r--  media/libstagefright/AudioSource.cpp | 26
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp | 102
-rw-r--r--  media/libstagefright/CameraSource.cpp | 52
-rw-r--r--  media/libstagefright/CameraSourceTimeLapse.cpp | 9
-rw-r--r--  media/libstagefright/DataSource.cpp | 150
-rw-r--r--  media/libstagefright/DataURISource.cpp | 3
-rw-r--r--  media/libstagefright/FFMPEGSoftCodec.cpp | 1149
-rw-r--r--  media/libstagefright/FLACExtractor.cpp | 188
-rw-r--r--  media/libstagefright/FileSource.cpp | 16
-rwxr-xr-x  media/libstagefright/MPEG4Extractor.cpp | 53
-rw-r--r--  media/libstagefright/MPEG4Writer.cpp | 13
-rw-r--r--  media/libstagefright/MediaCodec.cpp | 10
-rw-r--r--  media/libstagefright/MediaCodecSource.cpp | 2
-rw-r--r--  media/libstagefright/MediaDefs.cpp | 28
-rw-r--r--  media/libstagefright/MediaExtractor.cpp | 26
-rw-r--r--  media/libstagefright/NuCachedSource2.cpp | 48
-rw-r--r--  media/libstagefright/OMXClient.cpp | 1
-rw-r--r--  media/libstagefright/OMXCodec.cpp | 148
-rw-r--r--  media/libstagefright/OggExtractor.cpp | 108
-rw-r--r--  media/libstagefright/SampleIterator.cpp | 7
-rw-r--r--  media/libstagefright/SampleTable.cpp | 13
-rw-r--r--  media/libstagefright/StagefrightMediaScanner.cpp | 6
-rw-r--r--  media/libstagefright/StagefrightMetadataRetriever.cpp | 16
-rw-r--r--  media/libstagefright/Utils.cpp | 66
-rw-r--r--  media/libstagefright/WAVExtractor.cpp | 62
-rw-r--r--  media/libstagefright/codecs/amrnb/dec/Android.mk | 2
-rw-r--r--  media/libstagefright/codecs/amrnb/dec/src/a_refl.cpp | 5
-rw-r--r--  media/libstagefright/codecs/avcdec/SoftAVCDec.cpp | 384
-rw-r--r--  media/libstagefright/codecs/avcdec/SoftAVCDec.h | 16
-rw-r--r--  media/libstagefright/codecs/m4v_h263/dec/src/conceal.cpp | 5
-rw-r--r--  media/libstagefright/codecs/raw/SoftRaw.cpp | 6
-rw-r--r--  media/libstagefright/codecs/raw/SoftRaw.h | 1
-rw-r--r--  media/libstagefright/data/media_codecs_google_tv.xml | 29
-rw-r--r--[-rwxr-xr-x]  media/libstagefright/data/media_codecs_google_video.xml | 9
-rw-r--r--  media/libstagefright/foundation/AMessage.cpp | 28
-rw-r--r--  media/libstagefright/foundation/ANetworkSession.cpp | 4
-rw-r--r--  media/libstagefright/foundation/base64.cpp | 11
-rw-r--r--  media/libstagefright/httplive/LiveSession.h | 2
-rw-r--r--  media/libstagefright/httplive/PlaylistFetcher.cpp | 13
-rw-r--r--  media/libstagefright/httplive/PlaylistFetcher.h | 1
-rw-r--r--  media/libstagefright/id3/ID3.cpp | 21
-rw-r--r--  media/libstagefright/include/AACExtractor.h | 4
-rw-r--r--  media/libstagefright/include/APE.h | 43
-rw-r--r--  media/libstagefright/include/AwesomePlayer.h | 6
-rw-r--r--  media/libstagefright/include/NuCachedSource2.h | 12
-rw-r--r--  media/libstagefright/include/OMX.h | 2
-rw-r--r--  media/libstagefright/include/OMXNodeInstance.h | 5
-rw-r--r--  media/libstagefright/include/SampleIterator.h | 5
-rw-r--r--  media/libstagefright/matroska/MatroskaExtractor.cpp | 81
-rw-r--r--  media/libstagefright/omx/OMX.cpp | 5
-rw-r--r--  media/libstagefright/omx/OMXMaster.cpp | 67
-rw-r--r--  media/libstagefright/omx/OMXMaster.h | 7
-rw-r--r--  media/libstagefright/omx/OMXNodeInstance.cpp | 4
-rwxr-xr-x  media/libstagefright/omx/SoftOMXPlugin.cpp | 6
-rw-r--r--  media/libstagefright/timedtext/TextDescriptions.cpp | 627
-rw-r--r--  media/libstagefright/timedtext/TextDescriptions.h | 4
-rw-r--r--  media/mediaserver/Android.mk | 10
-rw-r--r--  media/mediaserver/main_mediaserver.cpp | 8
-rw-r--r--  media/mtp/MtpServer.cpp | 6
-rw-r--r--  media/mtp/MtpServer.h | 1
-rw-r--r--  media/utils/Android.mk | 2
-rw-r--r--  media/utils/ISchedulingPolicyService.cpp (renamed from services/audioflinger/ISchedulingPolicyService.cpp) | 0
-rw-r--r--  media/utils/ISchedulingPolicyService.h (renamed from services/audioflinger/ISchedulingPolicyService.h) | 0
-rw-r--r--  media/utils/SchedulingPolicyService.cpp (renamed from services/audioflinger/SchedulingPolicyService.cpp) | 2
-rw-r--r--  media/utils/include/mediautils/SchedulingPolicyService.h (renamed from services/audioflinger/SchedulingPolicyService.h) | 0
-rw-r--r--  services/audioflinger/Android.mk | 15
-rw-r--r--  services/audioflinger/AudioMixer.h | 1
-rw-r--r--  services/audioflinger/Threads.cpp | 8
-rw-r--r--  services/audioflinger/Tracks.cpp | 5
-rw-r--r--  services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h | 12
-rw-r--r--  services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h | 8
-rw-r--r--  services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp | 20
-rw-r--r--  services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp | 5
-rwxr-xr-x  services/audiopolicy/enginedefault/src/Engine.cpp | 8
-rw-r--r--  services/audiopolicy/enginedefault/src/Gains.cpp | 8
-rw-r--r--  services/audiopolicy/managerdefault/AudioPolicyManager.cpp | 69
-rw-r--r--  services/audiopolicy/managerdefault/AudioPolicyManager.h | 1
-rw-r--r--  services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp | 6
-rw-r--r--  services/camera/libcameraservice/Android.mk | 10
-rw-r--r--  services/camera/libcameraservice/CameraService.cpp | 82
-rw-r--r--  services/camera/libcameraservice/CameraService.h | 16
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.cpp | 2
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.cpp | 22
-rw-r--r--  services/camera/libcameraservice/api1/client2/Parameters.cpp | 4
-rw-r--r--  services/camera/libcameraservice/api2/CameraDeviceClient.cpp | 39
-rw-r--r--  services/camera/libcameraservice/api2/CameraDeviceClient.h | 4
-rw-r--r--  services/camera/libcameraservice/common/Camera2ClientBase.cpp | 17
-rw-r--r--  services/camera/libcameraservice/common/Camera2ClientBase.h | 2
-rw-r--r--  services/camera/libcameraservice/common/CameraDeviceBase.h | 6
-rw-r--r--  services/camera/libcameraservice/common/CameraModule.cpp | 56
-rw-r--r--  services/camera/libcameraservice/device1/CameraHardwareInterface.h | 27
-rw-r--r--  services/camera/libcameraservice/device2/Camera2Device.cpp | 6
-rw-r--r--  services/camera/libcameraservice/device2/Camera2Device.h | 1
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Device.cpp | 577
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Device.h | 64
-rw-r--r--  services/camera/libcameraservice/device3/Camera3DummyStream.cpp | 4
-rw-r--r--  services/camera/libcameraservice/device3/Camera3DummyStream.h | 5
-rw-r--r--  services/camera/libcameraservice/device3/Camera3OutputStream.cpp | 11
-rw-r--r--  services/camera/libcameraservice/device3/Camera3OutputStream.h | 5
-rw-r--r--  services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h | 5
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Stream.cpp | 33
-rw-r--r--  services/camera/libcameraservice/device3/Camera3Stream.h | 9
-rw-r--r--  services/camera/libcameraservice/device3/Camera3StreamInterface.h | 11
-rw-r--r--  services/mediaresourcemanager/ResourceManagerService.cpp | 9
174 files changed, 5226 insertions, 1439 deletions
diff --git a/camera/Android.mk b/camera/Android.mk
index 471cb0d..36f6da1 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -21,7 +21,6 @@ LOCAL_PATH := $(CAMERA_CLIENT_LOCAL_PATH)
LOCAL_SRC_FILES:= \
Camera.cpp \
CameraMetadata.cpp \
- CameraParameters.cpp \
CaptureResult.cpp \
CameraParameters2.cpp \
ICamera.cpp \
@@ -53,6 +52,21 @@ LOCAL_C_INCLUDES += \
system/media/camera/include \
system/media/private/camera/include \
+ifneq ($(TARGET_SPECIFIC_CAMERA_PARAMETER_LIBRARY),)
+LOCAL_WHOLE_STATIC_LIBRARIES += $(TARGET_SPECIFIC_CAMERA_PARAMETER_LIBRARY)
+else
+LOCAL_WHOLE_STATIC_LIBRARIES += libcamera_parameters
+endif
+
LOCAL_MODULE:= libcamera_client
include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ CameraParameters.cpp
+
+LOCAL_MODULE := libcamera_parameters
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index 68969cf..42b0884 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -21,6 +21,7 @@
#include <string.h>
#include <stdlib.h>
#include <camera/CameraParameters.h>
+#include <camera/CameraParametersExtra.h>
#include <system/graphics.h>
namespace android {
@@ -106,6 +107,7 @@ const char CameraParameters::WHITE_BALANCE_DAYLIGHT[] = "daylight";
const char CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT[] = "cloudy-daylight";
const char CameraParameters::WHITE_BALANCE_TWILIGHT[] = "twilight";
const char CameraParameters::WHITE_BALANCE_SHADE[] = "shade";
+const char CameraParameters::WHITE_BALANCE_MANUAL_CCT[] = "manual-cct";
// Values for effect settings.
const char CameraParameters::EFFECT_NONE[] = "none";
@@ -168,11 +170,16 @@ const char CameraParameters::FOCUS_MODE_FIXED[] = "fixed";
const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
+const char CameraParameters::FOCUS_MODE_MANUAL_POSITION[] = "manual";
// Values for light fx settings
const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
+#ifdef CAMERA_PARAMETERS_EXTRA_C
+CAMERA_PARAMETERS_EXTRA_C
+#endif
+
CameraParameters::CameraParameters()
: mMap()
{
@@ -237,6 +244,9 @@ void CameraParameters::unflatten(const String8 &params)
void CameraParameters::set(const char *key, const char *value)
{
+ if (key == NULL || value == NULL)
+ return;
+
// XXX i think i can do this with strspn()
if (strchr(key, '=') || strchr(key, ';')) {
//XXX ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
@@ -247,6 +257,14 @@ void CameraParameters::set(const char *key, const char *value)
//XXX ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
return;
}
+#ifdef QCOM_HARDWARE
+ // qcom cameras default to delivering an extra zero-exposure frame on HDR.
+ // The android SDK only wants one frame, so disable this unless the app
+ // explicitly asks for it
+ if (!get("hdr-need-1x")) {
+ mMap.replaceValueFor(String8("hdr-need-1x"), String8("false"));
+ }
+#endif
mMap.replaceValueFor(String8(key), String8(value));
}
diff --git a/camera/ICameraServiceProxy.cpp b/camera/ICameraServiceProxy.cpp
index 06a5afb..694e9c3 100644
--- a/camera/ICameraServiceProxy.cpp
+++ b/camera/ICameraServiceProxy.cpp
@@ -29,11 +29,21 @@ public:
BpCameraServiceProxy(const sp<IBinder>& impl) : BpInterface<ICameraServiceProxy>(impl) {}
virtual void pingForUserUpdate() {
- Parcel data, reply;
+ Parcel data;
data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
- remote()->transact(BnCameraServiceProxy::PING_FOR_USER_UPDATE, data, &reply,
+ remote()->transact(BnCameraServiceProxy::PING_FOR_USER_UPDATE, data, nullptr,
IBinder::FLAG_ONEWAY);
}
+
+ virtual void notifyCameraState(String16 cameraId, CameraState newCameraState) {
+ Parcel data;
+ data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
+ data.writeString16(cameraId);
+ data.writeInt32(newCameraState);
+ remote()->transact(BnCameraServiceProxy::NOTIFY_CAMERA_STATE, data, nullptr,
+ IBinder::FLAG_ONEWAY);
+ }
+
};
@@ -47,9 +57,16 @@ status_t BnCameraServiceProxy::onTransact(uint32_t code, const Parcel& data, Par
pingForUserUpdate();
return NO_ERROR;
} break;
+ case NOTIFY_CAMERA_STATE: {
+ CHECK_INTERFACE(ICameraServiceProxy, data, reply);
+ String16 cameraId = data.readString16();
+ CameraState newCameraState =
+ static_cast<CameraState>(data.readInt32());
+ notifyCameraState(cameraId, newCameraState);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
}; // namespace android
-
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index d2dc200..2a9fd2b 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -49,7 +49,8 @@ enum {
WAIT_UNTIL_IDLE,
FLUSH,
PREPARE,
- TEAR_DOWN
+ TEAR_DOWN,
+ PREPARE2
};
namespace {
@@ -366,6 +367,21 @@ public:
return reply.readInt32();
}
+ virtual status_t prepare2(int maxCount, int streamId)
+ {
+ ALOGV("prepare2");
+ Parcel data, reply;
+
+ data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+ data.writeInt32(maxCount);
+ data.writeInt32(streamId);
+
+ remote()->transact(PREPARE2, data, &reply);
+
+ reply.readExceptionCode();
+ return reply.readInt32();
+ }
+
virtual status_t tearDown(int streamId)
{
ALOGV("tearDown");
@@ -592,7 +608,14 @@ status_t BnCameraDeviceUser::onTransact(
reply->writeInt32(tearDown(streamId));
return NO_ERROR;
} break;
-
+ case PREPARE2: {
+ CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+ int maxCount = data.readInt32();
+ int streamId = data.readInt32();
+ reply->writeNoException();
+ reply->writeInt32(prepare2(maxCount, streamId));
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp
index b90da1b..f2e14b6 100644
--- a/drm/common/IDrmManagerService.cpp
+++ b/drm/common/IDrmManagerService.cpp
@@ -742,9 +742,11 @@ status_t BpDrmManagerService::decrypt(
const status_t status = reply.readInt32();
ALOGV("Return value of decrypt() is %d", status);
- const int size = reply.readInt32();
- (*decBuffer)->length = size;
- reply.read((void *)(*decBuffer)->data, size);
+ if (status == NO_ERROR) {
+ const int size = reply.readInt32();
+ (*decBuffer)->length = size;
+ reply.read((void *)(*decBuffer)->data, size);
+ }
return status;
}
@@ -1470,9 +1472,11 @@ status_t BnDrmManagerService::onTransact(
reply->writeInt32(status);
- const int size = decBuffer->length;
- reply->writeInt32(size);
- reply->write(decBuffer->data, size);
+ if (status == NO_ERROR) {
+ const int size = decBuffer->length;
+ reply->writeInt32(size);
+ reply->write(decBuffer->data, size);
+ }
clearDecryptHandle(&handle);
delete encBuffer; encBuffer = NULL;
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index ba33ffe..d85050d 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -19,6 +19,7 @@
#include <utils/KeyedVector.h>
#include <utils/String8.h>
+#include <camera/CameraParametersExtra.h>
namespace android {
@@ -554,6 +555,7 @@ public:
static const char WHITE_BALANCE_CLOUDY_DAYLIGHT[];
static const char WHITE_BALANCE_TWILIGHT[];
static const char WHITE_BALANCE_SHADE[];
+ static const char WHITE_BALANCE_MANUAL_CCT[];
// Values for effect settings.
static const char EFFECT_NONE[];
@@ -677,12 +679,18 @@ public:
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+ static const char FOCUS_MODE_MANUAL_POSITION[];
+
// Values for light special effects
// Low-light enhancement mode
static const char LIGHTFX_LOWLIGHT[];
// High-dynamic range mode
static const char LIGHTFX_HDR[];
+#ifdef CAMERA_PARAMETERS_EXTRA_H
+CAMERA_PARAMETERS_EXTRA_H
+#endif
+
/**
* Returns the the supported preview formats as an enum given in graphics.h
* corrsponding to the format given in the input string or -1 if no such
diff --git a/include/camera/CameraParametersExtra.h b/include/camera/CameraParametersExtra.h
new file mode 100644
index 0000000..80a67cc
--- /dev/null
+++ b/include/camera/CameraParametersExtra.h
@@ -0,0 +1,35 @@
+// Overload this file in your device specific config if you need
+// to add extra camera parameters.
+// A typical file would look like this:
+/*
+ * Copyright (C) 2014 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+#define CAMERA_PARAMETERS_EXTRA_C \
+const char CameraParameters::KEY_SUPPORTED_BURST_NUM[] = "supported-burst-num"; \
+const char CameraParameters::KEY_BURST_NUM[] = "burst-num"; \
+const char CameraParameters::KEY_SUPPORTED_HDR_MODES[] = "supported-hdr-modes"; \
+const char CameraParameters::KEY_HDR_MODE[] = "hdr-mode"; \
+const char CameraParameters::HDR_MODE_OFF[] = "hdr-mode-off"; \
+const char CameraParameters::HDR_MODE_HDR[] = "hdr-mode-hdr";
+
+#define CAMERA_PARAMETERS_EXTRA_H \
+ static const char KEY_SUPPORTED_BURST_NUM[]; \
+ static const char KEY_BURST_NUM[]; \
+ static const char KEY_SUPPORTED_HDR_MODES[]; \
+ static const char KEY_HDR_MODE[]; \
+ static const char HDR_MODE_OFF[]; \
+ static const char HDR_MODE_HDR[];
+*/
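The two hook macros above are consumed by CameraParameters.h (CAMERA_PARAMETERS_EXTRA_H expands inside the class body) and CameraParameters.cpp (CAMERA_PARAMETERS_EXTRA_C provides the definitions). A minimal sketch of a device overlay plus client code, reusing the HDR key names from the commented example (the overlay path is illustrative):

    // Device overlay, e.g. device/<vendor>/<board>/include/camera/CameraParametersExtra.h
    #define CAMERA_PARAMETERS_EXTRA_H \
        static const char KEY_HDR_MODE[]; \
        static const char HDR_MODE_HDR[];

    #define CAMERA_PARAMETERS_EXTRA_C \
        const char CameraParameters::KEY_HDR_MODE[] = "hdr-mode"; \
        const char CameraParameters::HDR_MODE_HDR[] = "hdr-mode-hdr";

    // Client side: once libcamera_client is built with the overlay, the extra
    // keys behave like any stock key.
    CameraParameters params;
    params.set(CameraParameters::KEY_HDR_MODE, CameraParameters::HDR_MODE_HDR);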
diff --git a/include/camera/ICameraServiceProxy.h b/include/camera/ICameraServiceProxy.h
index 12a555f..2613c01 100644
--- a/include/camera/ICameraServiceProxy.h
+++ b/include/camera/ICameraServiceProxy.h
@@ -23,15 +23,30 @@
namespace android {
+/**
+ * Interface from native camera service to managed-side camera service proxy.
+ *
+ * Keep in sync with frameworks/base/core/java/android/hardware/ICameraServiceProxy.aidl
+ *
+ */
class ICameraServiceProxy : public IInterface {
public:
enum {
PING_FOR_USER_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
+ NOTIFY_CAMERA_STATE
+ };
+
+ enum CameraState {
+ CAMERA_STATE_OPEN,
+ CAMERA_STATE_ACTIVE,
+ CAMERA_STATE_IDLE,
+ CAMERA_STATE_CLOSED
};
DECLARE_META_INTERFACE(CameraServiceProxy);
virtual void pingForUserUpdate() = 0;
+ virtual void notifyCameraState(String16 cameraId, CameraState newCameraState) = 0;
};
class BnCameraServiceProxy: public BnInterface<ICameraServiceProxy>
@@ -48,5 +63,3 @@ public:
}; // namespace android
#endif // ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
-
-
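The new transaction reports per-camera lifecycle changes to the managed-side proxy. A sketch of the caller side, assuming a binder to the proxy (`binder`) has already been looked up:

    sp<ICameraServiceProxy> proxy = interface_cast<ICameraServiceProxy>(binder);
    if (proxy != NULL) {
        // One-way calls: neither blocks the native camera service.
        proxy->notifyCameraState(String16("0"), ICameraServiceProxy::CAMERA_STATE_OPEN);
        // ... later, when the device is torn down:
        proxy->notifyCameraState(String16("0"), ICameraServiceProxy::CAMERA_STATE_CLOSED);
    }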
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index a7bf8ab..4d8eb53 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -140,6 +140,11 @@ public:
virtual status_t prepare(int streamId) = 0;
/**
+ * Preallocate up to maxCount buffers for a given output stream asynchronously.
+ */
+ virtual status_t prepare2(int maxCount, int streamId) = 0;
+
+ /**
* Free all unused buffers for a given output stream.
*/
virtual status_t tearDown(int streamId) = 0;
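prepare2() is prepare() with an upper bound on how many buffers get preallocated. A client-side sketch (stream id and count illustrative; `device` is assumed to be an sp<ICameraDeviceUser>):

    // Preallocate at most 4 buffers for output stream 0 instead of the full set.
    status_t res = device->prepare2(/*maxCount*/ 4, /*streamId*/ 0);
    if (res != OK) {
        res = device->prepare(/*streamId*/ 0);   // fall back to the original call
    }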
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 0fd8933..5957535 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -109,6 +109,16 @@ public:
virtual status_t getMetadata(bool update_only,
bool apply_filter,
Parcel *metadata) = 0;
+
+ // Suspend the video player
+ // In other words, just release the audio decoder and the video decoder
+ // @return OK if the video player was suspended successfully
+ virtual status_t suspend() = 0;
+
+ // Resume the video player
+ // Init the audio decoder and the video decoder
+ // @return OK if the video player was resumed successfully
+ virtual status_t resume() = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 77ed5d3..339bd9e 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -51,6 +51,7 @@ public:
virtual status_t prepare() = 0;
virtual status_t getMaxAmplitude(int* max) = 0;
virtual status_t start() = 0;
+ virtual status_t pause() = 0;
virtual status_t stop() = 0;
virtual status_t reset() = 0;
virtual status_t init() = 0;
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 3d29e4a..27ad694 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -249,6 +249,12 @@ public:
virtual status_t onTransact(
uint32_t code, const Parcel &data, Parcel *reply,
uint32_t flags = 0);
+
+protected:
+ // check if the codec is secure.
+ virtual bool isSecure(IOMX::node_id node) {
+ return false;
+ }
};
class BnOMXObserver : public BnInterface<IOMXObserver> {
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 745151b..4810b7e 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -267,6 +267,14 @@ public:
return INVALID_OPERATION;
}
+ virtual status_t suspend() {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t resume() {
+ return INVALID_OPERATION;
+ }
+
private:
friend class MediaPlayerService;
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 5a81574..c67bae9 100644..100755
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -1,4 +1,6 @@
/*
+ ** Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ ** Not a Contribution.
**
** Copyright 2010, The Android Open Source Project.
**
@@ -136,6 +138,9 @@ public:
* enc.vid.bps.max - max bit rate in bits per second
* enc.vid.fps.min - min frame rate in frames per second
* enc.vid.fps.max - max frame rate in frames per second
+ * enc.vid.hfr.width.max - max hfr video frame width
+ * enc.vid.hfr.height.max - max hfr video frame height
+ * enc.vid.hfr.mode.max - max hfr mode
*/
int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
@@ -274,12 +279,16 @@ private:
int minBitRate, int maxBitRate,
int minFrameWidth, int maxFrameWidth,
int minFrameHeight, int maxFrameHeight,
- int minFrameRate, int maxFrameRate)
+ int minFrameRate, int maxFrameRate,
+ int maxHFRFrameWidth, int maxHFRFrameHeight,
+ int maxHFRMode)
: mCodec(codec),
mMinBitRate(minBitRate), mMaxBitRate(maxBitRate),
mMinFrameWidth(minFrameWidth), mMaxFrameWidth(maxFrameWidth),
mMinFrameHeight(minFrameHeight), mMaxFrameHeight(maxFrameHeight),
- mMinFrameRate(minFrameRate), mMaxFrameRate(maxFrameRate) {}
+ mMinFrameRate(minFrameRate), mMaxFrameRate(maxFrameRate),
+ mMaxHFRFrameWidth(maxHFRFrameWidth), mMaxHFRFrameHeight(maxHFRFrameHeight),
+ mMaxHFRMode(maxHFRMode) {}
~VideoEncoderCap() {}
@@ -288,6 +297,8 @@ private:
int mMinFrameWidth, mMaxFrameWidth;
int mMinFrameHeight, mMaxFrameHeight;
int mMinFrameRate, mMaxFrameRate;
+ int mMaxHFRFrameWidth, mMaxHFRFrameHeight;
+ int mMaxHFRMode;
};
struct AudioEncoderCap {
@@ -402,6 +413,7 @@ private:
static VideoEncoderCap* createDefaultH263VideoEncoderCap();
static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
static AudioEncoderCap* createDefaultAmrNBEncoderCap();
+ static AudioEncoderCap* createDefaultAacEncoderCap();
static AudioEncoderCap* createDefaultLpcmEncoderCap();
static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
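The new HFR limits are read through the existing getVideoEncoderParamByName() accessor documented above; a sketch (encoder choice illustrative):

    MediaProfiles *profiles = MediaProfiles::getInstance();
    int maxHfrWidth  = profiles->getVideoEncoderParamByName("enc.vid.hfr.width.max",  VIDEO_ENCODER_H264);
    int maxHfrHeight = profiles->getVideoEncoderParamByName("enc.vid.hfr.height.max", VIDEO_ENCODER_H264);
    int maxHfrMode   = profiles->getVideoEncoderParamByName("enc.vid.hfr.mode.max",   VIDEO_ENCODER_H264);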
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index d6cc4bb..48d8b70 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -53,6 +53,7 @@ struct MediaRecorderBase {
virtual status_t prepare() = 0;
virtual status_t start() = 0;
virtual status_t stop() = 0;
+ virtual status_t pause() = 0;
virtual status_t close() = 0;
virtual status_t reset() = 0;
virtual status_t getMaxAmplitude(int *max) = 0;
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 186e018..f14977b 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -97,6 +97,7 @@ public:
// and the capture format is according to flags (see callback_flags).
status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate,
bool force = false);
+ void cancelCaptureCallBack();
// set the capture size capture size must be a power of two in the range
// [VISUALIZER_CAPTURE_SIZE_MAX. VISUALIZER_CAPTURE_SIZE_MIN]
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 1f6ddad..3d4a6e2 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -147,7 +147,8 @@ enum media_player_states {
MEDIA_PLAYER_STARTED = 1 << 4,
MEDIA_PLAYER_PAUSED = 1 << 5,
MEDIA_PLAYER_STOPPED = 1 << 6,
- MEDIA_PLAYER_PLAYBACK_COMPLETE = 1 << 7
+ MEDIA_PLAYER_PLAYBACK_COMPLETE = 1 << 7,
+ MEDIA_PLAYER_SUSPENDED = 1 << 8
};
// Keep KEY_PARAMETER_* in sync with MediaPlayer.java.
@@ -255,6 +256,8 @@ public:
status_t getParameter(int key, Parcel* reply);
status_t setRetransmitEndpoint(const char* addrString, uint16_t port);
status_t setNextMediaPlayer(const sp<MediaPlayer>& player);
+ status_t suspend();
+ status_t resume();
private:
void clear_l();
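suspend()/resume() bracket a window in which the decoders are released and later re-initialized while the MediaPlayer object itself survives (MEDIA_PLAYER_SUSPENDED above). A client sketch, with `fd`/`length` assumed to describe an already-open media file:

    sp<MediaPlayer> player = new MediaPlayer();
    player->setDataSource(fd, 0, length);
    player->prepare();
    player->start();

    // Give up the codecs without destroying the player (e.g. app goes to background).
    player->suspend();          // state becomes MEDIA_PLAYER_SUSPENDED

    // Re-create the decoders; whether playback continues automatically or needs
    // another start() is up to the underlying player implementation.
    player->resume();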
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 6ace36d..96c84bb 100644..100755
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -1,4 +1,7 @@
/*
+ ** Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ ** Not a Contribution.
+ **
** Copyright (C) 2008 The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
@@ -131,6 +134,9 @@ enum media_recorder_states {
// Recording is in progress.
MEDIA_RECORDER_RECORDING = 1 << 4,
+
+ // Recording is paused.
+ MEDIA_RECORDER_PAUSED = 1 << 5,
};
// The "msg" code passed to the listener in notify.
@@ -243,6 +249,7 @@ public:
status_t getMaxAmplitude(int* max);
virtual status_t start();
virtual status_t stop();
+ virtual status_t pause();
status_t reset();
status_t init();
status_t close();
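pause() adds the MEDIA_RECORDER_PAUSED leg to the recording state machine above. A sketch of the intended call pattern, assuming the recorder has already been configured, prepared and started; resuming via a second start() is an assumption, mirroring the platform Java API of the same era:

    // ... after setAudioSource()/setVideoSource()/setOutputFile()/prepare()/start():
    status_t err = recorder->pause();   // stop pulling frames, keep the output file open
    // later:
    err = recorder->start();            // assumed resume path from MEDIA_RECORDER_PAUSED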
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index a346e2b..2e621fe 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -28,6 +28,8 @@
#include <media/stagefright/SkipCutBuffer.h>
#include <OMX_Audio.h>
+#include <system/audio.h>
+
#define TRACK_BUFFER_TIMING 0
namespace android {
@@ -345,9 +347,11 @@ protected:
int32_t maxOutputChannelCount, const drcParams_t& drc,
int32_t pcmLimiterEnable);
- status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+ status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate,
+ int32_t bitsPerSample = 16);
- status_t setupEAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+ status_t setupEAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate,
+ int32_t bitsPerSample = 16);
status_t selectAudioPortFormat(
OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
@@ -359,7 +363,8 @@ protected:
bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel);
status_t setupRawAudioFormat(
- OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels,
+ int32_t bitsPerSample = 16);
status_t setPriority(int32_t priority);
status_t setOperatingRate(float rateFloat, bool isVideo);
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index e0cd965..edc9f25 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -103,6 +103,7 @@ private:
int64_t mSeekTimeUs;
bool mStarted;
+ bool mSourcePaused;
bool mIsFirstBuffer;
status_t mFirstBufferResult;
@@ -115,6 +116,7 @@ private:
bool mPlaying;
int64_t mStartPosUs;
const uint32_t mCreateFlags;
+ bool mPauseRequired;
static void AudioCallback(int event, void *user, void *info);
void AudioCallback(int event, void *info);
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 9750bcd..e48765e 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -46,7 +46,7 @@ struct AudioSource : public MediaSource, public MediaBufferObserver {
virtual status_t stop() { return reset(); }
virtual sp<MetaData> getFormat();
- virtual status_t pause() { return ERROR_UNSUPPORTED; }
+ virtual status_t pause();
// Returns the maximum amplitude since last call.
int16_t getMaxAmplitude();
@@ -94,6 +94,8 @@ protected:
int64_t mNumFramesReceived;
int64_t mNumClientOwnedBuffers;
+ bool mRecPaused;
+
List<MediaBuffer * > mBuffersReceived;
void trackMaxAmplitude(int16_t *data, int nSamples);
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 527ee15..70149cc 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -92,7 +92,7 @@ public:
virtual status_t read(
MediaBuffer **buffer, const ReadOptions *options = NULL);
- virtual status_t pause() { return ERROR_UNSUPPORTED; }
+ virtual status_t pause();
/**
* Check whether a CameraSource object is properly initialized.
@@ -208,6 +208,11 @@ protected:
bool mCollectStats;
bool mIsMetaDataStoredInVideoBuffers;
+ int64_t mPauseAdjTimeUs;
+ int64_t mPauseStartTimeUs;
+ int64_t mPauseEndTimeUs;
+ bool mRecPause;
+
void releaseQueuedFrames();
void releaseOneRecordingFrame(const sp<IMemory>& frame);
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index f264d98..eeb453f 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -67,6 +67,9 @@ protected:
// Real timestamp of the last encoded time lapse frame
int64_t mLastTimeLapseFrameRealTimestampUs;
+ // Adjusted continuous timestamp based on recording fps
+ // of the last encoded time lapse frame
+ int64_t mLastTimeLapseFrameTimeStampUs;
// Variable set in dataCallbackTimestamp() to help skipCurrentFrame()
// to know if current frame needs to be skipped.
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 0c31e72..56abe71 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -36,6 +36,40 @@ class IDataSource;
struct IMediaHTTPService;
class String8;
struct HTTPBase;
+class DataSource;
+
+class Sniffer : public RefBase {
+public:
+ Sniffer();
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ bool sniff(DataSource *source, String8 *mimeType, float *confidence, sp<AMessage> *meta);
+
+ // The sniffer can optionally fill in "meta" with an AMessage containing
+ // a dictionary of values that helps the corresponding extractor initialize
+ // its state without duplicating effort already exerted by the sniffer.
+ typedef bool (*SnifferFunc)(
+ const sp<DataSource> &source, String8 *mimeType,
+ float *confidence, sp<AMessage> *meta);
+
+ //if isExtendedExtractor = true, store the location of the sniffer to register
+ void registerSniffer_l(SnifferFunc func);
+ void registerDefaultSniffers();
+
+ virtual ~Sniffer() {}
+
+private:
+ Mutex mSnifferMutex;
+ List<SnifferFunc> mSniffers;
+ List<SnifferFunc> mExtraSniffers;
+ List<SnifferFunc>::iterator extendedSnifferPosition;
+
+ void registerSnifferPlugin();
+
+ Sniffer(const Sniffer &);
+ Sniffer &operator=(const Sniffer &);
+};
class DataSource : public RefBase {
public:
@@ -57,7 +91,7 @@ public:
static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
static sp<DataSource> CreateFromIDataSource(const sp<IDataSource> &source);
- DataSource() {}
+ DataSource() : mSniffer(new Sniffer()) {}
virtual status_t initCheck() const = 0;
@@ -111,12 +145,10 @@ public:
protected:
virtual ~DataSource() {}
-private:
- static Mutex gSnifferMutex;
- static List<SnifferFunc> gSniffers;
- static bool gSniffersRegistered;
+ sp<Sniffer> mSniffer;
static void RegisterSniffer_l(SnifferFunc func);
+ static void RegisterSnifferPlugin();
DataSource(const DataSource &);
DataSource &operator=(const DataSource &);
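Format detection is now carried by a per-DataSource Sniffer that walks a list of SnifferFunc callbacks; registerDefaultSniffers() seeds the stock list and registerSniffer_l() appends more. A sketch of a callback matching the typedef above (container tag and MIME string hypothetical):

    static bool SniffMyContainer(const sp<DataSource> &source, String8 *mimeType,
                                 float *confidence, sp<AMessage> *) {
        uint8_t header[4];
        if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
            return false;                       // not enough data to identify
        }
        if (memcmp(header, "MYC1", 4) != 0) {
            return false;                       // magic bytes do not match
        }
        *mimeType = "video/x-my-container";
        *confidence = 0.6f;                     // enough to beat the generic sniffers
        return true;
    }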
diff --git a/include/media/stagefright/FFMPEGSoftCodec.h b/include/media/stagefright/FFMPEGSoftCodec.h
new file mode 100644
index 0000000..83373d0
--- /dev/null
+++ b/include/media/stagefright/FFMPEGSoftCodec.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2014 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FFMPEG_SOFT_CODEC_H_
+#define FFMPEG_SOFT_CODEC_H_
+
+#include <media/IOMX.h>
+#include <media/MediaCodecInfo.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <media/stagefright/MetaData.h>
+
+#include <OMX_Audio.h>
+#include <OMX_Video.h>
+
+namespace android {
+
+struct FFMPEGSoftCodec {
+
+ enum {
+ kPortIndexInput = 0,
+ kPortIndexOutput = 1
+ };
+
+ static void convertMessageToMetaDataFF(
+ const sp<AMessage> &msg, sp<MetaData> &meta);
+
+ static void convertMetaDataToMessageFF(
+ const sp<MetaData> &meta, sp<AMessage> *format);
+
+ static const char* overrideComponentName(
+ uint32_t quirks, const sp<MetaData> &meta,
+ const char *mime, bool isEncoder);
+
+ static void overrideComponentName(
+ uint32_t quirks, const sp<AMessage> &msg,
+ AString* componentName, AString* mime,
+ int32_t isEncoder);
+
+ static status_t setSupportedRole(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder, const char *mime);
+
+ static status_t setAudioFormat(
+ const sp<AMessage> &msg, const char* mime,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+ static status_t setVideoFormat(
+ const sp<AMessage> &msg, const char* mime,
+ sp<IOMX> OMXhandle,IOMX::node_id nodeID,
+ bool isEncoder, OMX_VIDEO_CODINGTYPE *compressionFormat);
+
+ static status_t getAudioPortFormat(
+ OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+ static status_t getVideoPortFormat(
+ OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+private:
+ static const char* getMsgKey(int key);
+
+ static status_t setWMVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setRVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setFFmpegVideoFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setRawAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setWMAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setVORBISFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setRAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setFLACFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setMP2Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setAC3Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setAPEFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setDTSFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setFFmpegAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+};
+
+}
+#endif
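These helpers centralize the FFmpeg-specific format plumbing for component selection. A sketch of the name-override query (quirks value illustrative; `trackMeta` is assumed to be the track's sp<MetaData>):

    const char *component = FFMPEGSoftCodec::overrideComponentName(
            0 /* quirks */, trackMeta, MEDIA_MIMETYPE_AUDIO_WMA, false /* isEncoder */);
    if (component != NULL) {
        // Instantiate `component` through the usual IOMX node allocation path.
    }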
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index a981d1c..21844ca 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -39,6 +39,10 @@ public:
virtual status_t getSize(off64_t *size);
+ virtual String8 getUri() {
+ return mUri;
+ }
+
virtual sp<DecryptHandle> DrmInitialization(const char *mime);
virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
@@ -48,6 +52,7 @@ protected:
private:
int mFd;
+ String8 mUri;
int64_t mOffset;
int64_t mLength;
Mutex mLock;
@@ -60,6 +65,7 @@ private:
unsigned char *mDrmBuf;
ssize_t readAtDRM(off64_t offset, void *data, size_t size);
+ void fetchUriFromFd(int fd);
FileSource(const FileSource &);
FileSource &operator=(const FileSource &);
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 8b1e63b..ffbd20c 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -66,6 +66,41 @@ extern const char *MEDIA_MIMETYPE_TEXT_VTT;
extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_FLV1;
+extern const char *MEDIA_MIMETYPE_VIDEO_MJPEG;
+extern const char *MEDIA_MIMETYPE_VIDEO_RV;
+extern const char *MEDIA_MIMETYPE_VIDEO_VC1;
+extern const char *MEDIA_MIMETYPE_VIDEO_WMV;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_FFMPEG;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_PCM;
+extern const char *MEDIA_MIMETYPE_AUDIO_RA;
+extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
+extern const char *MEDIA_MIMETYPE_AUDIO_FFMPEG;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_APE;
+extern const char *MEDIA_MIMETYPE_CONTAINER_DIVX;
+extern const char *MEDIA_MIMETYPE_CONTAINER_DTS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_FLAC;
+extern const char *MEDIA_MIMETYPE_CONTAINER_FLV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MOV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MP2;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_RA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_RM;
+extern const char *MEDIA_MIMETYPE_CONTAINER_TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WEBM;
+extern const char *MEDIA_MIMETYPE_CONTAINER_VC1;
+extern const char *MEDIA_MIMETYPE_CONTAINER_HEVC;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WMA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WMV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_FFMPEG;
+
} // namespace android
#include <media/stagefright/ExtendedMediaDefs.h>
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 32925ca..2f2057f 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -19,15 +19,27 @@
#define MEDIA_EXTRACTOR_H_
#include <utils/RefBase.h>
+#include <media/stagefright/DataSource.h>
namespace android {
-class DataSource;
class MediaSource;
class MetaData;
class MediaExtractor : public RefBase {
public:
+ typedef MediaExtractor *(*CreateFunc)(const sp<DataSource> &source,
+ const char *mime, const sp<AMessage> &meta);
+
+ struct Plugin {
+ DataSource::SnifferFunc sniff;
+ CreateFunc create;
+ };
+
+ static Plugin *getPlugin() {
+ return &sPlugin;
+ }
+
static sp<MediaExtractor> Create(
const sp<DataSource> &source, const char *mime = NULL,
const uint32_t flags = 0);
@@ -76,6 +88,7 @@ protected:
private:
bool mIsDrm;
+ static Plugin sPlugin;
MediaExtractor(const MediaExtractor &);
MediaExtractor &operator=(const MediaExtractor &);
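The Plugin hook lets one out-of-tree extractor hang its sniff/create pair off MediaExtractor. A sketch of how a library might fill it in (the extractor class is hypothetical; the sniffer is the one sketched under DataSource.h):

    static MediaExtractor *CreateMyExtractor(const sp<DataSource> &source,
                                             const char * /* mime */,
                                             const sp<AMessage> & /* meta */) {
        return new MyContainerExtractor(source);   // hypothetical extractor class
    }

    static void registerMyExtractorPlugin() {
        MediaExtractor::Plugin *plugin = MediaExtractor::getPlugin();
        plugin->sniff  = SniffMyContainer;         // SnifferFunc from the DataSource.h sketch
        plugin->create = CreateMyExtractor;
    }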
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 8d4e15a..0dd5995 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -50,6 +50,10 @@ enum {
kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
kKeyBitRate = 'brte', // int32_t (bps)
+ kKeyCodecId = 'cdid', // int32_t
+ kKeyBitsPerSample = 'sbit', // int32_t (DUPE of kKeySampleBits)
+ kKeyCodedSampleBits = 'cosb', // int32_t
+ kKeySampleFormat = 'sfmt', // int32_t
kKeyESDS = 'esds', // raw data
kKeyAACProfile = 'aacp', // int32_t
kKeyAVCC = 'avcc', // raw data
@@ -131,6 +135,23 @@ enum {
kKeyIsUnreadable = 'unre', // bool (int32_t)
+ kKeyRawCodecSpecificData = 'rcsd', // raw data - added to support mmParser
+ kKeyDivXVersion = 'DivX', // int32_t
+ kKeyDivXDrm = 'QDrm', // void *
+ kKeyWMAEncodeOpt = 'eopt', // int32_t
+ kKeyWMABlockAlign = 'blka', // int32_t
+ kKeyWMAVersion = 'wmav', // int32_t
+ kKeyWMAAdvEncOpt1 = 'ade1', // int16_t
+ kKeyWMAAdvEncOpt2 = 'ade2', // int32_t
+ kKeyWMAFormatTag = 'fmtt', // int64_t
+ kKeyWMABitspersample = 'bsps', // int64_t
+ kKeyWMAVirPktSize = 'vpks', // int64_t
+ kKeyWMVProfile = 'wmvp', // int32_t
+
+ kKeyWMVVersion = 'wmvv', // int32_t
+ kKeyRVVersion = '#rvv', // int32_t
+ kKeyBlockAlign = 'ablk', // int32_t , should be different from kKeyWMABlockAlign
+
// An indication that a video buffer has been rendered.
kKeyRendered = 'rend', // bool (int32_t)
@@ -181,6 +202,9 @@ enum {
// H264 supplemental enhancement information offsets/sizes
kKeySEI = 'sei ', // raw data
+
+ kKeyPCMFormat = 'pfmt',
+ kKeyArbitraryMode = 'ArbM',
};
enum {
@@ -190,6 +214,32 @@ enum {
kTypeD263 = 'd263',
};
+enum {
+ kTypeDivXVer_3_11,
+ kTypeDivXVer_4,
+ kTypeDivXVer_5,
+ kTypeDivXVer_6,
+};
+
+enum {
+ kTypeWMA,
+ kTypeWMAPro,
+ kTypeWMALossLess,
+};
+
+enum {
+ kTypeWMVVer_7, // WMV1
+ kTypeWMVVer_8, // WMV2
+ kTypeWMVVer_9, // WMV3
+};
+
+// http://en.wikipedia.org/wiki/RealVideo
+enum {
+ kTypeRVVer_G2, // rv20: RealVideo G2
+ kTypeRVVer_8, // rv30: RealVideo 8
+ kTypeRVVer_9, // rv40: RealVideo 9
+};
+
class MetaData : public RefBase {
public:
MetaData();
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 7fabcb3..ea534e0 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -139,6 +139,9 @@ private:
EXECUTING_TO_IDLE,
IDLE_TO_LOADED,
RECONFIGURING,
+ PAUSING,
+ FLUSHING,
+ PAUSED,
ERROR
};
@@ -348,6 +351,7 @@ private:
status_t waitForBufferFilled_l();
+ status_t resumeLocked(bool drainInputBuf);
int64_t getDecodingTimeUs();
status_t parseHEVCCodecSpecificData(
diff --git a/media/img_utils/include/img_utils/DngUtils.h b/media/img_utils/include/img_utils/DngUtils.h
index 3dcedc5..1d8df9c 100644
--- a/media/img_utils/include/img_utils/DngUtils.h
+++ b/media/img_utils/include/img_utils/DngUtils.h
@@ -138,6 +138,34 @@ class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
double opticalCenterY,
const double* kCoeffs);
+
+ /**
+ * Add FixBadPixelsList opcode for the given metadata parameters.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ virtual status_t addBadPixelListForMetadata(const uint32_t* hotPixels,
+ uint32_t xyPairCount,
+ uint32_t colorFilterArrangement);
+
+ /**
+ * Add FixBadPixelsList opcode.
+ *
+ * bayerPhase - 0=top-left of image is red, 1=top-left of image is green pixel in red row,
+ * 2=top-left of image is green pixel in blue row, 3=top-left of image is
+ * blue.
+ * badPointCount - number of (x,y) pairs of bad pixels are given in badPointRowColPairs.
+ * badRectCount - number of (top, left, bottom, right) tuples are given in
+ * badRectTopLeftBottomRightTuples
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ virtual status_t addBadPixelList(uint32_t bayerPhase,
+ uint32_t badPointCount,
+ uint32_t badRectCount,
+ const uint32_t* badPointRowColPairs,
+ const uint32_t* badRectTopLeftBottomRightTuples);
+
// TODO: Add other Opcode methods
protected:
static const uint32_t FLAG_OPTIONAL = 0x1u;
@@ -146,6 +174,7 @@ class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
// Opcode IDs
enum {
WARP_RECTILINEAR_ID = 1,
+ FIX_BAD_PIXELS_LIST = 5,
GAIN_MAP_ID = 9,
};
@@ -161,6 +190,8 @@ class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
ByteArrayOutput mOpList;
EndianOutput mEndianOut;
+ status_t addOpcodePreamble(uint32_t opcodeId);
+
};
} /*namespace img_utils*/
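addBadPixelListForMetadata() maps directly onto addBadPixelList() with zero bad rectangles. A caller sketch (coordinates and CFA value illustrative):

    OpcodeListBuilder builder;
    uint32_t hotPixels[] = { 120, 34,   480, 512 };   // two bad pixels as coordinate pairs
    status_t err = builder.addBadPixelListForMetadata(
            hotPixels, /*xyPairCount*/ 2, /*colorFilterArrangement*/ 0);
    if (err != OK) {
        // Handle or propagate the error before emitting the opcode list.
    }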
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index b213403..9473dce 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -224,13 +224,7 @@ status_t OpcodeListBuilder::addGainMap(uint32_t top,
uint32_t mapPlanes,
const float* mapGains) {
- uint32_t opcodeId = GAIN_MAP_ID;
-
- status_t err = mEndianOut.write(&opcodeId, 0, 1);
- if (err != OK) return err;
-
- uint8_t version[] = {1, 3, 0, 0};
- err = mEndianOut.write(version, 0, NELEMS(version));
+ status_t err = addOpcodePreamble(GAIN_MAP_ID);
if (err != OK) return err;
// Allow this opcode to be skipped if not supported
@@ -334,13 +328,7 @@ status_t OpcodeListBuilder::addWarpRectilinear(uint32_t numPlanes,
double opticalCenterY,
const double* kCoeffs) {
- uint32_t opcodeId = WARP_RECTILINEAR_ID;
-
- status_t err = mEndianOut.write(&opcodeId, 0, 1);
- if (err != OK) return err;
-
- uint8_t version[] = {1, 3, 0, 0};
- err = mEndianOut.write(version, 0, NELEMS(version));
+ status_t err = addOpcodePreamble(WARP_RECTILINEAR_ID);
if (err != OK) return err;
// Allow this opcode to be skipped if not supported
@@ -373,5 +361,74 @@ status_t OpcodeListBuilder::addWarpRectilinear(uint32_t numPlanes,
return OK;
}
+status_t OpcodeListBuilder::addBadPixelListForMetadata(const uint32_t* hotPixels,
+ uint32_t xyPairCount,
+ uint32_t colorFilterArrangement) {
+ if (colorFilterArrangement > 3) {
+ ALOGE("%s: Unknown color filter arrangement %" PRIu32, __FUNCTION__,
+ colorFilterArrangement);
+ return BAD_VALUE;
+ }
+
+ return addBadPixelList(colorFilterArrangement, xyPairCount, 0, hotPixels, nullptr);
+}
+
+status_t OpcodeListBuilder::addBadPixelList(uint32_t bayerPhase,
+ uint32_t badPointCount,
+ uint32_t badRectCount,
+ const uint32_t* badPointRowColPairs,
+ const uint32_t* badRectTopLeftBottomRightTuples) {
+
+ status_t err = addOpcodePreamble(FIX_BAD_PIXELS_LIST);
+ if (err != OK) return err;
+
+ // Allow this opcode to be skipped if not supported
+ uint32_t flags = FLAG_OPTIONAL;
+
+ err = mEndianOut.write(&flags, 0, 1);
+ if (err != OK) return err;
+
+ const uint32_t NUM_NON_VARLEN_FIELDS = 3;
+ const uint32_t SIZE_OF_POINT = 2;
+ const uint32_t SIZE_OF_RECT = 4;
+
+ uint32_t totalSize = (NUM_NON_VARLEN_FIELDS + badPointCount * SIZE_OF_POINT +
+ badRectCount * SIZE_OF_RECT) * sizeof(uint32_t);
+ err = mEndianOut.write(&totalSize, 0, 1);
+ if (err != OK) return err;
+
+ err = mEndianOut.write(&bayerPhase, 0, 1);
+ if (err != OK) return err;
+
+ err = mEndianOut.write(&badPointCount, 0, 1);
+ if (err != OK) return err;
+
+ err = mEndianOut.write(&badRectCount, 0, 1);
+ if (err != OK) return err;
+
+ if (badPointCount > 0) {
+ err = mEndianOut.write(badPointRowColPairs, 0, SIZE_OF_POINT * badPointCount);
+ if (err != OK) return err;
+ }
+
+ if (badRectCount > 0) {
+ err = mEndianOut.write(badRectTopLeftBottomRightTuples, 0, SIZE_OF_RECT * badRectCount);
+ if (err != OK) return err;
+ }
+
+ mCount++;
+ return OK;
+}
+
+status_t OpcodeListBuilder::addOpcodePreamble(uint32_t opcodeId) {
+ status_t err = mEndianOut.write(&opcodeId, 0, 1);
+ if (err != OK) return err;
+
+ uint8_t version[] = {1, 3, 0, 0};
+ err = mEndianOut.write(version, 0, NELEMS(version));
+ if (err != OK) return err;
+ return OK;
+}
+
} /*namespace img_utils*/
} /*namespace android*/
diff --git a/media/libavextensions/Android.mk b/media/libavextensions/Android.mk
index 0380135..3918857 100644
--- a/media/libavextensions/Android.mk
+++ b/media/libavextensions/Android.mk
@@ -12,7 +12,7 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/flac/include \
- $(TOP)/hardware/qcom/media/mm-core/inc \
+ $(TOP)/$(call project-path-for,qcom-media)/mm-core/inc \
$(TOP)/frameworks/av/media/libstagefright \
LOCAL_CFLAGS += -Wno-multichar -Werror
@@ -41,7 +41,7 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/flac/include \
- $(TOP)/hardware/qcom/media/mm-core/inc
+ $(TOP)/$(call project-path-for,qcom-media)/mm-core/inc
LOCAL_CFLAGS += -Wno-multichar -Werror
@@ -75,7 +75,7 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/flac/include \
- $(TOP)/hardware/qcom/media/mm-core/inc
+ $(TOP)/$(call project-path-for,qcom-media)/mm-core/inc
LOCAL_CFLAGS += -Wno-multichar -Werror
diff --git a/media/libavextensions/mediaplayerservice/AVNuExtensions.h b/media/libavextensions/mediaplayerservice/AVNuExtensions.h
index 1d45c00..d7e29d1 100644
--- a/media/libavextensions/mediaplayerservice/AVNuExtensions.h
+++ b/media/libavextensions/mediaplayerservice/AVNuExtensions.h
@@ -85,6 +85,7 @@ struct AVNuUtils {
virtual void printFileName(int fd);
virtual void checkFormatChange(bool *formatChange, const sp<ABuffer> &accessUnit);
+ virtual void addFlagsInMeta(const sp<ABuffer> &buffer, int32_t flags, bool isAudio);
virtual bool dropCorruptFrame();
// ----- NO TRESSPASSING BEYOND THIS LINE ------
diff --git a/media/libavextensions/mediaplayerservice/AVNuUtils.cpp b/media/libavextensions/mediaplayerservice/AVNuUtils.cpp
index 95f6454..85f07db 100644
--- a/media/libavextensions/mediaplayerservice/AVNuUtils.cpp
+++ b/media/libavextensions/mediaplayerservice/AVNuUtils.cpp
@@ -33,6 +33,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaDefs.h>
+
#include <nuplayer/NuPlayer.h>
#include <nuplayer/NuPlayerDecoderBase.h>
#include <nuplayer/NuPlayerDecoderPassThrough.h>
@@ -52,12 +54,29 @@ bool AVNuUtils::pcmOffloadException(const sp<MetaData> &) {
return true;
}
-bool AVNuUtils::isRAWFormat(const sp<MetaData> &) {
- return false;
+bool AVNuUtils::isRAWFormat(const sp<MetaData> &meta) {
+ const char *mime = {0};
+ if (meta == NULL) {
+ return false;
+ }
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ if (!strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW, 9))
+ return true;
+ else
+ return false;
}
-bool AVNuUtils::isRAWFormat(const sp<AMessage> &) {
- return false;
+bool AVNuUtils::isRAWFormat(const sp<AMessage> &format) {
+ AString mime;
+ if (format == NULL) {
+ return false;
+ }
+ CHECK(format->findString("mime", &mime));
+ if (!strncasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_RAW, 9))
+ return true;
+ else
+ return false;
+
}
bool AVNuUtils::isVorbisFormat(const sp<MetaData> &) {
@@ -69,20 +88,39 @@ int AVNuUtils::updateAudioBitWidth(audio_format_t /*audioFormat*/,
return 16;
}
-audio_format_t AVNuUtils::getKeyPCMFormat(const sp<MetaData> &) {
- return AUDIO_FORMAT_INVALID;
-}
+audio_format_t AVNuUtils::getKeyPCMFormat(const sp<MetaData> &meta) {
+ int32_t pcmFormat = 0;
+ if (meta->findInt32('pfmt', &pcmFormat))
+ return (audio_format_t)pcmFormat;
-void AVNuUtils::setKeyPCMFormat(const sp<MetaData> &, audio_format_t /*audioFormat*/) {
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+void AVNuUtils::setKeyPCMFormat(const sp<MetaData> &meta, audio_format_t audioFormat) {
+ if (audio_is_linear_pcm(audioFormat))
+ meta->setInt32('pfmt', audioFormat);
}
-audio_format_t AVNuUtils::getPCMFormat(const sp<AMessage> &) {
+audio_format_t AVNuUtils::getPCMFormat(const sp<AMessage> &format) {
+ int32_t pcmFormat = 0;
+ if (format->findInt32("pcm-format", &pcmFormat))
+ return (audio_format_t)pcmFormat;
+
+ int32_t bits = 16;
+ if (format->findInt32("bit-width", &bits)) {
+ if (bits == 8)
+ return AUDIO_FORMAT_PCM_8_BIT;
+ if (bits == 24)
+ return AUDIO_FORMAT_PCM_32_BIT;
+ if (bits == 32)
+ return AUDIO_FORMAT_PCM_FLOAT;
+ }
return AUDIO_FORMAT_PCM_16_BIT;
}
-void AVNuUtils::setPCMFormat(const sp<AMessage> &, audio_format_t /*audioFormat*/) {
-
+void AVNuUtils::setPCMFormat(const sp<AMessage> &format, audio_format_t audioFormat) {
+ if (audio_is_linear_pcm(audioFormat))
+ format->setInt32("pcm-format", audioFormat);
}
void AVNuUtils::setSourcePCMFormat(const sp<MetaData> &) {
@@ -104,6 +142,10 @@ void AVNuUtils::checkFormatChange(bool * /*formatChange*/,
const sp<ABuffer> & /*accessUnit*/) {
}
+void AVNuUtils::addFlagsInMeta(const sp<ABuffer> & /*buffer*/,
+ int32_t /*flags*/, bool /*isAudio*/) {
+}
+
uint32_t AVNuUtils::getFlags() {
return 0;
}
diff --git a/media/libavextensions/stagefright/AVFactory.cpp b/media/libavextensions/stagefright/AVFactory.cpp
index 2a3810d..f6d5f53 100644
--- a/media/libavextensions/stagefright/AVFactory.cpp
+++ b/media/libavextensions/stagefright/AVFactory.cpp
@@ -72,7 +72,7 @@ sp<NuCachedSource2> AVFactory::createCachedSource(
const sp<DataSource> &source,
const char *cacheConfig,
bool disconnectAtHighwatermark) {
- return new NuCachedSource2(source, cacheConfig, disconnectAtHighwatermark);
+ return NuCachedSource2::Create(source, cacheConfig, disconnectAtHighwatermark);
}
MediaHTTP* AVFactory::createMediaHTTP(
diff --git a/media/libavextensions/stagefright/AVUtils.cpp b/media/libavextensions/stagefright/AVUtils.cpp
index 324ff9b..50c0f89 100644
--- a/media/libavextensions/stagefright/AVUtils.cpp
+++ b/media/libavextensions/stagefright/AVUtils.cpp
@@ -66,8 +66,10 @@ int AVUtils::getAudioSampleBits(const sp<MetaData> &) {
return 16;
}
-int AVUtils::getAudioSampleBits(const sp<AMessage> &) {
- return 16;
+int AVUtils::getAudioSampleBits(const sp<AMessage> &format) {
+ int32_t bits = 16;
+ format->findInt32("bit-width", &bits);
+ return bits;
}
void AVUtils::setPcmSampleBits(const sp<AMessage> &, int32_t /*bitWidth*/) {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 22c66db..3a8dc07 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -528,6 +528,15 @@ status_t AudioTrack::start()
mTimestampStartupGlitchReported = false;
mRetrogradeMotionReported = false;
+ // If previousState == STATE_STOPPED, we reactivate markers (mMarkerPosition != 0)
+ // as the position is reset to 0. This is legacy behavior. This is not done
+ // in stop() to avoid a race condition where the last marker event is issued twice.
+ // Note: the if is technically unnecessary because previousState == STATE_FLUSHED
+ // is only for streaming tracks, and mMarkerReached is already set to false.
+ if (previousState == STATE_STOPPED) {
+ mMarkerReached = false;
+ }
+
// For offloaded tracks, we don't know if the hardware counters are really zero here,
// since the flush is asynchronous and stop may not fully drain.
// We save the time when the track is started to later verify whether
@@ -603,9 +612,9 @@ void AudioTrack::stop()
mProxy->interrupt();
mAudioTrack->stop();
- // the playback head position will reset to 0, so if a marker is set, we need
- // to activate it again
- mMarkerReached = false;
+
+ // Note: legacy handling - stop does not clear playback marker
+ // and periodic update counter, but flush does for streaming tracks.
if (mSharedBuffer != 0) {
// clear buffer position and loop count.
@@ -708,7 +717,7 @@ status_t AudioTrack::setVolume(float left, float right)
mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
- if (isOffloaded_l()) {
+ if (isOffloaded_l() && mAudioTrack != NULL) {
mAudioTrack->signal();
}
return NO_ERROR;
@@ -833,13 +842,13 @@ status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
}
// Check resampler ratios are within bounds
- if (effectiveRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
+ if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
playbackRate.mSpeed, playbackRate.mPitch);
return BAD_VALUE;
}
- if (effectiveRate * AUDIO_RESAMPLER_UP_RATIO_MAX < mSampleRate) {
+ if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
playbackRate.mSpeed, playbackRate.mPitch);
return BAD_VALUE;
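
Widening the ratio checks to uint64_t keeps the products exact: with 32-bit operands, sampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX (or effectiveRate * AUDIO_RESAMPLER_UP_RATIO_MAX) could wrap and let an out-of-range playback rate slip through. A standalone illustration of the wraparound, with values chosen only to make it visible:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t sampleRate   = 20000000;  // deliberately pathological
        const uint32_t downRatioMax = 256;

        uint32_t bound32 = sampleRate * downRatioMax;            // wraps to 825032704
        uint64_t bound64 = (uint64_t)sampleRate * downRatioMax;  // exact: 5120000000

        printf("32-bit product: %u\n64-bit product: %llu\n",
               bound32, (unsigned long long)bound64);
        return 0;
    }
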
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 6a51a76..0f8e6d6 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -38,7 +38,7 @@ size_t clampToSize(T x) {
// In general, this means (new_self) returned is max(self, other) + 1.
static uint32_t incrementSequence(uint32_t self, uint32_t other) {
- int32_t diff = self - other;
+ int32_t diff = (int32_t) self - (int32_t) other;
if (diff >= 0 && diff < INT32_MAX) {
return self + 1; // we're already ahead of other.
}
@@ -893,7 +893,7 @@ ssize_t StaticAudioTrackServerProxy::pollPosition()
if (mObserver.poll(state)) {
StaticAudioTrackState trystate = mState;
bool result;
- const int32_t diffSeq = state.mLoopSequence - state.mPositionSequence;
+ const int32_t diffSeq = (int32_t) state.mLoopSequence - (int32_t) state.mPositionSequence;
if (diffSeq < 0) {
result = updateStateWithLoop(&trystate, state) == OK &&
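
Both hunks compare monotonically increasing sequence counters, and the explicit int32_t casts make the signed interpretation of the difference visible so that ordering still works across a 2^32 wrap. The underlying idiom, as a minimal standalone sketch rather than the exact AudioTrackShared code:

    #include <cassert>
    #include <cstdint>

    static bool isAheadOrEqual(uint32_t self, uint32_t other) {
        // Modular subtraction reinterpreted as signed: small forward gaps stay
        // positive even when the counter has wrapped around.
        return (int32_t)(self - other) >= 0;
    }

    int main() {
        assert(isAheadOrEqual(5, 3));
        assert(!isAheadOrEqual(3, 5));
        assert(isAheadOrEqual(2, 0xFFFFFFFEu));  // 2 comes "after" 2^32 - 2
        return 0;
    }
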
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index a3f014b..0bf503a 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -937,7 +937,7 @@ status_t BnAudioFlinger::onTransact(
pid_t tid = (pid_t) data.readInt32();
int sessionId = data.readInt32();
int clientUid = data.readInt32();
- status_t status;
+ status_t status = NO_ERROR;
sp<IAudioTrack> track;
if ((haveSharedBuffer && (buffer == 0)) ||
((buffer != 0) && (buffer->pointer() == NULL))) {
@@ -972,7 +972,7 @@ status_t BnAudioFlinger::onTransact(
size_t notificationFrames = data.readInt64();
sp<IMemory> cblk;
sp<IMemory> buffers;
- status_t status;
+ status_t status = NO_ERROR;
sp<IAudioRecord> record = openRecord(input,
sampleRate, format, channelMask, opPackageName, &frameCount, &flags, tid,
clientUid, &sessionId, &notificationFrames, cblk, buffers, &status);
@@ -1104,13 +1104,15 @@ status_t BnAudioFlinger::onTransact(
case OPEN_OUTPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_module_handle_t module = (audio_module_handle_t)data.readInt32();
- audio_config_t config;
- data.read(&config, sizeof(audio_config_t));
+ audio_config_t config = {};
+ if (data.read(&config, sizeof(audio_config_t)) != NO_ERROR) {
+ ALOGE("b/23905951");
+ }
audio_devices_t devices = (audio_devices_t)data.readInt32();
String8 address(data.readString8());
audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
- uint32_t latencyMs;
- audio_io_handle_t output;
+ uint32_t latencyMs = 0;
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status = openOutput(module, &output, &config,
&devices, address, &latencyMs, flags);
ALOGV("OPEN_OUTPUT output, %d", output);
@@ -1149,8 +1151,10 @@ status_t BnAudioFlinger::onTransact(
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_module_handle_t module = (audio_module_handle_t)data.readInt32();
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
- audio_config_t config;
- data.read(&config, sizeof(audio_config_t));
+ audio_config_t config = {};
+ if (data.read(&config, sizeof(audio_config_t)) != NO_ERROR) {
+ ALOGE("b/23905951");
+ }
audio_devices_t device = (audio_devices_t)data.readInt32();
String8 address(data.readString8());
audio_source_t source = (audio_source_t)data.readInt32();
@@ -1186,8 +1190,8 @@ status_t BnAudioFlinger::onTransact(
case GET_RENDER_POSITION: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- uint32_t halFrames;
- uint32_t dspFrames;
+ uint32_t halFrames = 0;
+ uint32_t dspFrames = 0;
status_t status = getRenderPosition(&halFrames, &dspFrames, output);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1223,7 +1227,7 @@ status_t BnAudioFlinger::onTransact(
} break;
case QUERY_NUM_EFFECTS: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- uint32_t numEffects;
+ uint32_t numEffects = 0;
status_t status = queryNumberEffects(&numEffects);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1233,7 +1237,7 @@ status_t BnAudioFlinger::onTransact(
}
case QUERY_EFFECT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- effect_descriptor_t desc;
+ effect_descriptor_t desc = {};
status_t status = queryEffect(data.readInt32(), &desc);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1245,7 +1249,7 @@ status_t BnAudioFlinger::onTransact(
CHECK_INTERFACE(IAudioFlinger, data, reply);
effect_uuid_t uuid;
data.read(&uuid, sizeof(effect_uuid_t));
- effect_descriptor_t desc;
+ effect_descriptor_t desc = {};
status_t status = getEffectDescriptor(&uuid, &desc);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1255,16 +1259,18 @@ status_t BnAudioFlinger::onTransact(
}
case CREATE_EFFECT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- effect_descriptor_t desc;
- data.read(&desc, sizeof(effect_descriptor_t));
+ effect_descriptor_t desc = {};
+ if (data.read(&desc, sizeof(effect_descriptor_t)) != NO_ERROR) {
+ ALOGE("b/23905951");
+ }
sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder());
int32_t priority = data.readInt32();
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
int sessionId = data.readInt32();
const String16 opPackageName = data.readString16();
- status_t status;
- int id;
- int enabled;
+ status_t status = NO_ERROR;
+ int id = 0;
+ int enabled = 0;
sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId,
opPackageName, &status, &id, &enabled);
@@ -1333,8 +1339,10 @@ status_t BnAudioFlinger::onTransact(
} break;
case GET_AUDIO_PORT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- struct audio_port port;
- data.read(&port, sizeof(struct audio_port));
+ struct audio_port port = {};
+ if (data.read(&port, sizeof(struct audio_port)) != NO_ERROR) {
+ ALOGE("b/23905951");
+ }
status_t status = getAudioPort(&port);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1346,8 +1354,10 @@ status_t BnAudioFlinger::onTransact(
CHECK_INTERFACE(IAudioFlinger, data, reply);
struct audio_patch patch;
data.read(&patch, sizeof(struct audio_patch));
- audio_patch_handle_t handle;
- data.read(&handle, sizeof(audio_patch_handle_t));
+ audio_patch_handle_t handle = {};
+ if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
+ ALOGE("b/23905951");
+ }
status_t status = createAudioPatch(&patch, &handle);
reply->writeInt32(status);
if (status == NO_ERROR) {
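
The pattern repeated throughout these IAudioFlinger hunks is defensive: every stack struct and out-parameter gets a defined value before use, and the return value of Parcel::read() is checked so a truncated parcel cannot leave uninitialized bytes that later get written back to the caller (the b/23905951 references). A hypothetical handler showing the same shape; the transaction, interface and getter names are made up:

    case GET_SOME_CONFIG: {
        CHECK_INTERFACE(ISomeService, data, reply);
        some_config_t config = {};                    // zero-initialize, never trust the parcel length
        if (data.read(&config, sizeof(config)) != NO_ERROR) {
            ALOGE("short parcel for GET_SOME_CONFIG");
        }
        uint32_t result = 0;                          // out-parameter starts defined
        status_t status = getSomeConfig(&config, &result);
        reply->writeInt32(status);
        if (status == NO_ERROR) {
            reply->writeInt32(result);                // only echo data we actually produced
        }
        return NO_ERROR;
    } break;
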
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 3348441..76b5924 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -877,7 +877,7 @@ status_t BnAudioPolicyService::onTransact(
if (hasOffloadInfo) {
data.read(&offloadInfo, sizeof(audio_offload_info_t));
}
- audio_io_handle_t output;
+ audio_io_handle_t output = 0;
status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
&output, session, &stream, uid,
samplingRate, format, channelMask,
@@ -932,7 +932,7 @@ status_t BnAudioPolicyService::onTransact(
audio_channel_mask_t channelMask = data.readInt32();
audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
- audio_io_handle_t input;
+ audio_io_handle_t input = {};
status_t status = getInputForAttr(&attr, &input, session, uid,
samplingRate, format, channelMask,
flags, selectedDeviceId);
@@ -994,7 +994,7 @@ status_t BnAudioPolicyService::onTransact(
audio_stream_type_t stream =
static_cast <audio_stream_type_t>(data.readInt32());
audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
- int index;
+ int index = 0;
status_t status = getStreamVolumeIndex(stream, &index, device);
reply->writeInt32(index);
reply->writeInt32(static_cast <uint32_t>(status));
@@ -1148,8 +1148,10 @@ status_t BnAudioPolicyService::onTransact(
case GET_AUDIO_PORT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_port port;
- data.read(&port, sizeof(struct audio_port));
+ struct audio_port port = {};
+ if (data.read(&port, sizeof(struct audio_port)) != NO_ERROR) {
+ ALOGE("b/23912202");
+ }
status_t status = getAudioPort(&port);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1162,8 +1164,10 @@ status_t BnAudioPolicyService::onTransact(
CHECK_INTERFACE(IAudioPolicyService, data, reply);
struct audio_patch patch;
data.read(&patch, sizeof(struct audio_patch));
- audio_patch_handle_t handle;
- data.read(&handle, sizeof(audio_patch_handle_t));
+ audio_patch_handle_t handle = {};
+ if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
+ ALOGE("b/23912202");
+ }
status_t status = createAudioPatch(&patch, &handle);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1238,9 +1242,9 @@ status_t BnAudioPolicyService::onTransact(
CHECK_INTERFACE(IAudioPolicyService, data, reply);
sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
data.readStrongBinder());
- audio_session_t session;
- audio_io_handle_t ioHandle;
- audio_devices_t device;
+ audio_session_t session = {};
+ audio_io_handle_t ioHandle = {};
+ audio_devices_t device = {};
status_t status = acquireSoundTriggerSession(&session, &ioHandle, &device);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1292,7 +1296,7 @@ status_t BnAudioPolicyService::onTransact(
data.read(&source, sizeof(struct audio_port_config));
audio_attributes_t attributes;
data.read(&attributes, sizeof(audio_attributes_t));
- audio_io_handle_t handle;
+ audio_io_handle_t handle = {};
status_t status = startAudioSource(&source, &attributes, &handle);
reply->writeInt32(status);
reply->writeInt32(handle);
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 8f05936..5d822cf 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -301,14 +301,31 @@ status_t BnCrypto::onTransact(
secureBufferId = reinterpret_cast<void *>(static_cast<uintptr_t>(data.readInt64()));
AVMediaUtils::get()->readCustomData(&data, &secureBufferId);
} else {
- dstPtr = malloc(totalSize);
- CHECK(dstPtr != NULL);
+ dstPtr = calloc(1, totalSize);
}
AString errorDetailMsg;
ssize_t result;
- if (offset + totalSize > sharedBuffer->size()) {
+ size_t sumSubsampleSizes = 0;
+ bool overflow = false;
+ for (int32_t i = 0; i < numSubSamples; ++i) {
+ CryptoPlugin::SubSample &ss = subSamples[i];
+ if (sumSubsampleSizes <= SIZE_MAX - ss.mNumBytesOfEncryptedData) {
+ sumSubsampleSizes += ss.mNumBytesOfEncryptedData;
+ } else {
+ overflow = true;
+ }
+ if (sumSubsampleSizes <= SIZE_MAX - ss.mNumBytesOfClearData) {
+ sumSubsampleSizes += ss.mNumBytesOfClearData;
+ } else {
+ overflow = true;
+ }
+ }
+
+ if (overflow || sumSubsampleSizes != totalSize) {
+ result = -EINVAL;
+ } else if (offset + totalSize > sharedBuffer->size()) {
result = -EINVAL;
} else {
result = decrypt(
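
The added validation rejects a decrypt request whose per-subsample clear and encrypted byte counts do not sum exactly to totalSize, and it refuses to let the summation itself wrap past SIZE_MAX. The same guard, distilled into a standalone helper (SubSample here is a stand-in for CryptoPlugin::SubSample):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SubSample {
        uint32_t clearBytes;
        uint32_t encryptedBytes;
    };

    // True only if all sizes add up to expectedTotal without overflowing size_t.
    static bool subSampleSizesMatch(const std::vector<SubSample> &subs, size_t expectedTotal) {
        size_t sum = 0;
        for (const SubSample &ss : subs) {
            if (sum > SIZE_MAX - ss.encryptedBytes) return false;
            sum += ss.encryptedBytes;
            if (sum > SIZE_MAX - ss.clearBytes) return false;
            sum += ss.clearBytes;
        }
        return sum == expectedTotal;
    }
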
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index 761b243..faf5795 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -85,13 +85,15 @@ public:
data.writeInt32(size);
status_t status = remote()->transact(COMMAND, data, &reply);
+ if (status == NO_ERROR) {
+ status = reply.readInt32();
+ }
if (status != NO_ERROR) {
if (pReplySize != NULL)
*pReplySize = 0;
return status;
}
- status = reply.readInt32();
size = reply.readInt32();
if (size != 0 && pReplyData != NULL && pReplySize != NULL) {
reply.read(pReplyData, size);
@@ -155,6 +157,10 @@ status_t BnEffect::onTransact(
char *cmd = NULL;
if (cmdSize) {
cmd = (char *)calloc(cmdSize, 1);
+ if (cmd == NULL) {
+ reply->writeInt32(NO_MEMORY);
+ return NO_ERROR;
+ }
data.read(cmd, cmdSize);
}
uint32_t replySize = data.readInt32();
@@ -162,15 +168,22 @@ status_t BnEffect::onTransact(
char *resp = NULL;
if (replySize) {
resp = (char *)calloc(replySize, 1);
+ if (resp == NULL) {
+ free(cmd);
+ reply->writeInt32(NO_MEMORY);
+ return NO_ERROR;
+ }
}
status_t status = command(cmdCode, cmdSize, cmd, &replySz, resp);
reply->writeInt32(status);
- if (replySz < replySize) {
- replySize = replySz;
- }
- reply->writeInt32(replySize);
- if (replySize) {
- reply->write(resp, replySize);
+ if (status == NO_ERROR) {
+ if (replySz < replySize) {
+ replySize = replySz;
+ }
+ reply->writeInt32(replySize);
+ if (replySize) {
+ reply->write(resp, replySize);
+ }
}
if (cmd) {
free(cmd);
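
Both ends of the COMMAND transaction now agree on a stricter reply layout: the stub writes the command status first and appends a size and payload only on success, and the proxy folds that remote status into its own return value before reading anything else. A sketch of the client-side read order (helper name and signature are illustrative, not the real IEffect code):

    #include <binder/Parcel.h>
    #include <utils/Errors.h>

    using namespace android;

    // Reply layout: [status] on failure, [status][size][payload] on success.
    static status_t readCommandReply(const Parcel &reply, void *out, uint32_t *outSize) {
        status_t status = reply.readInt32();
        if (status != NO_ERROR) {
            if (outSize != NULL) *outSize = 0;
            return status;                        // the stub wrote nothing further
        }
        uint32_t size = reply.readInt32();
        if (size != 0 && out != NULL && outSize != NULL && size <= *outSize) {
            reply.read(out, size);
            *outSize = size;
        }
        return NO_ERROR;
    }
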
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 7387a79..bad84b7 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -67,6 +67,8 @@ enum {
SET_RETRANSMIT_ENDPOINT,
GET_RETRANSMIT_ENDPOINT,
SET_NEXT_PLAYER,
+ SUSPEND,
+ RESUME,
};
class BpMediaPlayer: public BpInterface<IMediaPlayer>
@@ -419,6 +421,22 @@ public:
return err;
}
+
+ status_t suspend()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ remote()->transact(SUSPEND, data, &reply);
+ return reply.readInt32();
+ }
+
+ status_t resume()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ remote()->transact(RESUME, data, &reply);
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -566,7 +584,7 @@ status_t BnMediaPlayer::onTransact(
} break;
case GET_CURRENT_POSITION: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
- int msec;
+ int msec = 0;
status_t ret = getCurrentPosition(&msec);
reply->writeInt32(msec);
reply->writeInt32(ret);
@@ -574,7 +592,7 @@ status_t BnMediaPlayer::onTransact(
} break;
case GET_DURATION: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
- int msec;
+ int msec = 0;
status_t ret = getDuration(&msec);
reply->writeInt32(msec);
reply->writeInt32(ret);
@@ -682,6 +700,18 @@ status_t BnMediaPlayer::onTransact(
return NO_ERROR;
} break;
+ case SUSPEND: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ status_t ret = suspend();
+ reply->writeInt32(ret);
+ return NO_ERROR;
+ } break;
+ case RESUME: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ status_t ret = resume();
+ reply->writeInt32(ret);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
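
SUSPEND and RESUME are appended after the existing transaction codes so the numeric values already in use stay stable across mixed binder versions. The new proxy methods ignore the status returned by transact() itself; a slightly more defensive variant of the same call, purely illustrative, would be:

    status_t suspend()
    {
        Parcel data, reply;
        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
        status_t err = remote()->transact(SUSPEND, data, &reply);
        if (err != NO_ERROR) {
            return err;               // transport failure: the reply is not valid
        }
        return reply.readInt32();     // status written by BnMediaPlayer
    }
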
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index ee3b584..4711c1b 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -39,6 +39,7 @@ enum {
QUERY_SURFACE_MEDIASOURCE,
RESET,
STOP,
+ PAUSE,
START,
PREPARE,
GET_MAX_AMPLITUDE,
@@ -258,6 +259,15 @@ public:
return reply.readInt32();
}
+ status_t pause()
+ {
+ ALOGV("pause");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ remote()->transact(PAUSE, data, &reply);
+ return reply.readInt32();
+ }
+
status_t stop()
{
ALOGV("stop");
@@ -334,6 +344,12 @@ status_t BnMediaRecorder::onTransact(
reply->writeInt32(stop());
return NO_ERROR;
} break;
+ case PAUSE: {
+ ALOGV("PAUSE");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ reply->writeInt32(pause());
+ return NO_ERROR;
+ } break;
case START: {
ALOGV("START");
CHECK_INTERFACE(IMediaRecorder, data, reply);
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index ac2e872..5356494 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -965,6 +965,12 @@ status_t BnOMX::onTransact(
node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
+ if (!isSecure(node) || port_index != 0 /* kPortIndexInput */) {
+ ALOGE("b/24310423");
+ reply->writeInt32(INVALID_OPERATION);
+ return NO_ERROR;
+ }
+
size_t size = data.readInt64();
buffer_id buffer;
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 8af0ce8..87ec309 100644..100755
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -1,4 +1,6 @@
/*
+** Copyright (c) 2014, The Linux Foundation. All rights reserved.
+** Not a Contribution.
**
** Copyright 2010, The Android Open Source Project
**
@@ -137,6 +139,8 @@ MediaProfiles::logVideoEncoderCap(const MediaProfiles::VideoEncoderCap& cap UNUS
ALOGV("frame width: min = %d and max = %d", cap.mMinFrameWidth, cap.mMaxFrameWidth);
ALOGV("frame height: min = %d and max = %d", cap.mMinFrameHeight, cap.mMaxFrameHeight);
ALOGV("frame rate: min = %d and max = %d", cap.mMinFrameRate, cap.mMaxFrameRate);
+ ALOGV("max HFR width: = %d max HFR height: = %d", cap.mMaxHFRFrameWidth, cap.mMaxHFRFrameHeight);
+ ALOGV("max HFR mode: = %d", cap.mMaxHFRMode);
}
/*static*/ void
@@ -272,10 +276,23 @@ MediaProfiles::createVideoEncoderCap(const char **atts)
const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]);
CHECK(codec != -1);
+ int maxHFRWidth = 0, maxHFRHeight = 0, maxHFRMode = 0;
+ // Check if there are enough (start through end) attributes in the
+ // 0-terminated list, to include our additional HFR params. Then check
+ // if each of those match the expected names.
+ if (atts[20] && atts[21] && !strcmp("maxHFRFrameWidth", atts[20]) &&
+ atts[22] && atts[23] && !strcmp("maxHFRFrameHeight", atts[22]) &&
+ atts[24] && atts[25] && !strcmp("maxHFRMode", atts[24])) {
+ maxHFRWidth = atoi(atts[21]);
+ maxHFRHeight = atoi(atts[23]);
+ maxHFRMode = atoi(atts[25]);
+ }
+
MediaProfiles::VideoEncoderCap *cap =
new MediaProfiles::VideoEncoderCap(static_cast<video_encoder>(codec),
atoi(atts[5]), atoi(atts[7]), atoi(atts[9]), atoi(atts[11]), atoi(atts[13]),
- atoi(atts[15]), atoi(atts[17]), atoi(atts[19]));
+ atoi(atts[15]), atoi(atts[17]), atoi(atts[19]),
+ maxHFRWidth, maxHFRHeight, maxHFRMode);
logVideoEncoderCap(*cap);
return cap;
}
@@ -629,14 +646,14 @@ MediaProfiles::getInstance()
MediaProfiles::createDefaultH263VideoEncoderCap()
{
return new MediaProfiles::VideoEncoderCap(
- VIDEO_ENCODER_H263, 192000, 420000, 176, 352, 144, 288, 1, 20);
+ VIDEO_ENCODER_H263, 192000, 420000, 176, 352, 144, 288, 1, 20, 0, 0, 0);
}
/*static*/ MediaProfiles::VideoEncoderCap*
MediaProfiles::createDefaultM4vVideoEncoderCap()
{
return new MediaProfiles::VideoEncoderCap(
- VIDEO_ENCODER_MPEG_4_SP, 192000, 420000, 176, 352, 144, 288, 1, 20);
+ VIDEO_ENCODER_MPEG_4_SP, 192000, 420000, 176, 352, 144, 288, 1, 20, 0, 0, 0);
}
@@ -791,6 +808,7 @@ MediaProfiles::createDefaultCamcorderProfiles(MediaProfiles *profiles)
MediaProfiles::createDefaultAudioEncoders(MediaProfiles *profiles)
{
profiles->mAudioEncoders.add(createDefaultAmrNBEncoderCap());
+ profiles->mAudioEncoders.add(createDefaultAacEncoderCap());
profiles->mAudioEncoders.add(createDefaultLpcmEncoderCap());
}
@@ -826,6 +844,12 @@ MediaProfiles::createDefaultAmrNBEncoderCap()
AUDIO_ENCODER_AMR_NB, 5525, 12200, 8000, 8000, 1, 1);
}
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultAacEncoderCap()
+{
+ return new MediaProfiles::AudioEncoderCap(
+ AUDIO_ENCODER_AAC, 64000, 156000, 8000, 48000, 1, 2);
+}
/*static*/ MediaProfiles::AudioEncoderCap*
MediaProfiles::createDefaultLpcmEncoderCap()
@@ -949,6 +973,9 @@ int MediaProfiles::getVideoEncoderParamByName(const char *name, video_encoder co
if (!strcmp("enc.vid.bps.max", name)) return mVideoEncoders[index]->mMaxBitRate;
if (!strcmp("enc.vid.fps.min", name)) return mVideoEncoders[index]->mMinFrameRate;
if (!strcmp("enc.vid.fps.max", name)) return mVideoEncoders[index]->mMaxFrameRate;
+ if (!strcmp("enc.vid.hfr.width.max", name)) return mVideoEncoders[index]->mMaxHFRFrameWidth;
+ if (!strcmp("enc.vid.hfr.height.max", name)) return mVideoEncoders[index]->mMaxHFRFrameHeight;
+ if (!strcmp("enc.vid.hfr.mode.max", name)) return mVideoEncoders[index]->mMaxHFRMode;
ALOGE("The given video encoder param name %s is not found", name);
return -1;
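
createVideoEncoderCap receives the XML attributes as a NULL-terminated array of name/value string pairs (atts[0]/atts[1], atts[2]/atts[3], ...), which is why the HFR extension probes fixed indices 20 through 25 and verifies the expected attribute names before converting the values. A position-independent way to read an optional integer attribute from such an array, shown as an illustrative helper that is not part of the patch:

    #include <cstdlib>
    #include <cstring>

    // atts is { name0, value0, name1, value1, ..., NULL }.
    static int findOptionalIntAttr(const char **atts, const char *name, int defaultValue) {
        for (size_t i = 0; atts[i] != NULL && atts[i + 1] != NULL; i += 2) {
            if (!strcmp(atts[i], name)) {
                return atoi(atts[i + 1]);
            }
        }
        return defaultValue;
    }

    // e.g. int maxHFRWidth = findOptionalIntAttr(atts, "maxHFRFrameWidth", 0);
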
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index f5c1b1f..8d83c1b 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -94,6 +94,14 @@ status_t Visualizer::setEnabled(bool enabled)
return status;
}
+void Visualizer::cancelCaptureCallBack()
+{
+ sp<CaptureThread> t = mCaptureThread;
+ if (t != 0) {
+ t->requestExitAndWait();
+ }
+}
+
status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
uint32_t rate, bool force)
{
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 502ab2d..3c6bef3 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -333,6 +333,9 @@ status_t MediaPlayer::start()
ALOGV("playback completed immediately following start()");
}
}
+ } else if ( (mPlayer != 0) && (mCurrentState & MEDIA_PLAYER_SUSPENDED) ) {
+ ALOGV("start while suspended, so ignore this start");
+ ret = NO_ERROR;
} else {
ALOGE("start called in state %d", mCurrentState);
ret = INVALID_OPERATION;
@@ -395,6 +398,10 @@ bool MediaPlayer::isPlaying()
ALOGE("internal/external state mismatch corrected");
mCurrentState = MEDIA_PLAYER_STARTED;
}
+ if ((mCurrentState & MEDIA_PLAYER_PLAYBACK_COMPLETE) && temp) {
+ ALOGE("internal/external state mismatch corrected");
+ mCurrentState = MEDIA_PLAYER_STARTED;
+ }
return temp;
}
ALOGV("isPlaying: no active player");
@@ -484,7 +491,8 @@ status_t MediaPlayer::getDuration_l(int *msec)
{
ALOGV("getDuration_l");
bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED |
- MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE));
+ MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE |
+ MEDIA_PLAYER_SUSPENDED));
if (mPlayer != 0 && isValidState) {
int durationMs;
status_t ret = mPlayer->getDuration(&durationMs);
@@ -514,7 +522,7 @@ status_t MediaPlayer::seekTo_l(int msec)
{
ALOGV("seekTo %d", msec);
if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
- MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
+ MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_SUSPENDED) ) ) {
if ( msec < 0 ) {
ALOGW("Attempt to seek to invalid position: %d", msec);
msec = 0;
@@ -935,4 +943,54 @@ status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) {
return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
}
-} // namespace android
+status_t MediaPlayer::suspend() {
+ ALOGV("MediaPlayer::suspend()");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGE("mPlayer = NULL");
+ return NO_INIT;
+ }
+
+ bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_SUSPENDED));
+
+ if (!isValidState) {
+ ALOGE("suspend while in a invalid state = %d", mCurrentState);
+ return UNKNOWN_ERROR;
+ }
+
+ status_t ret = mPlayer->suspend();
+
+ if (OK != ret) {
+ ALOGE("MediaPlayer::suspend() return with error ret = %d", ret);
+ return ret;
+ }
+ mCurrentState = MEDIA_PLAYER_SUSPENDED;
+ return OK;
+}
+
+status_t MediaPlayer::resume() {
+ ALOGV("MediaPlayer::resume()");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGE("mPlayer == NULL");
+ return NO_INIT;
+ }
+
+ bool isValidState = (mCurrentState == MEDIA_PLAYER_SUSPENDED);
+
+ if (!isValidState) {
+ ALOGE("resume while in a invalid state = %d", mCurrentState);
+ return UNKNOWN_ERROR;
+ }
+
+ status_t ret = mPlayer->resume();
+
+ if (OK != ret) {
+ ALOGE("MediaPlayer::resume() return with error ret = %d", ret);
+ return ret;
+ }
+ mCurrentState = MEDIA_PLAYER_PREPARED;
+ return OK;
+}
+
+}; // namespace android
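
At the client level the new calls form a small state machine: suspend() is accepted from any prepared, started, paused, stopped or playback-complete state and parks the player in MEDIA_PLAYER_SUSPENDED, while resume() is only legal from that state and returns the player to MEDIA_PLAYER_PREPARED, so playback has to be restarted explicitly. A hedged usage sketch, with setup and error handling trimmed:

    sp<MediaPlayer> player = new MediaPlayer();
    // ... setDataSource(), prepare(), start() as usual ...
    if (player->suspend() == OK) {
        // MEDIA_PLAYER_SUSPENDED: a start() here is tolerated but simply ignored.
    }
    if (player->resume() == OK) {
        player->start();              // back in MEDIA_PLAYER_PREPARED, restart playback
    }
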
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 8bbd8f1..8b7b171 100644..100755
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -482,7 +482,7 @@ status_t MediaRecorder::start()
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
- if (!(mCurrentState & MEDIA_RECORDER_PREPARED)) {
+ if (!(mCurrentState & (MEDIA_RECORDER_PREPARED | MEDIA_RECORDER_PAUSED))) {
ALOGE("start called in an invalid state: %d", mCurrentState);
return INVALID_OPERATION;
}
@@ -497,6 +497,29 @@ status_t MediaRecorder::start()
return ret;
}
+status_t MediaRecorder::pause()
+{
+ ALOGV("pause");
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ if (!(mCurrentState & MEDIA_RECORDER_RECORDING)) {
+ ALOGE("pause called in an invalid state: %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ status_t ret = mMediaRecorder->pause();
+ if (OK != ret) {
+ ALOGE("pause failed: %d", ret);
+ mCurrentState = MEDIA_RECORDER_ERROR;
+ return ret;
+ }
+
+ mCurrentState = MEDIA_RECORDER_PAUSED;
+ return ret;
+}
+
status_t MediaRecorder::stop()
{
ALOGV("stop");
@@ -504,7 +527,7 @@ status_t MediaRecorder::stop()
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
- if (!(mCurrentState & MEDIA_RECORDER_RECORDING)) {
+ if (!(mCurrentState & (MEDIA_RECORDER_RECORDING | MEDIA_RECORDER_PAUSED))) {
ALOGE("stop called in an invalid state: %d", mCurrentState);
return INVALID_OPERATION;
}
@@ -540,6 +563,7 @@ status_t MediaRecorder::reset()
ret = OK;
break;
+ case MEDIA_RECORDER_PAUSED:
case MEDIA_RECORDER_RECORDING:
case MEDIA_RECORDER_DATASOURCE_CONFIGURED:
case MEDIA_RECORDER_PREPARED:
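
The recorder side gains a matching PAUSED state: pause() is only valid while recording, start() now also accepts PAUSED and acts as resume, and stop()/reset() accept PAUSED so a paused session can still be torn down cleanly. An illustrative call sequence, with configuration omitted:

    sp<MediaRecorder> recorder = new MediaRecorder(String16("com.example.app"));
    // ... set sources, encoders, output file, then prepare() ...
    recorder->start();     // MEDIA_RECORDER_RECORDING
    recorder->pause();     // MEDIA_RECORDER_PAUSED: writer and sources are paused
    recorder->start();     // resumes recording, now allowed from PAUSED
    recorder->stop();      // also allowed directly from PAUSED
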
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 6e104a4..de51b3c 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1306,6 +1306,22 @@ void MediaPlayerService::Client::addNewMetadataUpdate(media::Metadata::Type meta
}
}
+status_t MediaPlayerService::Client::suspend()
+{
+ ALOGV("[%d] suspend", mConnId);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == NULL) return NO_INIT;
+ return p->suspend();
+}
+
+status_t MediaPlayerService::Client::resume()
+{
+ ALOGV("[%d] resume", mConnId);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == NULL) return NO_INIT;
+ return p->resume();
+}
+
#if CALLBACK_ANTAGONIZER
const int Antagonizer::interval = 10000; // 10 msecs
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 748b25f..ff8f550 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -334,6 +334,9 @@ private:
int getAudioSessionId() { return mAudioSessionId; }
+ virtual status_t suspend();
+ virtual status_t resume();
+
private:
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 6f242e5..1e112c8 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -244,6 +244,17 @@ status_t MediaRecorderClient::start()
}
+status_t MediaRecorderClient::pause()
+{
+ ALOGV("pause");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder == NULL) {
+ ALOGE("recorder is not initialized");
+ return NO_INIT;
+ }
+ return mRecorder->pause();
+}
+
status_t MediaRecorderClient::stop()
{
ALOGV("stop");
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 05130d4..2e77d21 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -49,6 +49,7 @@ public:
virtual status_t prepare();
virtual status_t getMaxAmplitude(int* max);
virtual status_t start();
+ virtual status_t pause();
virtual status_t stop();
virtual status_t reset();
virtual status_t init();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 80d5ac2..b1f0742 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -79,7 +79,8 @@ StagefrightRecorder::StagefrightRecorder(const String16 &opPackageName)
mOutputFd(-1),
mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
- mStarted(false) {
+ mStarted(false),
+ mRecPaused(false) {
ALOGV("Constructor");
reset();
@@ -846,6 +847,22 @@ status_t StagefrightRecorder::start() {
return INVALID_OPERATION;
}
+ if (mRecPaused == true) {
+ status_t err = mWriter->start();
+ if (err != OK) {
+ ALOGE("Writer start in StagefrightRecorder pause failed");
+ return err;
+ }
+
+ err = setSourcePause(false);
+ if (err != OK) {
+ ALOGE("Source start after pause failed");
+ return err;
+ }
+
+ mRecPaused = false;
+ return OK;
+ }
status_t status = OK;
if (mVideoSource != VIDEO_SOURCE_SURFACE) {
@@ -1010,6 +1027,7 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
ALOGE("Failed to create audio encoder");
}
+ mAudioEncoderOMX = audioEncoder;
return audioEncoder;
}
@@ -1564,7 +1582,7 @@ status_t StagefrightRecorder::setupVideoEncoder(
format->setInt32("width", mVideoWidth);
format->setInt32("height", mVideoHeight);
format->setInt32("stride", mVideoWidth);
- format->setInt32("slice-height", mVideoWidth);
+ format->setInt32("slice-height", mVideoHeight);
format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
// set up time lapse/slow motion for surface source
@@ -1625,6 +1643,8 @@ status_t StagefrightRecorder::setupVideoEncoder(
mGraphicBufferProducer = encoder->getGraphicBufferProducer();
}
+ mVideoSourceNode = cameraSource;
+ mVideoEncoderOMX = encoder;
*source = encoder;
return OK;
@@ -1759,10 +1779,23 @@ void StagefrightRecorder::setupMPEG4orWEBMMetaData(sp<MetaData> *meta) {
status_t StagefrightRecorder::pause() {
ALOGV("pause");
+ status_t err = OK;
if (mWriter == NULL) {
return UNKNOWN_ERROR;
}
- mWriter->pause();
+ err = setSourcePause(true);
+ if (err != OK) {
+ ALOGE("StagefrightRecorder pause failed");
+ return err;
+ }
+
+ err = mWriter->pause();
+ if (err != OK) {
+ ALOGE("Writer pause failed");
+ return err;
+ }
+
+ mRecPaused = true;
if (mStarted) {
mStarted = false;
@@ -1791,6 +1824,16 @@ status_t StagefrightRecorder::stop() {
mCameraSourceTimeLapse = NULL;
}
+ if (mRecPaused) {
+ status_t err = setSourcePause(false);
+ if (err != OK) {
+ ALOGE("Source start after pause in StagefrightRecorder stop failed");
+ return err;
+ }
+
+ mRecPaused = false;
+ }
+
if (mWriter != NULL) {
err = mWriter->stop();
mWriter.clear();
@@ -1960,4 +2003,68 @@ status_t StagefrightRecorder::dump(
::write(fd, result.string(), result.size());
return OK;
}
+
+status_t StagefrightRecorder::setSourcePause(bool pause) {
+ status_t err = OK;
+ if (pause) {
+ if (mVideoEncoderOMX != NULL) {
+ err = mVideoEncoderOMX->pause();
+ if (err != OK) {
+ ALOGE("OMX VideoEncoder pause failed");
+ return err;
+ }
+ }
+ if (mAudioEncoderOMX != NULL) {
+ err = mAudioEncoderOMX->pause();
+ if (err != OK) {
+ ALOGE("OMX AudioEncoder pause failed");
+ return err;
+ }
+ }
+ if (mVideoSourceNode != NULL) {
+ err = mVideoSourceNode->pause();
+ if (err != OK) {
+ ALOGE("OMX VideoSourceNode pause failed");
+ return err;
+ }
+ }
+ if (mAudioSourceNode != NULL) {
+ err = mAudioSourceNode->pause();
+ if (err != OK) {
+ ALOGE("OMX AudioSourceNode pause failed");
+ return err;
+ }
+ }
+ } else {
+ if (mVideoSourceNode != NULL) {
+ err = mVideoSourceNode->start();
+ if (err != OK) {
+ ALOGE("OMX VideoSourceNode start failed");
+ return err;
+ }
+ }
+ if (mAudioSourceNode != NULL) {
+ err = mAudioSourceNode->start();
+ if (err != OK) {
+ ALOGE("OMX AudioSourceNode start failed");
+ return err;
+ }
+ }
+ if (mVideoEncoderOMX != NULL) {
+ err = mVideoEncoderOMX->start();
+ if (err != OK) {
+ ALOGE("OMX VideoEncoder start failed");
+ return err;
+ }
+ }
+ if (mAudioEncoderOMX != NULL) {
+ err = mAudioEncoderOMX->start();
+ if (err != OK) {
+ ALOGE("OMX AudioEncoder start failed");
+ return err;
+ }
+ }
+ }
+ return err;
+}
} // namespace android
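
setSourcePause is deliberately asymmetric: when pausing, the OMX encoders are paused before the camera and audio source nodes, and when resuming, the sources are started before the encoders, so an encoder is never left running against a stopped source. The skeleton of that ordering, with the per-node null checks of the real method omitted:

    // Pause:  { videoEncoder, audioEncoder, videoSource, audioSource } -> pause()
    // Resume: { videoSource,  audioSource,  videoEncoder, audioEncoder } -> start()
    static status_t applyInOrder(const Vector<sp<MediaSource> > &nodes, bool pause) {
        for (size_t i = 0; i < nodes.size(); ++i) {
            status_t err = pause ? nodes[i]->pause() : nodes[i]->start();
            if (err != OK) {
                return err;          // propagate the first failure, as the patch does
            }
        }
        return OK;
    }
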
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index d2ff62d..26c5582 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -60,6 +60,7 @@ struct StagefrightRecorder : public MediaRecorderBase {
virtual status_t setParameters(const String8& params);
virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
virtual status_t setClientName(const String16& clientName);
+ virtual status_t setSourcePause(bool pause);
virtual status_t prepare();
virtual status_t start();
virtual status_t pause();
@@ -80,6 +81,9 @@ protected:
String16 mClientName;
uid_t mClientUid;
sp<MediaWriter> mWriter;
+ sp<MediaSource> mVideoEncoderOMX;
+ sp<MediaSource> mAudioEncoderOMX;
+ sp<MediaSource> mVideoSourceNode;
int mOutputFd;
sp<AudioSource> mAudioSourceNode;
@@ -123,6 +127,7 @@ protected:
MediaProfiles *mEncoderProfiles;
bool mStarted;
+ bool mRecPaused;
// Needed when GLFrames are encoded.
// An <IGraphicBufferProducer> pointer
// will be sent to the client side using which the
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 005cb4b..6729cd5 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -24,7 +24,8 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/timedtext \
$(TOP)/frameworks/av/media/libmediaplayerservice \
$(TOP)/frameworks/native/include/media/openmax \
- $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/av/include/media \
LOCAL_CFLAGS += -Werror -Wall
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index ee0310e..c0355d7 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -485,10 +485,12 @@ void NuPlayer::GenericSource::finishPrepareAsync() {
void NuPlayer::GenericSource::notifyPreparedAndCleanup(status_t err) {
if (err != OK) {
- Mutex::Autolock _l(mSourceLock);
- mDataSource.clear();
- mCachedSource.clear();
- mHttpSource.clear();
+ {
+ Mutex::Autolock _l(mSourceLock);
+ mDataSource.clear();
+ mCachedSource.clear();
+ mHttpSource.clear();
+ }
mBitrate = -1;
cancelPollBuffering();
@@ -541,14 +543,13 @@ void NuPlayer::GenericSource::resume() {
}
void NuPlayer::GenericSource::disconnect() {
-
- sp<DataSource> dataSource;
- sp<DataSource> httpSource;
+ sp<DataSource> dataSource, httpSource;
{
Mutex::Autolock _l(mSourceLock);
dataSource = mDataSource;
httpSource = mHttpSource;
}
+
if (dataSource != NULL) {
// disconnect data source
if (dataSource->flags() & DataSource::kIsCachingDataSource) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index ec1ab79..8064781 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -206,6 +206,7 @@ NuPlayer::NuPlayer(pid_t pid)
mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
mVideoFpsHint(-1.f),
mStarted(false),
+ mResetting(false),
mSourceStarted(false),
mPaused(false),
mPausedByClient(false),
@@ -1163,6 +1164,8 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
{
ALOGV("kWhatReset");
+ mResetting = true;
+
mDeferredActions.push_back(
new FlushDecoderAction(
FLUSH_CMD_SHUTDOWN /* audio */,
@@ -1248,7 +1251,8 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
}
void NuPlayer::onResume() {
- if (!mPaused) {
+ if (!mPaused || mResetting) {
+ ALOGD_IF(mResetting, "resetting, onResume discarded");
return;
}
mPaused = false;
@@ -1969,6 +1973,7 @@ void NuPlayer::performReset() {
}
mStarted = false;
+ mResetting = false;
mSourceStarted = false;
}
@@ -2229,7 +2234,7 @@ void NuPlayer::onSourceNotify(const sp<AMessage> &msg) {
int posMs;
int64_t timeUs, posUs;
driver->getCurrentPosition(&posMs);
- posUs = posMs * 1000;
+ posUs = (int64_t) posMs * 1000ll;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
if (posUs < timeUs) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index c0aa782..1f4dec8 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -202,6 +202,7 @@ protected:
AVSyncSettings mSyncSettings;
float mVideoFpsHint;
bool mStarted;
+ bool mResetting;
bool mSourceStarted;
// Actual pause state, either as requested by client or due to buffering.
@@ -261,7 +262,7 @@ protected:
void processDeferredActions();
- void performSeek(int64_t seekTimeUs);
+ virtual void performSeek(int64_t seekTimeUs);
void performDecoderFlush(FlushCommand audio, FlushCommand video);
void performReset();
void performScanSources();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 0bffafe..98eff88 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -35,6 +35,7 @@
#include <media/stagefright/MediaErrors.h>
#include <stagefright/AVExtensions.h>
+#include <stagefright/FFMPEGSoftCodec.h>
#include <gui/Surface.h>
#include "avc_utils.h"
@@ -257,9 +258,16 @@ void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
mCodec = AVUtils::get()->createCustomComponentByName(mCodecLooper, mime.c_str(), false /* encoder */, format);
+ FFMPEGSoftCodec::overrideComponentName(0, format, &mComponentName, &mime, false);
+
if (mCodec == NULL) {
- mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+ if (!mComponentName.startsWith(mime.c_str())) {
+ mCodec = MediaCodec::CreateByComponentName(mCodecLooper, mComponentName.c_str());
+ } else {
+ mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+ }
}
+
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
@@ -589,6 +597,7 @@ bool NuPlayer::Decoder::handleAnOutputBuffer(
buffer->meta()->clear();
buffer->meta()->setInt64("timeUs", timeUs);
setPcmFormat(buffer->meta());
+ AVNuUtils::get()->addFlagsInMeta(buffer, flags, mIsAudio);
bool eos = flags & MediaCodec::BUFFER_FLAG_EOS;
// we do not expect CODECCONFIG or SYNCFRAME for decoder
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index a294d36..8727907 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -753,7 +753,8 @@ void NuPlayerDriver::notifyListener_l(
// the last little bit of audio. If we're looping, we need to restart it.
mAudioSink->start();
}
- break;
+ // don't send completion event when looping
+ return;
}
mPlayer->pause();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 9d2f134..490a0d2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -26,6 +26,7 @@
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/AWakeLock.h>
#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
@@ -119,6 +120,7 @@ NuPlayer::Renderer::Renderer(
mNotifyCompleteVideo(false),
mSyncQueues(false),
mPaused(false),
+ mPauseDrainAudioAllowedUs(0),
mVideoSampleReceived(false),
mVideoRenderingStarted(false),
mVideoRenderingStartGeneration(0),
@@ -651,6 +653,14 @@ void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
return;
}
+ // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
+ if (mPaused) {
+ const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
+ if (diffUs > delayUs) {
+ delayUs = diffUs;
+ }
+ }
+
mDrainAudioQueuePending = true;
sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
msg->setInt32("drainGeneration", mAudioDrainGeneration);
@@ -819,6 +829,10 @@ void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
}
bool NuPlayer::Renderer::onDrainAudioQueue() {
+ // do not drain audio during teardown as queued buffers may be invalid.
+ if (mAudioTornDown) {
+ return false;
+ }
// TODO: This call to getPosition checks if AudioTrack has been created
// in AudioSink before draining audio. If AudioTrack doesn't exist, then
// CHECKs on getPosition will fail.
@@ -898,6 +912,8 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
ALOGV("AudioSink write would block when writing %zu bytes", copy);
} else {
ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
+ // This can only happen when AudioSink was opened with doNotReconnect flag set to
+ // true, in which case the NuPlayer will handle the reconnect.
notifyAudioTearDown();
}
break;
@@ -964,6 +980,10 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
int32_t sampleRate = offloadingAudio() ?
mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
+ if (sampleRate == 0) {
+ ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
+ return 0;
+ }
// TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}
@@ -1367,8 +1387,16 @@ void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
mAudioSink->flush();
// Call stop() to signal to the AudioSink to completely fill the
// internal buffer before resuming playback.
+ // FIXME: this is ignored after flush().
mAudioSink->stop();
- if (!mPaused) {
+ if (mPaused) {
+ // Race condition: if renderer is paused and audio sink is stopped,
+ // we need to make sure that the audio track buffer fully drains
+ // before delivering data.
+ // FIXME: remove this if we can detect if stop() is complete.
+ const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
+ mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
+ } else {
mAudioSink->start();
}
mNumFramesWritten = 0;
@@ -1678,13 +1706,17 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
}
+ int32_t bitWidth = 16;
+ format->findInt32("bit-width", &bitWidth);
+
int32_t sampleRate;
CHECK(format->findInt32("sample-rate", &sampleRate));
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
if (offloadingAudio()) {
audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
- AString mime;
- CHECK(format->findString("mime", &mime));
status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
if (err != OK) {
@@ -1692,15 +1724,11 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
"audio_format", mime.c_str());
onDisableOffloadAudio();
} else {
- int32_t bitWidth = 16;
- ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
- mime.c_str(), audioFormat);
-
audioFormat = AVUtils::get()->updateAudioFormat(audioFormat, format);
bitWidth = AVUtils::get()->getAudioSampleBits(format);
int avgBitRate = -1;
- format->findInt32("bit-rate", &avgBitRate);
+ format->findInt32("bitrate", &avgBitRate);
int32_t aacProfile = -1;
if (audioFormat == AUDIO_FORMAT_AAC
@@ -1824,6 +1852,12 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
const uint32_t frameCount =
(unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
+ // The doNotReconnect flag means AudioSink will signal back and let NuPlayer re-construct
+ // AudioSink. We don't want this when there's video because it will cause a video seek to
+ // the previous I frame. But we do want this when there's only audio because it will give
+ // NuPlayer a chance to switch from non-offload mode to offload mode.
+ // So we only set doNotReconnect when there's no video.
+ const bool doNotReconnect = !hasVideo;
status_t err = mAudioSink->open(
sampleRate,
numChannels,
@@ -1834,13 +1868,14 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
mUseAudioCallback ? this : NULL,
(audio_output_flags_t)pcmFlags,
NULL,
- true /* doNotReconnect */,
+ doNotReconnect,
frameCount);
if (err == OK) {
err = mAudioSink->setPlaybackRate(mPlaybackSettings);
}
if (err != OK) {
ALOGW("openAudioSink: non offloaded open failed status: %d", err);
+ mAudioSink->close();
mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
return err;
}
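
Two renderer behaviours interact in these hunks: after flush() and stop() while paused, the next audio drain is pushed out by two 50 ms mixer cycles (mPauseDrainAudioAllowedUs) so the AudioSink has genuinely stopped before new data is delivered, and doNotReconnect is now requested only for audio-only playback so a routing change with video falls back through NuPlayer instead of forcing a seek to the previous I-frame. The delay computation, isolated into a helper whose name is illustrative:

    #include <media/stagefright/foundation/ALooper.h>

    // Delay (microseconds) to apply to the next audio drain while paused.
    static int64_t pausedDrainDelayUs(int64_t pauseDrainAudioAllowedUs, int64_t requestedDelayUs) {
        const int64_t diffUs = pauseDrainAudioAllowedUs - android::ALooper::GetNowUs();
        return diffUs > requestedDelayUs ? diffUs : requestedDelayUs;
    }
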
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 28ec16f..50bd0a9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -173,6 +173,7 @@ protected:
// modified on only renderer's thread.
bool mPaused;
+ int64_t mPauseDrainAudioAllowedUs; // time when we can drain/deliver audio in pause mode.
bool mVideoSampleReceived;
bool mVideoRenderingStarted;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 6d0e075..35567a5 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -139,7 +139,9 @@ void NuPlayer::RTSPSource::pause() {
return;
}
}
- mHandler->pause();
+ if (mHandler != NULL) {
+ mHandler->pause();
+ }
}
void NuPlayer::RTSPSource::resume() {
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index 45e8a30..1353e3f 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -136,7 +136,8 @@ AACExtractor::AACExtractor(
const sp<DataSource> &source, const sp<AMessage> &_meta)
: mDataSource(source),
mInitCheck(NO_INIT),
- mFrameDurationUs(0) {
+ mFrameDurationUs(0),
+ mApeMeta(new MetaData) {
sp<AMessage> meta = _meta;
if (meta == NULL) {
@@ -170,11 +171,25 @@ AACExtractor::AACExtractor(
off64_t streamSize, numFrames = 0;
size_t frameSize = 0;
int64_t duration = 0;
+ uint8_t apeTag[8];
if (mDataSource->getSize(&streamSize) == OK) {
while (offset < streamSize) {
+ mDataSource->readAt(offset, &apeTag, 8);
+ if (ape.isAPE(apeTag)) {
+ size_t apeSize = 0;
+ mDataSource->readAt(offset + 8 + 4, &apeSize, 1);
+
+ if (ape.parseAPE(source, offset, mApeMeta) == false) {
+ break;
+ }
+
+ mOffsetVector.push(offset);
+ offset += apeSize;
+ continue;
+ }
if ((frameSize = getAdtsFrameLength(source, offset, NULL)) == 0) {
- return;
+ break;
}
mOffsetVector.push(offset);
@@ -196,15 +211,13 @@ AACExtractor::~AACExtractor() {
}
sp<MetaData> AACExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
if (mInitCheck != OK) {
- return meta;
+ return mApeMeta;
}
+ mApeMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC_ADTS);
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC_ADTS);
-
- return meta;
+ return mApeMeta;
}
size_t AACExtractor::countTracks() {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index f40a3c4..213d4e2 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -44,6 +44,8 @@
#include <media/stagefright/OMXCodec.h>
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/SurfaceUtils.h>
+#include <media/stagefright/FFMPEGSoftCodec.h>
+
#include <media/hardware/HardwareAPI.h>
#include <OMX_AudioExt.h>
@@ -541,7 +543,12 @@ ACodec::ACodec()
ACodec::~ACodec() {
}
-status_t ACodec::setupCustomCodec(status_t err, const char *, const sp<AMessage> &) {
+status_t ACodec::setupCustomCodec(status_t err, const char *mime, const sp<AMessage> &msg) {
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11) && !mIsEncoder) {
+ return FFMPEGSoftCodec::setAudioFormat(
+ msg, mime, mOMX, mNode);
+ }
+
return err;
}
@@ -976,6 +983,12 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
// 2. try to allocate two (2) additional buffers to reduce starvation from
// the consumer
// plus an extra buffer to account for incorrect minUndequeuedBufs
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ // Some devices don't like to set OMX_IndexParamPortDefinition at this
+ // point (even with an unmodified def), so skip it if possible.
+ // This check was present in KitKat.
+ if (def.nBufferCountActual < def.nBufferCountMin + *minUndequeuedBuffers) {
+#endif
for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
OMX_U32 newBufferCount =
def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
@@ -995,6 +1008,9 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
return err;
}
}
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ }
+#endif
err = native_window_set_buffer_count(
mNativeWindow.get(), def.nBufferCountActual);
@@ -1329,7 +1345,8 @@ ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
}
bool stale = false;
- for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+ for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+ i--;
BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
if (info->mGraphicBuffer != NULL &&
@@ -1372,7 +1389,8 @@ ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
// get oldest undequeued buffer
BufferInfo *oldest = NULL;
- for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+ for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+ i--;
BufferInfo *info =
&mBuffers[kPortIndexOutput].editItemAt(i);
if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW &&
@@ -1589,7 +1607,11 @@ status_t ACodec::setComponentRole(
}
if (i == kNumMimeToRole) {
- return ERROR_UNSUPPORTED;
+ status_t err = ERROR_UNSUPPORTED;
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::setSupportedRole(mOMX, mNode, isEncoder, mime);
+ }
+ return err;
}
const char *role =
@@ -1910,7 +1932,8 @@ status_t ACodec::configureCodec(
if (video) {
// determine need for software renderer
bool usingSwRenderer = false;
- if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
+ if (haveNativeWindow && (mComponentName.startsWith("OMX.google.") ||
+ mComponentName.startsWith("OMX.ffmpeg."))) {
usingSwRenderer = true;
haveNativeWindow = false;
}
@@ -1995,10 +2018,12 @@ status_t ACodec::configureCodec(
// and have the decoder figure it all out.
err = OK;
} else {
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bit-width", &bitsPerSample);
err = setupRawAudioFormat(
encoder ? kPortIndexInput : kPortIndexOutput,
sampleRate,
- numChannels);
+ numChannels, bitsPerSample);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
int32_t numChannels, sampleRate;
@@ -2113,16 +2138,21 @@ status_t ACodec::configureCodec(
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bit-width", &bitsPerSample);
+ err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels, bitsPerSample);
}
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
+ } else if (!strncmp(mComponentName.c_str(), "OMX.google.", 11)
+ && !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
int32_t numChannels;
int32_t sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupAC3Codec(encoder, numChannels, sampleRate);
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bit-width", &bitsPerSample);
+ err = setupAC3Codec(encoder, numChannels, sampleRate, bitsPerSample);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EAC3)) {
int32_t numChannels;
@@ -2131,7 +2161,9 @@ status_t ACodec::configureCodec(
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupEAC3Codec(encoder, numChannels, sampleRate);
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bit-width", &bitsPerSample);
+ err = setupEAC3Codec(encoder, numChannels, sampleRate, bitsPerSample);
}
} else {
err = setupCustomCodec(err, mime, msg);
@@ -2448,9 +2480,9 @@ status_t ACodec::setupAACCodec(
}
status_t ACodec::setupAC3Codec(
- bool encoder, int32_t numChannels, int32_t sampleRate) {
+ bool encoder, int32_t numChannels, int32_t sampleRate, int32_t bitsPerSample) {
status_t err = setupRawAudioFormat(
- encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+ encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels, bitsPerSample);
if (err != OK) {
return err;
@@ -2486,9 +2518,9 @@ status_t ACodec::setupAC3Codec(
}
status_t ACodec::setupEAC3Codec(
- bool encoder, int32_t numChannels, int32_t sampleRate) {
+ bool encoder, int32_t numChannels, int32_t sampleRate, int32_t bitsPerSample) {
status_t err = setupRawAudioFormat(
- encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+ encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels, bitsPerSample);
if (err != OK) {
return err;
@@ -2634,7 +2666,7 @@ status_t ACodec::setupFlacCodec(
}
status_t ACodec::setupRawAudioFormat(
- OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels, int32_t bitsPerSample) {
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
@@ -2669,7 +2701,7 @@ status_t ACodec::setupRawAudioFormat(
pcmParams.nChannels = numChannels;
pcmParams.eNumData = OMX_NumericalDataSigned;
pcmParams.bInterleaved = OMX_TRUE;
- pcmParams.nBitPerSample = 16;
+ pcmParams.nBitPerSample = bitsPerSample;
pcmParams.nSamplingRate = sampleRate;
pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
@@ -2896,7 +2928,13 @@ status_t ACodec::setupVideoDecoder(
status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
if (err != OK) {
- return err;
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::setVideoFormat(
+ msg, mime, mOMX, mNode, mIsEncoder, &compressionFormat);
+ }
+ if (err != OK) {
+ return err;
+ }
}
err = setVideoPortFormatType(
@@ -3046,7 +3084,14 @@ status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
if (err != OK) {
- return err;
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::setVideoFormat(
+ msg, mime, mOMX, mNode, mIsEncoder, &compressionFormat);
+ }
+ if (err != OK) {
+ ALOGE("Not a supported video mime type: %s", mime);
+ return err;
+ }
}
err = setVideoPortFormatType(
@@ -3858,6 +3903,7 @@ bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
+ fmt != OMX_TI_COLOR_FormatYUV420PackedSemiPlanar &&
fmt != HAL_PIXEL_FORMAT_YV12) {
ALOGW("do not know color format 0x%x = %d", fmt, fmt);
return false;
@@ -3930,6 +3976,7 @@ bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
case OMX_COLOR_FormatYUV420SemiPlanar:
// FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+ case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
// NV12
image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
image.mPlane[image.U].mColInc = 2;
@@ -4154,6 +4201,14 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
default:
{
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::getVideoPortFormat(portIndex,
+ (int)videoDef->eCompressionFormat, notify, mOMX, mNode);
+ if (err == OK) {
+ break;
+ }
+ }
+
if (mIsEncoder ^ (portIndex == kPortIndexOutput)) {
// should be CodingUnused
ALOGE("Raw port video compression format is %s(%d)",
@@ -4200,7 +4255,9 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
if (params.nChannels <= 0
|| (params.nChannels != 1 && !params.bInterleaved)
|| (params.nBitPerSample != 16u
- && params.nBitPerSample != 24u)// we support 16/24 bit s/w decoding
+ && params.nBitPerSample != 24u
+ && params.nBitPerSample != 32u
+ && params.nBitPerSample != 8u)// we support 8/16/24/32 bit s/w decoding
|| params.eNumData != OMX_NumericalDataSigned
|| params.ePCMMode != OMX_AUDIO_PCMModeLinear) {
ALOGE("unsupported PCM port: %u channels%s, %u-bit, %s(%d), %s(%d) mode ",
@@ -4216,6 +4273,7 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
notify->setInt32("bit-width", params.nBitPerSample);
+
if (mChannelMaskPresent) {
notify->setInt32("channel-mask", mChannelMask);
}
@@ -4265,6 +4323,7 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
case OMX_AUDIO_CodingFLAC:
{
+ if (portIndex == kPortIndexInput) {
OMX_AUDIO_PARAM_FLACTYPE params;
InitOMXParams(&params);
params.nPortIndex = portIndex;
@@ -4279,6 +4338,7 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSampleRate);
break;
+ }
}
case OMX_AUDIO_CodingMP3:
@@ -4419,6 +4479,14 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
}
default:
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::getAudioPortFormat(portIndex,
+ (int)audioDef->eEncoding, notify, mOMX, mNode);
+ if (err == OK) {
+ break;
+ }
+ }
+
ALOGE("Unsupported audio coding: %s(%d)\n",
asString(audioDef->eEncoding), audioDef->eEncoding);
return BAD_TYPE;
@@ -5718,8 +5786,79 @@ bool ACodec::LoadedState::onConfigureComponent(
ALOGE("[%s] configureCodec returning error %d",
mCodec->mComponentName.c_str(), err);
- mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
- return false;
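+ // configureCodec failed. For video decoders, instead of giving up, free the
+ // current OMX node and retry the remaining matching components (typically a
+ // software fallback) with the same configuration.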
+ int32_t encoder;
+ if (!msg->findInt32("encoder", &encoder)) {
+ encoder = false;
+ }
+
+ if (!encoder && !strncmp(mime.c_str(), "video/", strlen("video/"))) {
+ Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
+
+ OMXCodec::findMatchingCodecs(
+ mime.c_str(),
+ encoder, // createEncoder
+ NULL, // matchComponentName
+ 0, // flags
+ &matchingCodecs);
+
+ status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+
+ if (err != OK) {
+ ALOGE("Failed to freeNode");
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+ return false;
+ }
+
+ mCodec->mNode = 0;
+ AString componentName;
+ sp<CodecObserver> observer = new CodecObserver;
+
+ err = NAME_NOT_FOUND;
+ for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
+ ++matchIndex) {
+ componentName = matchingCodecs.itemAt(matchIndex).mName.string();
+ if (!strcmp(mCodec->mComponentName.c_str(), componentName.c_str())) {
+ continue;
+ }
+
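+ // Allocate the replacement node at foreground priority, then restore the
+ // caller's previous thread priority.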
+ pid_t tid = gettid();
+ int prevPriority = androidGetThreadPriority(tid);
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
+ err = mCodec->mOMX->allocateNode(componentName.c_str(), observer, &mCodec->mNode);
+ androidSetThreadPriority(tid, prevPriority);
+
+ if (err == OK) {
+ break;
+ } else {
+ ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
+ }
+
+ mCodec->mNode = 0;
+ }
+
+ if (mCodec->mNode == 0) {
+ if (!mime.empty()) {
+ ALOGE("Unable to instantiate a %scoder for type '%s' with err %#x.",
+ encoder ? "en" : "de", mime.c_str(), err);
+ } else {
+ ALOGE("Unable to instantiate codec '%s' with err %#x.", componentName.c_str(), err);
+ }
+
+ mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
+ return false;
+ }
+
+ sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
+ observer->setNotificationMessage(notify);
+ mCodec->mComponentName = componentName;
+
+ err = mCodec->configureCodec(mime.c_str(), msg);
+
+ if (err != OK) {
+ mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
+ return false;
+ }
+ }
}
{
diff --git a/media/libstagefright/APE.cpp b/media/libstagefright/APE.cpp
new file mode 100644
index 0000000..74ca7dc
--- /dev/null
+++ b/media/libstagefright/APE.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) Texas Instruments - http://www.ti.com/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APE_TAG"
+#include <utils/Log.h>
+
+#include "include/APE.h"
+
+namespace android {
+
+APE::APE(){
+
+}
+
+APE::~APE(){
+
+}
+
+bool APE::isAPE(uint8_t *apeTag) const {
+ if(apeTag[0] == 'A' && apeTag[1] == 'P' && apeTag[2] == 'E' &&
+ apeTag[3] == 'T' && apeTag[4] == 'A' && apeTag[5] == 'G' &&
+ apeTag[6] == 'E' && apeTag[7] == 'X'){
+ return true;
+ }
+ return false;
+}
+
+size_t sizeItemKey(const sp<DataSource> &source, off64_t offset){
+ off64_t ItemKeyOffset = offset;
+ uint8_t keyTerminator = 0;
+ size_t keySize = 0;
+ do {
+ source->readAt(ItemKeyOffset, &keyTerminator, 1);
+ ItemKeyOffset++;
+ keySize++;
+ } while (keyTerminator != 0);
+ return keySize - 1;
+}
+
+bool APE::parseAPE(const sp<DataSource> &source, off64_t offset,
+ sp<MetaData> &meta){
+
+ struct Map {
+ int key;
+ const char *tag;
+ } const kMap[] = {
+ { kKeyAlbum, "Album" },
+ { kKeyArtist, "Artist" },
+ { kKeyAlbumArtist, "Album" },
+ { kKeyComposer, "Composer" },
+ { kKeyGenre, "Genre" },
+ { kKeyTitle, "Title" },
+ { kKeyYear, "Year" },
+ { kKeyCDTrackNumber, "Track" },
+ { kKeyDate, "Record Date"},
+ };
+
+ static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
+
+ off64_t headerOffset = offset;
+ headerOffset += 16;
+ itemNumber = 0;
+ if (source->readAt(headerOffset, &itemNumber, 1) == 0)
+ return false;
+
+ headerOffset += 16;
+
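+ // Each tag item is laid out as: a 4-byte value length, 4-byte item flags,
+ // a NUL-terminated key string, then the value bytes (no terminator).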
+ for(uint32_t it = 0; it < itemNumber; it++){
+ lenValue = 0;
+ if (source->readAt(headerOffset, &lenValue, 1) == 0)
+ return false;
+
+ headerOffset += 4;
+
+ itemFlags = 0;
+ if (source->readAt(headerOffset, &itemFlags, 1) == 0)
+ return false;
+
+ headerOffset += 4;
+
+ size_t sizeKey = sizeItemKey(source, headerOffset);
+
+ char *key = new char[sizeKey + 1];
+
+ if (source->readAt(headerOffset, key, sizeKey) == 0)
+ return false;
+
+ key[sizeKey] = '\0';
+ headerOffset += sizeKey + 1;
+
+ char *val = new char[lenValue + 1];
+
+ if (source->readAt(headerOffset, val, lenValue) == 0)
+ return false;
+
+ val[lenValue] = '\0';
+
+ for (size_t i = 0; i < kNumMapEntries; i++){
+ if (!strcmp(key, kMap[i].tag)){
+ if (itemFlags == 0)
+ meta->setCString(kMap[i].key, (const char *)val);
+ break;
+ }
+ }
+ headerOffset += lenValue;
+ delete[] key;
+ delete[] val;
+ }
+
+ return true;
+}
+} //namespace android
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 9687c7d..5a75269 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -69,6 +69,8 @@ LOCAL_SRC_FILES:= \
WVMExtractor.cpp \
XINGSeeker.cpp \
avc_utils.cpp \
+ APE.cpp \
+ FFMPEGSoftCodec.cpp \
LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/include/media/ \
@@ -123,6 +125,28 @@ LOCAL_STATIC_LIBRARIES := \
LOCAL_WHOLE_STATIC_LIBRARIES := libavextensions
+ifeq ($(BOARD_USE_S3D_SUPPORT), true)
+ifeq ($(BOARD_USES_HWC_SERVICES), true)
+LOCAL_CFLAGS += -DUSE_S3D_SUPPORT -DHWC_SERVICES
+LOCAL_C_INCLUDES += \
+ $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include \
+ $(TOP)/hardware/samsung_slsi/openmax/include/exynos \
+ $(TOP)/hardware/samsung_slsi/$(TARGET_BOARD_PLATFORM)-insignal/libhwcService \
+ $(TOP)/hardware/samsung_slsi/$(TARGET_BOARD_PLATFORM)-insignal/libhwc \
+ $(TOP)/hardware/samsung_slsi/$(TARGET_BOARD_PLATFORM)-insignal/include \
+ $(TOP)/hardware/samsung_slsi/$(TARGET_SOC)/libhwcmodule \
+ $(TOP)/hardware/samsung_slsi/$(TARGET_SOC)/include \
+ $(TOP)/hardware/samsung_slsi/exynos/libexynosutils \
+ $(TOP)/hardware/samsung_slsi/exynos/include
+
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+ $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
+
+LOCAL_SHARED_LIBRARIES += \
+ libExynosHWCService
+endif
+endif
+
LOCAL_SHARED_LIBRARIES += \
libstagefright_enc_common \
libstagefright_avc_common \
@@ -133,7 +157,7 @@ LOCAL_SHARED_LIBRARIES += \
LOCAL_CFLAGS += -Wno-multichar -Werror -Wno-error=deprecated-declarations -Wall
ifeq ($(TARGET_USES_QCOM_BSP), true)
- LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
+ LOCAL_C_INCLUDES += $(call project-path-for,qcom-display)/libgralloc
LOCAL_CFLAGS += -DQTI_BSP
endif
@@ -142,6 +166,10 @@ ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
endif
+ifeq ($(TARGET_BOARD_PLATFORM),omap4)
+LOCAL_CFLAGS += -DBOARD_CANT_REALLOCATE_OMX_BUFFERS
+endif
+
ifeq ($(call is-vendor-board-platform,QCOM),true)
ifeq ($(strip $(AUDIO_FEATURE_ENABLED_EXTN_FLAC_DECODER)),true)
LOCAL_CFLAGS += -DQTI_FLAC_DECODER
@@ -150,6 +178,17 @@ endif
LOCAL_CLANG := true
+ifeq ($(BOARD_USE_SAMSUNG_CAMERAFORMAT_NV21), true)
+# This flag requires the following string constant in
+# CameraParametersExtra.h:
+#
+# const char CameraParameters::PIXEL_FORMAT_YUV420SP_NV21[] = "nv21";
+LOCAL_CFLAGS += -DUSE_SAMSUNG_CAMERAFORMAT_NV21
+endif
+
+# FFMPEG plugin
+LOCAL_C_INCLUDES += $(TOP)/external/stagefright-plugins/include
+
LOCAL_MODULE:= libstagefright
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index dd9d393..1ff5d4f 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -54,6 +54,7 @@ AudioPlayer::AudioPlayer(
mFinalStatus(OK),
mSeekTimeUs(0),
mStarted(false),
+ mSourcePaused(false),
mIsFirstBuffer(false),
mFirstBufferResult(OK),
mFirstBuffer(NULL),
@@ -62,7 +63,8 @@ AudioPlayer::AudioPlayer(
mPinnedTimeUs(-1ll),
mPlaying(false),
mStartPosUs(0),
- mCreateFlags(flags) {
+ mCreateFlags(flags),
+ mPauseRequired(false) {
}
AudioPlayer::~AudioPlayer() {
@@ -82,6 +84,7 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
status_t err;
if (!sourceAlreadyStarted) {
+ mSourcePaused = false;
err = mSource->start();
if (err != OK) {
@@ -257,13 +260,16 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
mStarted = true;
mPlaying = true;
mPinnedTimeUs = -1ll;
-
+ const char *componentName;
+ if (!(format->findCString(kKeyDecoderComponent, &componentName))) {
+ componentName = "none";
+ }
+ mPauseRequired = !strncmp(componentName, "OMX.qcom.", 9);
return OK;
}
void AudioPlayer::pause(bool playPendingSamples) {
CHECK(mStarted);
-
if (playPendingSamples) {
if (mAudioSink.get() != NULL) {
mAudioSink->stop();
@@ -284,10 +290,21 @@ void AudioPlayer::pause(bool playPendingSamples) {
}
mPlaying = false;
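+ // For OMX.qcom decoder components the source itself is paused as well,
+ // so it stops delivering buffers while playback is paused.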
+ CHECK(mSource != NULL);
+ if (mPauseRequired) {
+ if (mSource->pause() == OK) {
+ mSourcePaused = true;
+ }
+ }
}
status_t AudioPlayer::resume() {
CHECK(mStarted);
+ CHECK(mSource != NULL);
+ if (mSourcePaused == true) {
+ mSourcePaused = false;
+ mSource->start();
+ }
status_t err;
if (mAudioSink.get() != NULL) {
@@ -349,7 +366,7 @@ void AudioPlayer::reset() {
mInputBuffer->release();
mInputBuffer = NULL;
}
-
+ mSourcePaused = false;
mSource->stop();
// The following hack is necessary to ensure that the OMX
@@ -379,6 +396,7 @@ void AudioPlayer::reset() {
mStarted = false;
mPlaying = false;
mStartPosUs = 0;
+ mPauseRequired = false;
}
// static
@@ -549,6 +567,10 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) {
mIsFirstBuffer = false;
} else {
err = mSource->read(&mInputBuffer, &options);
+ if (err == OK && mInputBuffer == NULL && mSourcePaused) {
+ ALOGV("mSourcePaused, return 0 from fillBuffer");
+ return 0;
+ }
}
CHECK((err == OK && mInputBuffer != NULL)
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index a15bca7..db08476 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -58,7 +58,8 @@ AudioSource::AudioSource(
mPrevSampleTimeUs(0),
mFirstSampleTimeUs(-1ll),
mNumFramesReceived(0),
- mNumClientOwnedBuffers(0) {
+ mNumClientOwnedBuffers(0),
+ mRecPaused(false) {
ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
sampleRate, outSampleRate, channelCount);
CHECK(channelCount == 1 || channelCount == 2 || channelCount == 6);
@@ -109,6 +110,11 @@ status_t AudioSource::initCheck() const {
status_t AudioSource::start(MetaData *params) {
Mutex::Autolock autoLock(mLock);
+ if (mRecPaused) {
+ mRecPaused = false;
+ return OK;
+ }
+
if (mStarted) {
return UNKNOWN_ERROR;
}
@@ -138,6 +144,12 @@ status_t AudioSource::start(MetaData *params) {
return err;
}
+status_t AudioSource::pause() {
+ ALOGV("AudioSource::Pause");
+ mRecPaused = true;
+ return OK;
+}
+
void AudioSource::releaseQueuedFrames_l() {
ALOGV("releaseQueuedFrames_l");
List<MediaBuffer *>::iterator it;
@@ -292,6 +304,10 @@ void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
int64_t timeUs = systemTime() / 1000ll;
+ // Estimate the real sampling time of the 1st sample in this buffer
+ // from AudioRecord's latency. (Apply this adjustment first so that
+ // the start time logic is not affected.)
+ timeUs -= mRecord->latency() * 1000LL;
ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
Mutex::Autolock autoLock(mLock);
@@ -364,6 +380,14 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
}
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
+ if (mRecPaused) {
+ if (!mBuffersReceived.empty()) {
+ releaseQueuedFrames_l();
+ }
+ buffer->release();
+ return;
+ }
+
const size_t bufferSize = buffer->range_length();
const size_t frameSize = mRecord->frameSize();
const int64_t timestampUs =
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 4e6c2a6..778dfa5 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -216,7 +216,8 @@ AwesomePlayer::AwesomePlayer()
mLastVideoTimeUs(-1),
mTextDriver(NULL),
mOffloadAudio(false),
- mAudioTearDown(false) {
+ mAudioTearDown(false),
+ mIsFirstFrameAfterResume(false) {
CHECK_EQ(mClient.connect(), (status_t)OK);
DataSource::RegisterDefaultSniffers();
@@ -1804,11 +1805,18 @@ void AwesomePlayer::onVideoEvent() {
if (mSeeking != NO_SEEK) {
ALOGV("seeking to %" PRId64 " us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
+ MediaSource::ReadOptions::SeekMode seekmode = (mSeeking == SEEK_VIDEO_ONLY)
+ ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
+ : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC;
+ // Seek to the next key-frame after resume for http streaming
+ if (mCachedSource != NULL && mIsFirstFrameAfterResume) {
+ seekmode = MediaSource::ReadOptions::SEEK_NEXT_SYNC;
+ mIsFirstFrameAfterResume = false;
+ }
+
options.setSeekTo(
mSeekTimeUs,
- mSeeking == SEEK_VIDEO_ONLY
- ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
- : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ seekmode);
}
for (;;) {
status_t err = mVideoSource->read(&mVideoBuffer, &options);
@@ -2290,11 +2298,11 @@ status_t AwesomePlayer::finishSetDataSource_l() {
// The widevine extractor does its own caching.
#if 0
- mCachedSource = new NuCachedSource2(
+ mCachedSource = NuCachedSource2::Create(
new ThrottledSource(
mConnectingDataSource, 50 * 1024 /* bytes/sec */));
#else
- mCachedSource = new NuCachedSource2(
+ mCachedSource = NuCachedSource2::Create(
mConnectingDataSource,
cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
disconnectAtHighwatermark);
@@ -3044,4 +3052,86 @@ void AwesomePlayer::onAudioTearDownEvent() {
beginPrepareAsync_l();
}
+// suspend() releases the decoders, the renderers and the buffers allocated for the decoders.
+// Releasing the decoders avoids draining power while in the suspended state.
+status_t AwesomePlayer::suspend() {
+ ALOGV("suspend()");
+ Mutex::Autolock autoLock(mLock);
+
+ // Set PAUSE on the DrmManagerClient; it will be set back to START in play_l()
+ if (mDecryptHandle != NULL) {
+ mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
+ Playback::PAUSE, 0);
+ }
+
+ cancelPlayerEvents();
+ if (mQueueStarted) {
+ mQueue.stop();
+ mQueueStarted = false;
+ }
+
+ // Shutdown audio decoder first
+ if ((mAudioPlayer == NULL || !(mFlags & AUDIOPLAYER_STARTED))
+ && mAudioSource != NULL) {
+ mAudioSource->stop();
+ }
+ mAudioSource.clear();
+ mOmxSource.clear();
+ delete mAudioPlayer;
+ mAudioPlayer = NULL;
+ modifyFlags(AUDIO_RUNNING | AUDIOPLAYER_STARTED, CLEAR);
+
+ // Shutdown the video decoder
+ mVideoRenderer.clear();
+ if (mVideoSource != NULL) {
+ shutdownVideoDecoder_l();
+ }
+ modifyFlags(PLAYING, CLEAR);
+ mVideoRenderingStarted = false;
+
+ // Disconnect the source
+ if (mCachedSource != NULL) {
+ status_t err = mCachedSource->disconnectWhileSuspend();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ return OK;
+}
+
+status_t AwesomePlayer::resume() {
+ ALOGV("resume()");
+ Mutex::Autolock autoLock(mLock);
+
+ // Reconnect the source, if it was disconnected during suspend
+ if (mCachedSource != NULL) {
+ status_t err = mCachedSource->connectWhileResume();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (mVideoTrack != NULL && mVideoSource == NULL) {
+ status_t err = initVideoDecoder();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (mAudioTrack != NULL && mAudioSource == NULL) {
+ status_t err = initAudioDecoder();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ mIsFirstFrameAfterResume = true;
+
+ if (!mQueueStarted) {
+ mQueue.start();
+ mQueueStarted = true;
+ }
+
+ return OK;
+}
+
} // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 27a6086..20a0a45 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -117,6 +117,13 @@ static int32_t getColorFormat(const char* colorFormat) {
return OMX_COLOR_FormatYUV420SemiPlanar;
}
+#ifdef USE_SAMSUNG_CAMERAFORMAT_NV21
+ if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420SP_NV21)) {
+ static const int OMX_SEC_COLOR_FormatNV21Linear = 0x7F000011;
+ return OMX_SEC_COLOR_FormatNV21Linear;
+ }
+#endif /* USE_SAMSUNG_CAMERAFORMAT_NV21 */
+
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422I)) {
return OMX_COLOR_FormatYCbYCr;
}
@@ -192,7 +199,11 @@ CameraSource::CameraSource(
mNumFramesDropped(0),
mNumGlitches(0),
mGlitchDurationThresholdUs(200000),
- mCollectStats(false) {
+ mCollectStats(false),
+ mPauseAdjTimeUs(0),
+ mPauseStartTimeUs(0),
+ mPauseEndTimeUs(0),
+ mRecPause(false) {
mVideoSize.width = -1;
mVideoSize.height = -1;
@@ -662,6 +673,14 @@ status_t CameraSource::startCameraRecording() {
status_t CameraSource::start(MetaData *meta) {
ALOGV("start");
+ if(mRecPause) {
+ mRecPause = false;
+ mPauseAdjTimeUs = mPauseEndTimeUs - mPauseStartTimeUs;
+ ALOGV("resume : mPause Adj / End / Start : %" PRId64 " / %" PRId64 " / %" PRId64" us",
+ mPauseAdjTimeUs, mPauseEndTimeUs, mPauseStartTimeUs);
+ return OK;
+ }
+
CHECK(!mStarted);
if (mInitCheck != OK) {
ALOGE("CameraSource is not initialized yet");
@@ -675,6 +694,10 @@ status_t CameraSource::start(MetaData *meta) {
}
mStartTimeUs = 0;
+ mRecPause = false;
+ mPauseAdjTimeUs = 0;
+ mPauseStartTimeUs = 0;
+ mPauseEndTimeUs = 0;
mNumInputBuffers = 0;
mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
mEncoderDataSpace = HAL_DATASPACE_BT709;
@@ -708,6 +731,16 @@ status_t CameraSource::start(MetaData *meta) {
return err;
}
+status_t CameraSource::pause() {
+ mRecPause = true;
+ mPauseStartTimeUs = mLastFrameTimestampUs;
+ // Record the end time as well, or it may be left at 0 if no frame arrives while paused
+ mPauseEndTimeUs = mLastFrameTimestampUs;
+ ALOGV("pause : mPauseStart %" PRId64 " us, #Queued Frames : %zd",
+ mPauseStartTimeUs, mFramesReceived.size());
+ return OK;
+}
+
void CameraSource::stopCameraRecording() {
ALOGV("stopCameraRecording");
if (mCameraFlags & FLAGS_HOT_CAMERA) {
@@ -909,10 +942,23 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
return;
}
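+ // While recording is paused, drop any queued and incoming frames and keep
+ // extending the pause window; the accumulated pause time is subtracted from
+ // timestamps once recording resumes.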
+ if (mRecPause == true) {
+ if(!mFramesReceived.empty()) {
+ ALOGV("releaseQueuedFrames - #Queued Frames : %zd", mFramesReceived.size());
+ releaseQueuedFrames();
+ }
+ ALOGV("release One Video Frame for Pause : %" PRId64 "us", timestampUs);
+ releaseOneRecordingFrame(data);
+ mPauseEndTimeUs = timestampUs;
+ return;
+ }
+ timestampUs -= mPauseAdjTimeUs;
+ ALOGV("dataCallbackTimestamp: AdjTimestamp %" PRId64 "us", timestampUs);
+
if (mNumFramesReceived > 0) {
if (timestampUs <= mLastFrameTimestampUs) {
- ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
- (long long)timestampUs, (long long)mLastFrameTimestampUs);
+ ALOGW("Dropping frame with backward timestamp %" PRId64 " (last %" PRId64 ")",
+ timestampUs, mLastFrameTimestampUs);
releaseOneRecordingFrame(data);
return;
}
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 926e95c..53815bd 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -78,6 +78,7 @@ CameraSourceTimeLapse::CameraSourceTimeLapse(
storeMetaDataInVideoBuffers),
mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
mLastTimeLapseFrameRealTimestampUs(0),
+ mLastTimeLapseFrameTimeStampUs(0),
mSkipCurrentFrame(false) {
mTimeBetweenFrameCaptureUs = timeBetweenFrameCaptureUs;
@@ -252,6 +253,7 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
ALOGV("dataCallbackTimestamp timelapse: initial frame");
mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+ mLastTimeLapseFrameTimeStampUs = *timestampUs;
return false;
}
@@ -263,8 +265,10 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
if (mForceRead) {
ALOGV("dataCallbackTimestamp timelapse: forced read");
mForceRead = false;
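+ // Space the emitted timestamp from the previously emitted time-lapse frame,
+ // which is tracked separately from the real capture time.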
+ mLastTimeLapseFrameRealTimestampUs = *timestampUs;
*timestampUs =
- mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ mLastTimeLapseFrameTimeStampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ mLastTimeLapseFrameTimeStampUs = *timestampUs;
// Really make sure that this video recording frame will not be dropped.
if (*timestampUs < mStartTimeUs) {
@@ -294,7 +298,8 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
ALOGV("dataCallbackTimestamp timelapse: got timelapse frame");
mLastTimeLapseFrameRealTimestampUs = *timestampUs;
- *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ *timestampUs = mLastTimeLapseFrameTimeStampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ mLastTimeLapseFrameTimeStampUs = *timestampUs;
// Update start-time once the captured-time reaches the expected start-time.
// Not doing so will result in CameraSource always dropping frames since
// updated-timestamp will never intersect start-timestamp
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 85d0292..2df045f 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -47,11 +47,28 @@
#include <utils/String8.h>
#include <cutils/properties.h>
+#include <cutils/log.h>
+
+#include <dlfcn.h>
#include <stagefright/AVExtensions.h>
namespace android {
+static void *loadExtractorPlugin() {
+ void *ret = NULL;
+ char lib[PROPERTY_VALUE_MAX];
+ if (property_get("media.sf.extractor-plugin", lib, NULL)) {
+ if (void *extractorLib = ::dlopen(lib, RTLD_LAZY)) {
+ ret = ::dlsym(extractorLib, "getExtractorPlugin");
+ ALOGW_IF(!ret, "Failed to find symbol, dlerror: %s", ::dlerror());
+ } else {
+ ALOGV("Failed to load %s, dlerror: %s", lib, ::dlerror());
+ }
+ }
+ return ret;
+}
+
bool DataSource::getUInt16(off64_t offset, uint16_t *x) {
*x = 0;
@@ -112,29 +129,49 @@ status_t DataSource::getSize(off64_t *size) {
////////////////////////////////////////////////////////////////////////////////
-Mutex DataSource::gSnifferMutex;
-List<DataSource::SnifferFunc> DataSource::gSniffers;
-bool DataSource::gSniffersRegistered = false;
-
bool DataSource::sniff(
String8 *mimeType, float *confidence, sp<AMessage> *meta) {
+
+ return mSniffer->sniff(this, mimeType, confidence, meta);
+}
+
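+// The static registration entry points are kept for compatibility but are now
+// no-ops; sniffers are registered on the per-source Sniffer object instead.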
+// static
+void DataSource::RegisterSniffer_l(SnifferFunc /* func */) {
+ return;
+}
+
+// static
+void DataSource::RegisterDefaultSniffers() {
+ return;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+Sniffer::Sniffer() {
+ registerDefaultSniffers();
+}
+
+bool Sniffer::sniff(
+ DataSource *source, String8 *mimeType, float *confidence, sp<AMessage> *meta) {
+
+ bool forceExtraSniffers = false;
+
+ if (*confidence == 3.14f) {
+ // Magic value, as set by MediaExtractor when a video container looks incomplete
+ forceExtraSniffers = true;
+ }
+
*mimeType = "";
*confidence = 0.0f;
meta->clear();
- {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (!gSniffersRegistered) {
- return false;
- }
- }
-
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
+ Mutex::Autolock autoLock(mSnifferMutex);
+ for (List<SnifferFunc>::iterator it = mSniffers.begin();
+ it != mSniffers.end(); ++it) {
String8 newMimeType;
float newConfidence;
sp<AMessage> newMeta;
- if ((*it)(this, &newMimeType, &newConfidence, &newMeta)) {
+ if ((*it)(source, &newMimeType, &newConfidence, &newMeta)) {
if (newConfidence > *confidence) {
*mimeType = newMimeType;
*confidence = newConfidence;
@@ -143,48 +180,81 @@ bool DataSource::sniff(
}
}
+ /* Only do the deeper sniffers if the results are null or in doubt */
+ if (mimeType->length() == 0 || *confidence < 0.21f || forceExtraSniffers) {
+ for (List<SnifferFunc>::iterator it = mExtraSniffers.begin();
+ it != mExtraSniffers.end(); ++it) {
+ String8 newMimeType;
+ float newConfidence;
+ sp<AMessage> newMeta;
+ if ((*it)(source, &newMimeType, &newConfidence, &newMeta)) {
+ if (newConfidence > *confidence) {
+ *mimeType = newMimeType;
+ *confidence = newConfidence;
+ *meta = newMeta;
+ }
+ }
+ }
+ }
+
return *confidence > 0.0;
}
-// static
-void DataSource::RegisterSniffer_l(SnifferFunc func) {
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
+void Sniffer::registerSniffer_l(SnifferFunc func) {
+
+ for (List<SnifferFunc>::iterator it = mSniffers.begin();
+ it != mSniffers.end(); ++it) {
if (*it == func) {
return;
}
}
- gSniffers.push_back(func);
+ mSniffers.push_back(func);
}
-// static
-void DataSource::RegisterDefaultSniffers() {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (gSniffersRegistered) {
- return;
+void Sniffer::registerSnifferPlugin() {
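+ // Resolve the external extractor plugin (named by the media.sf.extractor-plugin
+ // property) once and register its sniff hook with the extra, deeper sniffers.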
+ static void (*getExtractorPlugin)(MediaExtractor::Plugin *) =
+ (void (*)(MediaExtractor::Plugin *))loadExtractorPlugin();
+
+ MediaExtractor::Plugin *plugin = MediaExtractor::getPlugin();
+ if (!plugin->sniff && getExtractorPlugin) {
+ getExtractorPlugin(plugin);
}
+ if (plugin->sniff) {
+ for (List<SnifferFunc>::iterator it = mExtraSniffers.begin();
+ it != mExtraSniffers.end(); ++it) {
+ if (*it == plugin->sniff) {
+ return;
+ }
+ }
+
+ mExtraSniffers.push_back(plugin->sniff);
+ }
+}
- RegisterSniffer_l(SniffMPEG4);
- RegisterSniffer_l(SniffMatroska);
- RegisterSniffer_l(SniffOgg);
- RegisterSniffer_l(SniffWAV);
- RegisterSniffer_l(SniffFLAC);
- RegisterSniffer_l(SniffAMR);
- RegisterSniffer_l(SniffMPEG2TS);
- RegisterSniffer_l(SniffMP3);
- RegisterSniffer_l(SniffAAC);
- RegisterSniffer_l(SniffMPEG2PS);
- RegisterSniffer_l(SniffWVM);
- RegisterSniffer_l(SniffMidi);
- RegisterSniffer_l(AVUtils::get()->getExtendedSniffer());
+void Sniffer::registerDefaultSniffers() {
+ Mutex::Autolock autoLock(mSnifferMutex);
+
+ registerSniffer_l(SniffMPEG4);
+ registerSniffer_l(SniffMatroska);
+ registerSniffer_l(SniffOgg);
+ registerSniffer_l(SniffWAV);
+ registerSniffer_l(SniffFLAC);
+ registerSniffer_l(SniffAMR);
+ registerSniffer_l(SniffMPEG2TS);
+ registerSniffer_l(SniffMP3);
+ registerSniffer_l(SniffAAC);
+ registerSniffer_l(SniffMPEG2PS);
+ registerSniffer_l(SniffWVM);
+ registerSniffer_l(SniffMidi);
+ registerSniffer_l(AVUtils::get()->getExtendedSniffer());
+ registerSnifferPlugin();
char value[PROPERTY_VALUE_MAX];
if (property_get("drm.service.enabled", value, NULL)
&& (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
- RegisterSniffer_l(SniffDRM);
+ registerSniffer_l(SniffDRM);
}
- gSniffersRegistered = true;
}
// static
@@ -256,7 +326,7 @@ sp<DataSource> DataSource::CreateFromURI(
cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
disconnectAtHighwatermark);
} else {
- source = new NuCachedSource2(
+ source = NuCachedSource2::Create(
httpSource,
cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
disconnectAtHighwatermark);
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 2c39314..2a61c3a 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -42,7 +42,8 @@ sp<DataURISource> DataURISource::Create(const char *uri) {
AString encoded(commaPos + 1);
// Strip CR and LF...
- for (size_t i = encoded.size(); i-- > 0;) {
+ for (size_t i = encoded.size(); i > 0;) {
+ i--;
if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
encoded.erase(i, 1);
}
diff --git a/media/libstagefright/FFMPEGSoftCodec.cpp b/media/libstagefright/FFMPEGSoftCodec.cpp
new file mode 100644
index 0000000..5c6a8ed
--- /dev/null
+++ b/media/libstagefright/FFMPEGSoftCodec.cpp
@@ -0,0 +1,1149 @@
+/*
+ * Copyright (C) 2014 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FFMPEGSoftCodec"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ABitReader.h>
+
+#include <media/stagefright/FFMPEGSoftCodec.h>
+
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
+
+#include <OMX_Component.h>
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+
+#include <OMX_FFMPEG_Extn.h>
+
+namespace android {
+
+enum MetaKeyType{
+ INT32, INT64, STRING, DATA, CSD
+};
+
+struct MetaKeyEntry{
+ int MetaKey;
+ const char* MsgKey;
+ MetaKeyType KeyType;
+};
+
+static const MetaKeyEntry MetaKeyTable[] {
+ {kKeyAACAOT , "aac-profile" , INT32},
+ {kKeyArbitraryMode , "use-arbitrary-mode" , INT32},
+ {kKeyBitRate , "bitrate" , INT32},
+ {kKeyBitsPerSample , "bit-width" , INT32},
+ {kKeyBlockAlign , "block-align" , INT32},
+ {kKeyChannelCount , "channel-count" , INT32},
+ {kKeyCodecId , "codec-id" , INT32},
+ {kKeyCodedSampleBits , "coded-sample-bits" , INT32},
+ {kKeyRawCodecSpecificData , "raw-codec-specific-data", CSD},
+ {kKeyRVVersion , "rv-version" , INT32},
+ {kKeySampleFormat , "sample-format" , INT32},
+ {kKeySampleRate , "sample-rate" , INT32},
+ {kKeyWMAVersion , "wma-version" , INT32}, // int32_t
+ {kKeyWMVVersion , "wmv-version" , INT32},
+ {kKeyPCMFormat , "pcm-format" , INT32},
+};
+
+const char* FFMPEGSoftCodec::getMsgKey(int key) {
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (key == MetaKeyTable[i].MetaKey) {
+ return MetaKeyTable[i].MsgKey;
+ }
+ }
+ return "unknown";
+}
+
+void FFMPEGSoftCodec::convertMetaDataToMessageFF(
+ const sp<MetaData> &meta, sp<AMessage> *format) {
+ const char * str_val;
+ int32_t int32_val;
+ int64_t int64_val;
+ uint32_t data_type;
+ const void * data;
+ size_t size;
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (MetaKeyTable[i].KeyType == INT32 &&
+ meta->findInt32(MetaKeyTable[i].MetaKey, &int32_val)) {
+ ALOGV("found metakey %s of type int32", MetaKeyTable[i].MsgKey);
+ format->get()->setInt32(MetaKeyTable[i].MsgKey, int32_val);
+ } else if (MetaKeyTable[i].KeyType == INT64 &&
+ meta->findInt64(MetaKeyTable[i].MetaKey, &int64_val)) {
+ ALOGV("found metakey %s of type int64", MetaKeyTable[i].MsgKey);
+ format->get()->setInt64(MetaKeyTable[i].MsgKey, int64_val);
+ } else if (MetaKeyTable[i].KeyType == STRING &&
+ meta->findCString(MetaKeyTable[i].MetaKey, &str_val)) {
+ ALOGV("found metakey %s of type string", MetaKeyTable[i].MsgKey);
+ format->get()->setString(MetaKeyTable[i].MsgKey, str_val);
+ } else if ( (MetaKeyTable[i].KeyType == DATA ||
+ MetaKeyTable[i].KeyType == CSD) &&
+ meta->findData(MetaKeyTable[i].MetaKey, &data_type, &data, &size)) {
+ ALOGV("found metakey %s of type data", MetaKeyTable[i].MsgKey);
+ if (MetaKeyTable[i].KeyType == CSD) {
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ if (strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-0", buffer);
+ } else {
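+ // For AVC, split the raw codec-specific data at the last start code:
+ // the leading portion becomes csd-0 and the remainder csd-1
+ // (typically the SPS and PPS).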
+ const uint8_t *ptr = (const uint8_t *)data;
+ CHECK(size >= 8);
+ int seqLength = 0, picLength = 0;
+ for (size_t i = 4; i < (size - 4); i++)
+ {
+ if ((*(ptr + i) == 0) && (*(ptr + i + 1) == 0) &&
+ (*(ptr + i + 2) == 0) && (*(ptr + i + 3) == 1))
+ seqLength = i;
+ }
+ sp<ABuffer> buffer = new ABuffer(seqLength);
+ memcpy(buffer->data(), data, seqLength);
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-0", buffer);
+ picLength=size-seqLength;
+ sp<ABuffer> buffer1 = new ABuffer(picLength);
+ memcpy(buffer1->data(), (const uint8_t *)data + seqLength, picLength);
+ buffer1->meta()->setInt32("csd", true);
+ buffer1->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-1", buffer1);
+ }
+ } else {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+ format->get()->setBuffer(MetaKeyTable[i].MsgKey, buffer);
+ }
+ }
+
+ }
+}
+
+void FFMPEGSoftCodec::convertMessageToMetaDataFF(
+ const sp<AMessage> &msg, sp<MetaData> &meta) {
+ AString str_val;
+ int32_t int32_val;
+ int64_t int64_val;
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (MetaKeyTable[i].KeyType == INT32 &&
+ msg->findInt32(MetaKeyTable[i].MsgKey, &int32_val)) {
+ ALOGV("found metakey %s of type int32", MetaKeyTable[i].MsgKey);
+ meta->setInt32(MetaKeyTable[i].MetaKey, int32_val);
+ } else if (MetaKeyTable[i].KeyType == INT64 &&
+ msg->findInt64(MetaKeyTable[i].MsgKey, &int64_val)) {
+ ALOGV("found metakey %s of type int64", MetaKeyTable[i].MsgKey);
+ meta->setInt64(MetaKeyTable[i].MetaKey, int64_val);
+ } else if (MetaKeyTable[i].KeyType == STRING &&
+ msg->findString(MetaKeyTable[i].MsgKey, &str_val)) {
+ ALOGV("found metakey %s of type string", MetaKeyTable[i].MsgKey);
+ meta->setCString(MetaKeyTable[i].MetaKey, str_val.c_str());
+ }
+ }
+}
+
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
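+// Swap in an FFMPEG software decoder for tracks the platform components are
+// known not to handle (e.g. WMV7 and AAC MAIN profile).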
+void FFMPEGSoftCodec::overrideComponentName(
+ uint32_t /*quirks*/, const sp<AMessage> &msg, AString* componentName, AString* mime, int32_t isEncoder) {
+
+ int32_t wmvVersion = 0;
+ if (!strncasecmp(mime->c_str(), MEDIA_MIMETYPE_VIDEO_WMV, strlen(MEDIA_MIMETYPE_VIDEO_WMV)) &&
+ msg->findInt32(getMsgKey(kKeyWMVVersion), &wmvVersion)) {
+ ALOGD("Found WMV version key %d", wmvVersion);
+ if (wmvVersion == 1) {
+ ALOGD("Use FFMPEG for unsupported WMV track");
+ componentName->setTo("OMX.ffmpeg.wmv.decoder");
+ }
+ }
+
+ int32_t encodeOptions = 0;
+ if (!isEncoder && !strncasecmp(mime->c_str(), MEDIA_MIMETYPE_AUDIO_WMA, strlen(MEDIA_MIMETYPE_AUDIO_WMA)) &&
+ !msg->findInt32(getMsgKey(kKeyWMAEncodeOpt), &encodeOptions)) {
+ ALOGD("Use FFMPEG for unsupported WMA track");
+ componentName->setTo("OMX.ffmpeg.wma.decoder");
+ }
+
+ // Google's decoder doesn't support MAIN profile
+ int32_t aacProfile = 0;
+ if (!isEncoder && !strncasecmp(mime->c_str(), MEDIA_MIMETYPE_AUDIO_AAC, strlen(MEDIA_MIMETYPE_AUDIO_AAC)) &&
+ msg->findInt32(getMsgKey(kKeyAACAOT), &aacProfile)) {
+ if (aacProfile == OMX_AUDIO_AACObjectMain) {
+ ALOGD("Use FFMPEG for AAC MAIN profile");
+ componentName->setTo("OMX.ffmpeg.aac.decoder");
+ }
+ }
+}
+
+status_t FFMPEGSoftCodec::setVideoFormat(
+ const sp<AMessage> &msg, const char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, bool isEncoder,
+ OMX_VIDEO_CODINGTYPE *compressionFormat) {
+ status_t err = OK;
+
+ if (isEncoder) {
+ ALOGE("Encoding not supported");
+ err = BAD_VALUE;
+
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_WMV, mime)) {
+ err = setWMVFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setWMVFormat() failed (err = %d)", err);
+ } else {
+ *compressionFormat = OMX_VIDEO_CodingWMV;
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_RV, mime)) {
+ err = setRVFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setRVFormat() failed (err = %d)", err);
+ } else {
+ *compressionFormat = OMX_VIDEO_CodingRV;
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VC1, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingVC1;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_FLV1, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingFLV1;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingDIVX;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingHEVC;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_FFMPEG, mime)) {
+ ALOGV("Setting the OMX_VIDEO_PARAM_FFMPEGTYPE params");
+ err = setFFmpegVideoFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setFFmpegVideoFormat() failed (err = %d)", err);
+ } else {
+ *compressionFormat = OMX_VIDEO_CodingAutoDetect;
+ }
+ } else {
+ err = BAD_TYPE;
+ }
+
+ return err;
+}
+
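+// Map an FFMPEG-handled OMX video coding on the given port back into the mime
+// and codec-specific entries of the output-format notification message.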
+status_t FFMPEGSoftCodec::getVideoPortFormat(OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXHandle, IOMX::node_id nodeId) {
+
+ status_t err = BAD_TYPE;
+ switch (coding) {
+ case OMX_VIDEO_CodingWMV:
+ {
+ OMX_VIDEO_PARAM_WMVTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, OMX_IndexParamVideoWmv, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t version;
+ if (params.eFormat == OMX_VIDEO_WMVFormat7) {
+ version = kTypeWMVVer_7;
+ } else if (params.eFormat == OMX_VIDEO_WMVFormat8) {
+ version = kTypeWMVVer_8;
+ } else {
+ version = kTypeWMVVer_9;
+ }
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_WMV);
+ notify->setInt32("wmv-version", version);
+ break;
+ }
+ case OMX_VIDEO_CodingAutoDetect:
+ {
+ OMX_VIDEO_PARAM_FFMPEGTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamVideoFFmpeg, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_FFMPEG);
+ notify->setInt32("codec-id", params.eCodecId);
+ break;
+ }
+ case OMX_VIDEO_CodingRV:
+ {
+ OMX_VIDEO_PARAM_RVTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamVideoRv, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t version;
+ if (params.eFormat == OMX_VIDEO_RVFormatG2) {
+ version = kTypeRVVer_G2;
+ } else if (params.eFormat == OMX_VIDEO_RVFormat8) {
+ version = kTypeRVVer_8;
+ } else {
+ version = kTypeRVVer_9;
+ }
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RV);
+ break;
+ }
+ }
+ return err;
+}
+
+status_t FFMPEGSoftCodec::getAudioPortFormat(OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXHandle, IOMX::node_id nodeId) {
+
+ status_t err = BAD_TYPE;
+ switch (coding) {
+ case OMX_AUDIO_CodingRA:
+ {
+ OMX_AUDIO_PARAM_RATYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, OMX_IndexParamAudioRa, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RA);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+ case OMX_AUDIO_CodingMP2:
+ {
+ OMX_AUDIO_PARAM_MP2TYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioMp2, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+ case OMX_AUDIO_CodingWMA:
+ {
+ OMX_AUDIO_PARAM_WMATYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, OMX_IndexParamAudioWma, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_WMA);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+ case OMX_AUDIO_CodingAPE:
+ {
+ OMX_AUDIO_PARAM_APETYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioApe, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_APE);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ notify->setInt32("bit-width", params.nBitsPerSample);
+ break;
+ }
+ case OMX_AUDIO_CodingFLAC:
+ {
+ OMX_AUDIO_PARAM_FLACTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioFlac, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FLAC);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ notify->setInt32("bit-width", params.nCompressionLevel); // piggyback
+ break;
+ }
+
+ case OMX_AUDIO_CodingDTS:
+ {
+ OMX_AUDIO_PARAM_DTSTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioDts, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_DTS);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+ case OMX_AUDIO_CodingAC3:
+ {
+ OMX_AUDIO_PARAM_ANDROID_AC3TYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC3);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+
+ case OMX_AUDIO_CodingAutoDetect:
+ {
+ OMX_AUDIO_PARAM_FFMPEGTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioFFmpeg, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FFMPEG);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+ }
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setAudioFormat(
+ const sp<AMessage> &msg, const char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID) {
+ ALOGV("setAudioFormat called");
+ status_t err = OK;
+
+ ALOGV("setAudioFormat: %s", msg->debugString(0).c_str());
+
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_WMA, mime)) {
+ err = setWMAFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setWMAFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_VORBIS, mime)) {
+ err = setVORBISFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setVORBISFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RA, mime)) {
+ err = setRAFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setRAFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_FLAC, mime)) {
+ err = setFLACFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setFLACFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II, mime)) {
+ err = setMP2Format(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setMP2Format() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mime)) {
+ err = setAC3Format(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setAC3Format() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_APE, mime)) {
+ err = setAPEFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setAPEFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_DTS, mime)) {
+ err = setDTSFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setDTSFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_FFMPEG, mime)) {
+ err = setFFmpegAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setFFmpegAudioFormat() failed (err = %d)", err);
+ }
+ }
+
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setSupportedRole(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder, const char *mime) {
+
+ ALOGV("setSupportedRole Called %s", mime);
+
+ struct MimeToRole {
+ const char *mime;
+ const char *decoderRole;
+ const char *encoderRole;
+ };
+
+ static const MimeToRole kFFMPEGMimeToRole[] = {
+ { MEDIA_MIMETYPE_AUDIO_AAC,
+ "audio_decoder.aac", NULL },
+ { MEDIA_MIMETYPE_AUDIO_MPEG,
+ "audio_decoder.mp3", NULL },
+ { MEDIA_MIMETYPE_AUDIO_VORBIS,
+ "audio_decoder.vorbis", NULL },
+ { MEDIA_MIMETYPE_AUDIO_WMA,
+ "audio_decoder.wma", NULL },
+ { MEDIA_MIMETYPE_AUDIO_RA,
+ "audio_decoder.ra" , NULL },
+ { MEDIA_MIMETYPE_AUDIO_FLAC,
+ "audio_decoder.flac", NULL },
+ { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+ "audio_decoder.mp2", NULL },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", NULL },
+ { MEDIA_MIMETYPE_AUDIO_APE,
+ "audio_decoder.ape", NULL },
+ { MEDIA_MIMETYPE_AUDIO_DTS,
+ "audio_decoder.dts", NULL },
+ { MEDIA_MIMETYPE_VIDEO_MPEG2,
+ "video_decoder.mpeg2", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX4,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX311,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_VIDEO_WMV,
+ "video_decoder.wmv", NULL },
+ { MEDIA_MIMETYPE_VIDEO_VC1,
+ "video_decoder.vc1", NULL },
+ { MEDIA_MIMETYPE_VIDEO_RV,
+ "video_decoder.rv", NULL },
+ { MEDIA_MIMETYPE_VIDEO_FLV1,
+ "video_decoder.flv1", NULL },
+ { MEDIA_MIMETYPE_VIDEO_HEVC,
+ "video_decoder.hevc", NULL },
+ { MEDIA_MIMETYPE_AUDIO_FFMPEG,
+ "audio_decoder.trial", NULL },
+ { MEDIA_MIMETYPE_VIDEO_FFMPEG,
+ "video_decoder.trial", NULL },
+ };
+ static const size_t kNumMimeToRole =
+ sizeof(kFFMPEGMimeToRole) / sizeof(kFFMPEGMimeToRole[0]);
+
+ size_t i;
+ for (i = 0; i < kNumMimeToRole; ++i) {
+ if (!strcasecmp(mime, kFFMPEGMimeToRole[i].mime)) {
+ break;
+ }
+ }
+
+ if (i == kNumMimeToRole) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ const char *role =
+ isEncoder ? kFFMPEGMimeToRole[i].encoderRole
+ : kFFMPEGMimeToRole[i].decoderRole;
+
+ if (role != NULL) {
+ OMX_PARAM_COMPONENTROLETYPE roleParams;
+ InitOMXParams(&roleParams);
+
+ strncpy((char *)roleParams.cRole,
+ role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+ roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+ status_t err = omx->setParameter(
+ node, OMX_IndexParamStandardComponentRole,
+ &roleParams, sizeof(roleParams));
+
+ if (err != OK) {
+ ALOGW("Failed to set standard component role '%s'.", role);
+ return err;
+ }
+ }
+ return OK;
+}
+
+//video
+status_t FFMPEGSoftCodec::setWMVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t version = -1;
+ OMX_VIDEO_PARAM_WMVTYPE paramWMV;
+
+ if (!msg->findInt32(getMsgKey(kKeyWMVVersion), &version)) {
+ ALOGE("WMV version not detected");
+ }
+
+ InitOMXParams(&paramWMV);
+ paramWMV.nPortIndex = kPortIndexInput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamVideoWmv, &paramWMV, sizeof(paramWMV));
+ if (err != OK) {
+ return err;
+ }
+
+ if (version == kTypeWMVVer_7) {
+ paramWMV.eFormat = OMX_VIDEO_WMVFormat7;
+ } else if (version == kTypeWMVVer_8) {
+ paramWMV.eFormat = OMX_VIDEO_WMVFormat8;
+ } else if (version == kTypeWMVVer_9) {
+ paramWMV.eFormat = OMX_VIDEO_WMVFormat9;
+ }
+
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamVideoWmv, &paramWMV, sizeof(paramWMV));
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setRVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t version = kTypeRVVer_G2;
+ OMX_VIDEO_PARAM_RVTYPE paramRV;
+
+ if (!msg->findInt32(getMsgKey(kKeyRVVersion), &version)) {
+ ALOGE("RV version not detected");
+ }
+
+ InitOMXParams(&paramRV);
+ paramRV.nPortIndex = kPortIndexInput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamVideoRv, &paramRV, sizeof(paramRV));
+ if (err != OK)
+ return err;
+
+ if (version == kTypeRVVer_G2) {
+ paramRV.eFormat = OMX_VIDEO_RVFormatG2;
+ } else if (version == kTypeRVVer_8) {
+ paramRV.eFormat = OMX_VIDEO_RVFormat8;
+ } else if (version == kTypeRVVer_9) {
+ paramRV.eFormat = OMX_VIDEO_RVFormat9;
+ }
+
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamVideoRv, &paramRV, sizeof(paramRV));
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setFFmpegVideoFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t codec_id = 0;
+ int32_t width = 0;
+ int32_t height = 0;
+ OMX_VIDEO_PARAM_FFMPEGTYPE param;
+
+ ALOGD("setFFmpegVideoFormat");
+
+ if (!msg->findInt32(getMsgKey(kKeyWidth), &width)) {
+ ALOGE("No video width specified");
+ }
+ if (!msg->findInt32(getMsgKey(kKeyHeight), &height)) {
+ ALOGE("No video height specified");
+ }
+ if (!msg->findInt32(getMsgKey(kKeyCodecId), &codec_id)) {
+ ALOGE("No codec id sent for FFMPEG catch-all codec!");
+ }
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamVideoFFmpeg, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.eCodecId = codec_id;
+ param.nWidth = width;
+ param.nHeight = height;
+
+ err = OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamVideoFFmpeg, &param, sizeof(param));
+ return err;
+}
+
+//audio
+status_t FFMPEGSoftCodec::setRawAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ int32_t bitsPerSample = 16;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ if (!msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample)) {
+ ALOGD("No PCM format specified, using 16 bit");
+ }
+
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+ if (err != OK) {
+ return err;
+ }
+
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+ if (err != OK) {
+ return err;
+ }
+
+ OMX_AUDIO_PARAM_PCMMODETYPE pcmParams;
+ InitOMXParams(&pcmParams);
+ pcmParams.nPortIndex = kPortIndexOutput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+
+ if (err != OK) {
+ return err;
+ }
+
+ pcmParams.nChannels = numChannels;
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.bInterleaved = OMX_TRUE;
+ pcmParams.nBitPerSample = bitsPerSample;
+ pcmParams.nSamplingRate = sampleRate;
+ pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
+
+ if (getOMXChannelMapping(numChannels, pcmParams.eChannelMapping) != OK) {
+ return OMX_ErrorNone;
+ }
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+}
+
+status_t FFMPEGSoftCodec::setWMAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t version = 0;
+ int32_t numChannels = 0;
+ int32_t bitRate = 0;
+ int32_t sampleRate = 0;
+ int32_t blockAlign = 0;
+ int32_t bitsPerSample = 0;
+
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ CHECK(msg->findInt32(getMsgKey(kKeyBitRate), &bitRate));
+ if (!msg->findInt32(getMsgKey(kKeyBlockAlign), &blockAlign)) {
+ // we should be last on the codec list, but another sniffer may
+ // have handled it and there is no hardware codec.
+ if (!msg->findInt32(getMsgKey(kKeyWMABlockAlign), &blockAlign)) {
+ return ERROR_UNSUPPORTED;
+ }
+ }
+
+ // mm-parser may want a different bit depth
+ if (msg->findInt32(getMsgKey(kKeyWMABitspersample), &bitsPerSample)) {
+ msg->setInt32("bit-width", bitsPerSample);
+ }
+
+ ALOGV("Channels: %d, SampleRate: %d, BitRate: %d, blockAlign: %d",
+ numChannels, sampleRate, bitRate, blockAlign);
+
+ CHECK(msg->findInt32(getMsgKey(kKeyWMAVersion), &version));
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ if (err != OK)
+ return err;
+
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+
+ // http://msdn.microsoft.com/en-us/library/ff819498(v=vs.85).aspx
+ if (version == kTypeWMA) {
+ paramWMA.eFormat = OMX_AUDIO_WMAFormat7;
+ } else if (version == kTypeWMAPro) {
+ paramWMA.eFormat = OMX_AUDIO_WMAFormat8;
+ } else if (version == kTypeWMALossLess) {
+ paramWMA.eFormat = OMX_AUDIO_WMAFormat9;
+ }
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+}
+
+status_t FFMPEGSoftCodec::setVORBISFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_VORBISTYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioVorbis, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioVorbis, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setRAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t bitRate = 0;
+ int32_t sampleRate = 0;
+ int32_t blockAlign = 0;
+ OMX_AUDIO_PARAM_RATYPE paramRA;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ msg->findInt32(getMsgKey(kKeyBitRate), &bitRate);
+ CHECK(msg->findInt32(getMsgKey(kKeyBlockAlign), &blockAlign));
+
+ ALOGV("Channels: %d, SampleRate: %d, BitRate: %d, blockAlign: %d",
+ numChannels, sampleRate, bitRate, blockAlign);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&paramRA);
+ paramRA.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioRa, &paramRA, sizeof(paramRA));
+ if (err != OK)
+ return err;
+
+ paramRA.eFormat = OMX_AUDIO_RAFormatUnused; // FIXME: Cook only?
+ paramRA.nChannels = numChannels;
+ paramRA.nSamplingRate = sampleRate;
+ // FIXME/HACK: blockAlign is passed through the nNumRegions parameter,
+ // since the Cook audio decoder needs it.
+ paramRA.nNumRegions = blockAlign;
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioRa, &paramRA, sizeof(paramRA));
+}
+
+status_t FFMPEGSoftCodec::setFLACFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ int32_t bitsPerSample = 16;
+ OMX_AUDIO_PARAM_FLACTYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ CHECK(msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample));
+
+ ALOGV("Channels: %d, SampleRate: %d BitsPerSample: %d",
+ numChannels, sampleRate, bitsPerSample);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioFlac, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+ param.nCompressionLevel = bitsPerSample; // hack: piggyback bits-per-sample on nCompressionLevel
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioFlac, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setMP2Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_MP2TYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioMp2, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioMp2, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setAC3Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_ANDROID_AC3TYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setAPEFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ int32_t bitsPerSample = 0;
+ OMX_AUDIO_PARAM_APETYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ CHECK(msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample));
+
+ ALOGV("Channels:%d, SampleRate:%d, bitsPerSample:%d",
+ numChannels, sampleRate, bitsPerSample);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioApe, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSamplingRate = sampleRate;
+ param.nBitsPerSample = bitsPerSample;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioApe, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setDTSFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_DTSTYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioDts, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSamplingRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioDts, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setFFmpegAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t codec_id = 0;
+ int32_t numChannels = 0;
+ int32_t bitRate = 0;
+ int32_t bitsPerSample = 16;
+ int32_t sampleRate = 0;
+ int32_t blockAlign = 0;
+ int32_t sampleFormat = 0;
+ int32_t codedSampleBits = 0;
+ OMX_AUDIO_PARAM_FFMPEGTYPE param;
+
+ ALOGD("setFFmpegAudioFormat");
+
+ CHECK(msg->findInt32(getMsgKey(kKeyCodecId), &codec_id));
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleFormat), &sampleFormat));
+ msg->findInt32(getMsgKey(kKeyBitRate), &bitRate);
+ msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample);
+ msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate);
+ msg->findInt32(getMsgKey(kKeyBlockAlign), &blockAlign);
+ msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample);
+ msg->findInt32(getMsgKey(kKeyCodedSampleBits), &codedSampleBits);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioFFmpeg, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.eCodecId = codec_id;
+ param.nChannels = numChannels;
+ param.nBitRate = bitRate;
+ param.nBitsPerSample = codedSampleBits;
+ param.nSampleRate = sampleRate;
+ param.nBlockAlign = blockAlign;
+ param.eSampleFormat = sampleFormat;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioFFmpeg, &param, sizeof(param));
+}
+
+}
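
Every setXXXFormat() helper above follows the same OMX round-trip: zero-initialize the parameter struct with InitOMXParams(), select the input port, read the component's current settings with getParameter(), patch only the fields carried in the AMessage, and write the struct back with setParameter(). The sketch below reproduces that shape outside the Android tree; MockPcmParams and MockOmxNode are stand-ins for OMX_AUDIO_PARAM_PCMMODETYPE and the IOMX handle, not framework types.

    // Minimal, self-contained mock of the get/patch/set round-trip used by the
    // setXXXFormat() helpers. Types named Mock* are stand-ins, not framework APIs.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    struct MockPcmParams {          // stands in for OMX_AUDIO_PARAM_PCMMODETYPE
        uint32_t nSize;
        uint32_t nPortIndex;
        uint32_t nChannels;
        uint32_t nSamplingRate;
        uint32_t nBitPerSample;
    };

    struct MockOmxNode {            // stands in for the IOMX handle + node_id pair
        MockPcmParams current{static_cast<uint32_t>(sizeof(MockPcmParams)), 0, 2, 48000, 16};
        int getParameter(MockPcmParams *p) { *p = current; return 0; }
        int setParameter(const MockPcmParams *p) { current = *p; return 0; }
    };

    template <typename T>
    static void InitParams(T *params) {        // mirrors InitOMXParams()
        std::memset(params, 0, sizeof(T));
        params->nSize = sizeof(T);
    }

    static int setRawAudioFormatSketch(MockOmxNode &node, uint32_t channels,
                                       uint32_t sampleRate, uint32_t bits) {
        MockPcmParams pcm;
        InitParams(&pcm);
        pcm.nPortIndex = 0;                        // kPortIndexInput
        if (node.getParameter(&pcm) != 0) return -1;   // read current settings
        pcm.nChannels = channels;                  // patch only what the caller knows
        pcm.nSamplingRate = sampleRate;
        pcm.nBitPerSample = bits;
        return node.setParameter(&pcm);            // write the struct back
    }

    int main() {
        MockOmxNode node;
        setRawAudioFormatSketch(node, 6, 44100, 24);
        std::cout << node.current.nChannels << " ch @ "
                  << node.current.nSamplingRate << " Hz, "
                  << node.current.nBitPerSample << " bits\n";
        return 0;
    }
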
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index 89a91f7..87345e1 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -32,6 +32,13 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
+#ifdef ENABLE_AV_ENHANCEMENTS
+#include "QCMediaDefs.h"
+#include "QCMetaData.h"
+#endif
+
+#include <system/audio.h>
+
namespace android {
class FLACParser;
@@ -72,6 +79,8 @@ private:
class FLACParser : public RefBase {
+friend class FLACSource;
+
public:
FLACParser(
const sp<DataSource> &dataSource,
@@ -103,6 +112,8 @@ public:
// media buffers
void allocateBuffers();
void releaseBuffers();
+ void copyBuffer(short *dst, const int *const *src, unsigned nSamples);
+
MediaBuffer *readBuffer() {
return readBuffer(false, 0LL);
}
@@ -113,6 +124,7 @@ public:
protected:
virtual ~FLACParser();
+
private:
sp<DataSource> mDataSource;
sp<MetaData> mFileMetadata;
@@ -122,7 +134,6 @@ private:
// media buffers
size_t mMaxBufferSize;
MediaBufferGroup *mGroup;
- void (*mCopy)(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels);
// handle to underlying libFLAC parser
FLAC__StreamDecoder *mDecoder;
@@ -377,109 +388,41 @@ void FLACParser::errorCallback(FLAC__StreamDecoderErrorStatus status)
mErrorStatus = status;
}
-// Copy samples from FLAC native 32-bit non-interleaved to 16-bit interleaved.
-// These are candidates for optimization if needed.
-
-static void copyMono8(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] << 8;
- }
-}
-
-static void copyStereo8(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] << 8;
- *dst++ = src[1][i] << 8;
- }
-}
-
-static void copyMultiCh8(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
+void FLACParser::copyBuffer(short *dst, const int *const *src, unsigned nSamples)
{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i] << 8;
- }
- }
-}
-
-static void copyMono16(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i];
- }
-}
-
-static void copyStereo16(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i];
- *dst++ = src[1][i];
- }
-}
-
-static void copyMultiCh16(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i];
- }
- }
-}
-
-// 24-bit versions should do dithering or noise-shaping, here or in AudioFlinger
-
-static void copyMono24(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] >> 8;
- }
-}
-
-static void copyStereo24(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] >> 8;
- *dst++ = src[1][i] >> 8;
- }
-}
-
-static void copyMultiCh24(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i] >> 8;
+ unsigned int nChannels = getChannels();
+ unsigned int nBits = getBitsPerSample();
+ switch (nBits) {
+ case 8:
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i] << 8;
+ }
+ }
+ break;
+ case 16:
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i];
+ }
+ }
+ break;
+ case 24:
+ case 32:
+ {
+ int32_t *out = (int32_t *)dst;
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *out++ = src[c][i] << 8;
+ }
+ }
+ break;
}
+ default:
+ TRESPASS();
}
}
-static void copyTrespass(
- short * /* dst */,
- const int *const * /* src */,
- unsigned /* nSamples */,
- unsigned /* nChannels */) {
- TRESPASS();
-}
-
// FLACParser
FLACParser::FLACParser(
@@ -492,7 +435,6 @@ FLACParser::FLACParser(
mInitCheck(false),
mMaxBufferSize(0),
mGroup(NULL),
- mCopy(copyTrespass),
mDecoder(NULL),
mCurrentPos(0LL),
mEOF(false),
@@ -571,6 +513,8 @@ status_t FLACParser::init()
}
// check sample rate
switch (getSampleRate()) {
+ case 100:
+ case 1000:
case 8000:
case 11025:
case 12000:
@@ -578,38 +522,18 @@ status_t FLACParser::init()
case 22050:
case 24000:
case 32000:
+ case 42000:
case 44100:
+ case 46000:
case 48000:
case 88200:
case 96000:
+ case 192000:
break;
default:
ALOGE("unsupported sample rate %u", getSampleRate());
return NO_INIT;
}
- // configure the appropriate copy function, defaulting to trespass
- static const struct {
- unsigned mChannels;
- unsigned mBitsPerSample;
- void (*mCopy)(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels);
- } table[] = {
- { 1, 8, copyMono8 },
- { 2, 8, copyStereo8 },
- { 8, 8, copyMultiCh8 },
- { 1, 16, copyMono16 },
- { 2, 16, copyStereo16 },
- { 8, 16, copyMultiCh16 },
- { 1, 24, copyMono24 },
- { 2, 24, copyStereo24 },
- { 8, 24, copyMultiCh24 },
- };
- for (unsigned i = 0; i < sizeof(table)/sizeof(table[0]); ++i) {
- if (table[i].mChannels >= getChannels() &&
- table[i].mBitsPerSample == getBitsPerSample()) {
- mCopy = table[i].mCopy;
- break;
- }
- }
// populate track metadata
if (mTrackMetadata != 0) {
mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
@@ -618,6 +542,7 @@ status_t FLACParser::init()
// sample rate is non-zero, so division by zero not possible
mTrackMetadata->setInt64(kKeyDuration,
(getTotalSamples() * 1000000LL) / getSampleRate());
+ mTrackMetadata->setInt32(kKeyBitsPerSample, getBitsPerSample());
}
} else {
ALOGE("missing STREAMINFO");
@@ -633,7 +558,9 @@ void FLACParser::allocateBuffers()
{
CHECK(mGroup == NULL);
mGroup = new MediaBufferGroup;
- mMaxBufferSize = getMaxBlockSize() * getChannels() * sizeof(short);
+ // allocate enough to hold 24-bit samples (packed into 32-bit slots)
+ unsigned int bytesPerSample = getBitsPerSample() > 16 ? 4 : 2;
+ mMaxBufferSize = getMaxBlockSize() * getChannels() * bytesPerSample;
mGroup->add_buffer(new MediaBuffer(mMaxBufferSize));
}
@@ -686,12 +613,12 @@ MediaBuffer *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
if (err != OK) {
return NULL;
}
- size_t bufferSize = blocksize * getChannels() * sizeof(short);
+ size_t bufferSize = blocksize * getChannels() * (getBitsPerSample() > 16 ? 4 : 2);
CHECK(bufferSize <= mMaxBufferSize);
short *data = (short *) buffer->data();
buffer->set_range(0, bufferSize);
// copy PCM from FLAC write buffer to our media buffer, with interleaving
- (*mCopy)(data, mWriteBuffer, blocksize, getChannels());
+ copyBuffer(data, mWriteBuffer, blocksize);
// fill in buffer metadata
CHECK(mWriteHeader.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
FLAC__uint64 sampleNumber = mWriteHeader.number.sample_number;
@@ -726,9 +653,10 @@ FLACSource::~FLACSource()
status_t FLACSource::start(MetaData * /* params */)
{
+ CHECK(!mStarted);
+
ALOGV("FLACSource::start");
- CHECK(!mStarted);
mParser->allocateBuffers();
mStarted = true;
@@ -845,12 +773,14 @@ bool SniffFLAC(
{
// first 4 is the signature word
// second 4 is the sizeof STREAMINFO
+ // the first bit of the second 4 bytes indicates whether this is the last metadata block
// 042 is the mandatory STREAMINFO
// no need to read rest of the header, as a premature EOF will be caught later
uint8_t header[4+4];
if (source->readAt(0, header, sizeof(header)) != sizeof(header)
- || memcmp("fLaC\0\0\0\042", header, 4+4))
- {
+ || memcmp("fLaC", header, 4)
+ || !(header[4] == 0x80 || header[4] == 0x00)
+ || memcmp("\0\0\042", header + 5, 3)) {
return false;
}
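
The relaxed SniffFLAC() check above accepts streams whose first METADATA_BLOCK_HEADER has the last-metadata-block flag set. The standalone restatement below spells out the byte layout it relies on: the 4-byte "fLaC" magic, one byte whose top bit is the last-block flag and whose low 7 bits must be 0 (block type STREAMINFO), then a 24-bit big-endian length that must be 34 (0x22). This is a sketch of the same test, not framework code.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static bool looksLikeFlac(const uint8_t header[8]) {
        if (std::memcmp(header, "fLaC", 4) != 0) return false;   // stream magic
        // byte 4: bit 7 = last-metadata-block flag, bits 6..0 = block type (0 == STREAMINFO)
        if (header[4] != 0x80 && header[4] != 0x00) return false;
        // bytes 5..7: 24-bit big-endian block length; STREAMINFO is always 34 bytes
        uint32_t len = (header[5] << 16) | (header[6] << 8) | header[7];
        return len == 0x22;
    }

    int main() {
        const uint8_t ok[8]   = {'f','L','a','C', 0x00, 0x00, 0x00, 0x22};
        const uint8_t last[8] = {'f','L','a','C', 0x80, 0x00, 0x00, 0x22};
        const uint8_t bad[8]  = {'f','L','a','C', 0x01, 0x00, 0x00, 0x22}; // wrong block type
        assert(looksLikeFlac(ok) && looksLikeFlac(last) && !looksLikeFlac(bad));
        return 0;
    }
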
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index 565f156..f7b1a02 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -30,6 +30,7 @@ namespace android {
FileSource::FileSource(const char *filename)
: mFd(-1),
+ mUri(filename),
mOffset(0),
mLength(-1),
mDecryptHandle(NULL),
@@ -58,6 +59,7 @@ FileSource::FileSource(int fd, int64_t offset, int64_t length)
mDrmBuf(NULL){
CHECK(offset >= 0);
CHECK(length >= 0);
+ fetchUriFromFd(fd);
}
FileSource::~FileSource() {
@@ -188,4 +190,18 @@ ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
return mDrmManagerClient->pread(mDecryptHandle, data, size, offset + mOffset);
}
}
+
+void FileSource::fetchUriFromFd(int fd) {
+ ssize_t len = 0;
+ char path[PATH_MAX] = {0};
+ char link[PATH_MAX] = {0};
+
+ mUri.clear();
+
+ snprintf(path, PATH_MAX, "/proc/%d/fd/%d", getpid(), fd);
+ if ((len = readlink(path, link, sizeof(link)-1)) != -1) {
+ link[len] = '\0';
+ mUri.setTo(link);
+ }
+}
} // namespace android
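
fetchUriFromFd() recovers a best-effort path for a descriptor by reading the /proc/<pid>/fd/<fd> symlink; the link target is only meaningful for regular files and may be anonymous (e.g. "pipe:[...]") or stale. A minimal standalone version of the same idea, without the String8 member and assuming a Linux /proc filesystem:

    #include <climits>
    #include <cstdio>
    #include <string>
    #include <fcntl.h>
    #include <unistd.h>

    static std::string pathFromFd(int fd) {
        char procPath[64];
        char target[PATH_MAX] = {0};
        std::snprintf(procPath, sizeof(procPath), "/proc/%d/fd/%d", getpid(), fd);
        ssize_t len = readlink(procPath, target, sizeof(target) - 1);
        if (len < 0) return std::string();   // fd closed or /proc unavailable
        target[len] = '\0';                  // readlink() does not NUL-terminate
        return std::string(target);
    }

    int main() {
        int fd = open("/etc/hostname", O_RDONLY);
        if (fd >= 0) {
            std::printf("fd %d -> %s\n", fd, pathFromFd(fd).c_str());
            close(fd);
        }
        return 0;
    }
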
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index cd50365..1f010d4 100755
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -315,6 +315,9 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('m', 'p', '4', 'a'):
return MEDIA_MIMETYPE_AUDIO_AAC;
+ case FOURCC('.', 'm', 'p', '3'):
+ return MEDIA_MIMETYPE_AUDIO_MPEG;
+
case FOURCC('s', 'a', 'm', 'r'):
return MEDIA_MIMETYPE_AUDIO_AMR_NB;
@@ -838,7 +841,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
&& chunk_type != FOURCC('c', 'o', 'v', 'r')
&& mPath.size() == 5 && underMetaDataPath(mPath)) {
off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset;
+ *offset = stop_offset;
while (*offset < stop_offset) {
status_t err = parseChunk(offset, depth + 1);
if (err != OK) {
@@ -1352,7 +1355,14 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + sizeof(buffer);
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG, FourCC2MIME(chunk_type)) ||
+ !strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(chunk_type))) {
+ // an ESDS atom is not required for MP3;
+ // an AMR-WB track with a corrupted damr atom can prevent the clip from playing
+ *offset = stop_offset;
+ } else {
+ *offset = data_offset + sizeof(buffer);
+ }
while (*offset < stop_offset) {
status_t err = parseChunk(offset, depth + 1);
if (err != OK) {
@@ -1785,13 +1795,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
if (!isParsingMetaKeys) {
uint8_t buffer[4];
if (chunk_data_size < (off64_t)sizeof(buffer)) {
- *offset += chunk_size;
+ *offset = stop_offset;
return ERROR_MALFORMED;
}
if (mDataSource->readAt(
data_offset, buffer, 4) < 4) {
- *offset += chunk_size;
+ *offset = stop_offset;
return ERROR_IO;
}
@@ -1802,7 +1812,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
// apparently malformed chunks that don't have flags
// and completely different semantics than what's
// in the MPEG4 specs and skip it.
- *offset += chunk_size;
+ *offset = stop_offset;
return OK;
}
*offset += sizeof(buffer);
@@ -2573,6 +2583,12 @@ status_t MPEG4Extractor::parseITunesMetaData(off64_t offset, size_t size) {
mLastCommentName.setTo((const char *)buffer + 4);
break;
case FOURCC('d', 'a', 't', 'a'):
+ if (size < 8) {
+ delete[] buffer;
+ buffer = NULL;
+ ALOGE("b/24346430");
+ return ERROR_MALFORMED;
+ }
mLastCommentData.setTo((const char *)buffer + 8);
break;
}
@@ -2974,12 +2990,12 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
return OK;
}
- if (objectTypeIndication == 0x6b) {
- // The media subtype is MP3 audio
- // Our software MP3 audio decoder may not be able to handle
- // packetized MP3 audio; for now, lets just return ERROR_UNSUPPORTED
- ALOGE("MP3 track in MP4/3GPP file is not supported");
- return ERROR_UNSUPPORTED;
+ if (objectTypeIndication == 0x6b
+ || objectTypeIndication == 0x69) {
+ // This is mpeg1/2 audio content, set mimetype to mpeg
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+ ALOGD("objectTypeIndication:0x%x, set mimetype to mpeg ",objectTypeIndication);
+ return OK;
}
const uint8_t *csd;
@@ -4153,7 +4169,10 @@ status_t MPEG4Source::read(
(const uint8_t *)mBuffer->data() + mBuffer->range_offset();
size_t nal_size = parseNALSize(src);
- if (mBuffer->range_length() < mNALLengthSize + nal_size) {
+ if (mNALLengthSize > SIZE_MAX - nal_size) {
+ ALOGE("b/24441553, b/24445122");
+ }
+ if (mBuffer->range_length() - mNALLengthSize < nal_size) {
ALOGE("incomplete NAL unit.");
mBuffer->release();
@@ -4440,7 +4459,11 @@ status_t MPEG4Source::fragmentedRead(
(const uint8_t *)mBuffer->data() + mBuffer->range_offset();
size_t nal_size = parseNALSize(src);
- if (mBuffer->range_length() < mNALLengthSize + nal_size) {
+ if (mNALLengthSize > SIZE_MAX - nal_size) {
+ ALOGE("b/24441553, b/24445122");
+ }
+
+ if (mBuffer->range_length() - mNALLengthSize < nal_size) {
ALOGE("incomplete NAL unit.");
mBuffer->release();
@@ -4602,7 +4625,9 @@ static bool LegacySniffMPEG4(
return false;
}
- if (!memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
+ if (!memcmp(header, "ftyp3g2a", 8) || !memcmp(header, "ftyp3g2b", 8)
+ || !memcmp(header, "ftyp3g2c", 8)
+ || !memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
|| !memcmp(header, "ftyp3gr6", 8) || !memcmp(header, "ftyp3gs6", 8)
|| !memcmp(header, "ftyp3ge6", 8) || !memcmp(header, "ftyp3gg6", 8)
|| !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 8dfc54c..7f7ddf7 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2343,19 +2343,6 @@ status_t MPEG4Writer::Track::threadEntry() {
#if 0
if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- return ERROR_MALFORMED;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- return ERROR_MALFORMED;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
mResumed = false;
}
#endif
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c77d366..b6bea65 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -573,6 +573,7 @@ status_t MediaCodec::stop() {
}
status_t MediaCodec::reclaim() {
+ ALOGD("MediaCodec::reclaim(%p) %s", this, mInitName.c_str());
sp<AMessage> msg = new AMessage(kWhatRelease, this);
msg->setInt32("reclaimed", 1);
@@ -1148,7 +1149,8 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findString("componentName", &mComponentName));
- if (mComponentName.startsWith("OMX.google.")) {
+ if (mComponentName.startsWith("OMX.google.") ||
+ mComponentName.startsWith("OMX.ffmpeg.")) {
mFlags |= kFlagUsesSoftwareRenderer;
} else {
mFlags &= ~kFlagUsesSoftwareRenderer;
@@ -1163,8 +1165,10 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
resourceType = String8(kResourceNonSecureCodec);
}
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- addResource(resourceType, String8(subtype), 1);
+ if (mIsVideo) {
+ // audio codec is currently ignored.
+ addResource(resourceType, String8(kResourceVideoCodec), 1);
+ }
(new AMessage)->postReply(mReplyID);
break;
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 925be14..aef6715 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -216,7 +216,7 @@ void MediaCodecSource::Puller::onMessageReceived(const sp<AMessage> &msg) {
status_t err = mSource->read(&mbuf);
if (mPaused) {
- if (err == OK) {
+ if (err == OK && (NULL != mbuf)) {
mbuf->release();
mbuf = NULL;
}
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 2a50692..089c150 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -64,4 +64,32 @@ const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
const char *MEDIA_MIMETYPE_DATA_TIMED_ID3 = "application/x-id3v4";
+const char *MEDIA_MIMETYPE_VIDEO_FLV1 = "video/x-flv";
+const char *MEDIA_MIMETYPE_VIDEO_MJPEG = "video/x-jpeg";
+const char *MEDIA_MIMETYPE_VIDEO_RV = "video/vnd.rn-realvideo";
+const char *MEDIA_MIMETYPE_VIDEO_VC1 = "video/vc1";
+const char *MEDIA_MIMETYPE_VIDEO_FFMPEG = "video/ffmpeg";
+
+const char *MEDIA_MIMETYPE_AUDIO_PCM = "audio/x-pcm";
+const char *MEDIA_MIMETYPE_AUDIO_RA = "audio/vnd.rn-realaudio";
+const char *MEDIA_MIMETYPE_AUDIO_FFMPEG = "audio/ffmpeg";
+
+const char *MEDIA_MIMETYPE_CONTAINER_APE = "audio/x-ape";
+const char *MEDIA_MIMETYPE_CONTAINER_DIVX = "video/divx";
+const char *MEDIA_MIMETYPE_CONTAINER_DTS = "audio/vnd.dts";
+const char *MEDIA_MIMETYPE_CONTAINER_FLAC = "audio/flac";
+const char *MEDIA_MIMETYPE_CONTAINER_FLV = "video/x-flv";
+const char *MEDIA_MIMETYPE_CONTAINER_MOV = "video/quicktime";
+const char *MEDIA_MIMETYPE_CONTAINER_MP2 = "audio/mpeg2";
+const char *MEDIA_MIMETYPE_CONTAINER_MPG = "video/mpeg";
+const char *MEDIA_MIMETYPE_CONTAINER_RA = "audio/vnd.rn-realaudio";
+const char *MEDIA_MIMETYPE_CONTAINER_RM = "video/vnd.rn-realvideo";
+const char *MEDIA_MIMETYPE_CONTAINER_TS = "video/mp2t";
+const char *MEDIA_MIMETYPE_CONTAINER_WEBM = "video/webm";
+const char *MEDIA_MIMETYPE_CONTAINER_WMA = "audio/x-ms-wma";
+const char *MEDIA_MIMETYPE_CONTAINER_WMV = "video/x-ms-wmv";
+const char *MEDIA_MIMETYPE_CONTAINER_VC1 = "video/vc1";
+const char *MEDIA_MIMETYPE_CONTAINER_HEVC = "video/hevc";
+const char *MEDIA_MIMETYPE_CONTAINER_FFMPEG = "video/ffmpeg";
+
} // namespace android
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index bfb2a16..fc96e2f 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -44,6 +44,8 @@
namespace android {
+MediaExtractor::Plugin MediaExtractor::sPlugin;
+
sp<MetaData> MediaExtractor::getMetaData() {
return new MetaData;
}
@@ -58,9 +60,15 @@ sp<MediaExtractor> MediaExtractor::Create(
const uint32_t flags) {
sp<AMessage> meta;
+ bool secondPass = false;
+
String8 tmp;
- if (mime == NULL) {
+retry:
+ if (secondPass || mime == NULL) {
float confidence;
+ if (secondPass) {
+ confidence = 3.14f;
+ }
if (!source->sniff(&tmp, &confidence, &meta)) {
ALOGV("FAILED to autodetect media content.");
@@ -95,7 +103,12 @@ sp<MediaExtractor> MediaExtractor::Create(
}
sp<MediaExtractor> ret = NULL;
+ AString extractorName;
if ((ret = AVFactory::get()->createExtendedExtractor(source, mime, meta, flags)) != NULL) {
+ } else if (meta.get() && meta->findString("extended-extractor-use", &extractorName)
+ && sPlugin.create) {
+ ALOGI("Use extended extractor for the special mime(%s) or codec", mime);
+ ret = sPlugin.create(source, mime, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
|| !strcasecmp(mime, "audio/mp4")) {
ret = new MPEG4Extractor(source);
@@ -123,6 +136,8 @@ sp<MediaExtractor> MediaExtractor::Create(
ret = new MPEG2PSExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MIDI)) {
ret = new MidiExtractor(source);
+ } else if (!isDrm && sPlugin.create) {
+ ret = sPlugin.create(source, mime, meta);
}
ret = AVFactory::get()->updateExtractor(ret, source, mime, meta, flags);
@@ -134,6 +149,15 @@ sp<MediaExtractor> MediaExtractor::Create(
}
}
+ if (ret != NULL) {
+
+ if (!secondPass && ( ret->countTracks() == 0 ||
+ (!strncasecmp("video/", mime, 6) && ret->countTracks() < 2) ) ) {
+ secondPass = true;
+ goto retry;
+ }
+ }
+
return ret;
}
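
MediaExtractor::Create() above now retries the sniff once when the first extractor comes back with no tracks, or with fewer than two tracks for a video/* mime, on the assumption that a fallback plugin (such as an FFmpeg-based extractor) may do better on the second pass. A stripped-down restatement of that control flow, with the extractor factories reduced to a mock so it compiles on its own:

    #include <iostream>
    #include <string>

    struct FakeExtractor { size_t trackCount; };

    static FakeExtractor createOnce(bool secondPass, const std::string &mime) {
        (void)mime;  // a real implementation would re-sniff the source here
        // First pass: pretend the stock extractor found only one track.
        // Second pass: pretend the fallback plugin found audio + video.
        return FakeExtractor{secondPass ? 2u : 1u};
    }

    static FakeExtractor createWithRetry(const std::string &mime) {
        bool secondPass = false;
    retry:
        FakeExtractor ret = createOnce(secondPass, mime);
        bool tooFewTracks = ret.trackCount == 0 ||
            (mime.rfind("video/", 0) == 0 && ret.trackCount < 2);
        if (!secondPass && tooFewTracks) {
            secondPass = true;          // re-sniff and let the fallback plugin try
            goto retry;                 // same retry shape as the patch
        }
        return ret;
    }

    int main() {
        std::cout << "tracks after retry: "
                  << createWithRetry("video/avc").trackCount << "\n";
        return 0;
    }
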
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index f82636b..f72acf7 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -197,7 +197,8 @@ NuCachedSource2::NuCachedSource2(
mHighwaterThresholdBytes(kDefaultHighWaterThreshold),
mLowwaterThresholdBytes(kDefaultLowWaterThreshold),
mKeepAliveIntervalUs(kDefaultKeepAliveIntervalUs),
- mDisconnectAtHighwatermark(disconnectAtHighwatermark) {
+ mDisconnectAtHighwatermark(disconnectAtHighwatermark),
+ mSuspended(false) {
// We are NOT going to support disconnect-at-highwatermark indefinitely
// and we are not guaranteeing support for client-specified cache
// parameters. Both of these are temporary measures to solve a specific
@@ -224,9 +225,6 @@ NuCachedSource2::NuCachedSource2(
// So whenever we call DataSource::readAt it may end up in a call to
// IMediaHTTPConnection::readAt and therefore call back into JAVA.
mLooper->start(false /* runOnCallingThread */, true /* canCallJava */);
-
- Mutex::Autolock autoLock(mLock);
- (new AMessage(kWhatFetchMore, mReflector))->post();
}
NuCachedSource2::~NuCachedSource2() {
@@ -237,6 +235,18 @@ NuCachedSource2::~NuCachedSource2() {
mCache = NULL;
}
+// static
+sp<NuCachedSource2> NuCachedSource2::Create(
+ const sp<DataSource> &source,
+ const char *cacheConfig,
+ bool disconnectAtHighwatermark) {
+ sp<NuCachedSource2> instance = new NuCachedSource2(
+ source, cacheConfig, disconnectAtHighwatermark);
+ Mutex::Autolock autoLock(instance->mLock);
+ (new AMessage(kWhatFetchMore, instance->mReflector))->post();
+ return instance;
+}
+
status_t NuCachedSource2::getEstimatedBandwidthKbps(int32_t *kbps) {
if (mSource->flags() & kIsHTTPBasedSource) {
HTTPBase* source = static_cast<HTTPBase *>(mSource.get());
@@ -323,7 +333,7 @@ void NuCachedSource2::fetchInternal() {
}
}
- if (reconnect) {
+ if (reconnect && !mSuspended) {
status_t err =
mSource->reconnectAtOffset(mCacheOffset + mCache->totalSize());
@@ -433,6 +443,13 @@ void NuCachedSource2::onFetch() {
delayUs = 100000ll;
}
+ if (mSuspended) {
+ static_cast<HTTPBase *>(mSource.get())->disconnect();
+ mFinalStatus = -EAGAIN;
+ return;
+ }
+
+
(new AMessage(kWhatFetchMore, mReflector))->post(delayUs);
}
@@ -762,4 +779,25 @@ void NuCachedSource2::RemoveCacheSpecificHeaders(
}
}
+status_t NuCachedSource2::disconnectWhileSuspend() {
+ if (mSource != NULL) {
+ static_cast<HTTPBase *>(mSource.get())->disconnect();
+ mFinalStatus = -EAGAIN;
+ mSuspended = true;
+ } else {
+ return ERROR_UNSUPPORTED;
+ }
+
+ return OK;
+}
+
+status_t NuCachedSource2::connectWhileResume() {
+ mSuspended = false;
+
+ // Begin to connect again and fetch more data
+ (new AMessage(kWhatFetchMore, mReflector))->post();
+
+ return OK;
+}
+
} // namespace android
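
Besides the suspend/resume hooks, the patch moves the initial kWhatFetchMore post out of the NuCachedSource2 constructor into a static Create() factory. Posting a message that can call back into the object before the constructor returns is a use-before-fully-constructed race; a factory that posts only after construction avoids it. A generic sketch of that factory shape, with std::thread standing in for the ALooper/AHandler pair:

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <memory>
    #include <thread>

    class CachedSource {
    public:
        // Mirrors NuCachedSource2::Create(): construct first, then start async work,
        // so nothing can call back into a half-built object.
        static std::unique_ptr<CachedSource> Create() {
            std::unique_ptr<CachedSource> instance(new CachedSource());
            instance->startFetching();
            return instance;
        }
        ~CachedSource() {
            mRunning = false;                              // tell the fetcher to stop...
            if (mFetcher.joinable()) mFetcher.join();      // ...and wait for it
        }
        size_t cachedBytes() const { return mCached; }

    private:
        CachedSource() = default;          // constructor stays side-effect free
        void startFetching() {
            mFetcher = std::thread([this] {
                while (mRunning) {
                    mCached += 188;        // pretend we fetched one TS packet
                    std::this_thread::sleep_for(std::chrono::milliseconds(10));
                }
            });
        }
        std::thread mFetcher;
        std::atomic<bool> mRunning{true};
        std::atomic<size_t> mCached{0};
    };

    int main() {
        auto source = CachedSource::Create();
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        std::cout << "cached " << source->cachedBytes() << " bytes\n";
        return 0;
    }
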
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index e69890d..ac925f7 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -181,6 +181,7 @@ bool MuxOMX::isLocalNode_l(node_id node) const {
}
// static
+
bool MuxOMX::CanLiveLocally(const char *name) {
#ifdef __LP64__
(void)name; // disable unused parameter warning
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index b1dde80..3ec02d4 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -56,6 +56,11 @@
#include "include/avc_utils.h"
+#ifdef USE_S3D_SUPPORT
+#include "Exynos_OMX_Def.h"
+#include "ExynosHWCService.h"
+#endif
+
namespace android {
// Treat time out as an error if we have not received any output
@@ -1609,6 +1614,8 @@ bool OMXCodec::isIntermediateState(State state) {
return state == LOADED_TO_IDLE
|| state == IDLE_TO_EXECUTING
|| state == EXECUTING_TO_IDLE
+ || state == PAUSING
+ || state == FLUSHING
|| state == IDLE_TO_LOADED
|| state == RECONFIGURING;
}
@@ -1821,6 +1828,10 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
if (mFlags & kEnableGrallocUsageProtected) {
usage |= GRALLOC_USAGE_PROTECTED;
+#ifdef GRALLOC_USAGE_PRIVATE_NONSECURE
+ if (!(mFlags & kUseSecureInputBuffers))
+ usage |= GRALLOC_USAGE_PRIVATE_NONSECURE;
+#endif
}
err = setNativeWindowSizeFormatAndUsage(
@@ -1856,7 +1867,12 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
// plus an extra buffer to account for incorrect minUndequeuedBufs
CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
-
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ // Some devices don't like to set OMX_IndexParamPortDefinition at this
+ // point (even with an unmodified def), so skip it if possible.
+ // This check was present in KitKat.
+ if (def.nBufferCountActual < def.nBufferCountMin + minUndequeuedBufs) {
+#endif
for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
OMX_U32 newBufferCount =
def.nBufferCountMin + minUndequeuedBufs + extraBuffers;
@@ -1878,6 +1894,9 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
}
CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ }
+#endif
err = native_window_set_buffer_count(
mNativeWindow.get(), def.nBufferCountActual);
@@ -2373,7 +2392,41 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
break;
}
#endif
+#ifdef USE_S3D_SUPPORT
+ case (OMX_EVENTTYPE)OMX_EventS3DInformation:
+ {
+ if (mFlags & kClientNeedsFramebuffer)
+ break;
+
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<android::IExynosHWCService> hwc = interface_cast<android::IExynosHWCService>(
+ sm->getService(String16("Exynos.HWCService")));
+ if (hwc != NULL) {
+ if (data1 == OMX_TRUE) {
+ int eS3DMode;
+ switch (data2) {
+ case OMX_SEC_FPARGMT_SIDE_BY_SIDE:
+ eS3DMode = S3D_SBS;
+ break;
+ case OMX_SEC_FPARGMT_TOP_BOTTOM:
+ eS3DMode = S3D_TB;
+ break;
+ case OMX_SEC_FPARGMT_CHECKERBRD_INTERL: // unsupported format on HDMI
+ case OMX_SEC_FPARGMT_COLUMN_INTERL:
+ case OMX_SEC_FPARGMT_ROW_INTERL:
+ case OMX_SEC_FPARGMT_TEMPORAL_INTERL:
+ default:
+ eS3DMode = S3D_NONE;
+ }
+ hwc->setHdmiResolution(0, eS3DMode);
+ }
+ } else {
+ ALOGE("Exynos.HWCService is unavailable");
+ }
+ break;
+ }
+#endif
default:
{
CODEC_LOGV("EVENT(%d, %u, %u)", event, data1, data2);
@@ -2614,6 +2667,14 @@ void OMXCodec::onStateChange(OMX_STATETYPE newState) {
break;
}
+ case OMX_StatePause:
+ {
+ CODEC_LOGV("Now paused.");
+ CHECK_EQ((int)mState, (int)PAUSING);
+ setState(PAUSED);
+ break;
+ }
+
case OMX_StateInvalid:
{
setState(ERROR);
@@ -2646,7 +2707,8 @@ status_t OMXCodec::freeBuffersOnPort(
status_t stickyErr = OK;
- for (size_t i = buffers->size(); i-- > 0;) {
+ for (size_t i = buffers->size(); i > 0;) {
+ i--;
BufferInfo *info = &buffers->editItemAt(i);
if (onlyThoseWeOwn && info->mStatus == OWNED_BY_COMPONENT) {
@@ -2728,7 +2790,7 @@ void OMXCodec::onPortSettingsChanged(OMX_U32 portIndex) {
bool OMXCodec::flushPortAsync(OMX_U32 portIndex) {
CHECK(mState == EXECUTING || mState == RECONFIGURING
- || mState == EXECUTING_TO_IDLE);
+ || mState == EXECUTING_TO_IDLE || mState == FLUSHING);
CODEC_LOGV("flushPortAsync(%u): we own %zu out of %zu buffers already.",
portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
@@ -2778,7 +2840,7 @@ status_t OMXCodec::enablePortAsync(OMX_U32 portIndex) {
}
void OMXCodec::fillOutputBuffers() {
- CHECK_EQ((int)mState, (int)EXECUTING);
+ CHECK(mState == EXECUTING || mState == FLUSHING);
// This is a workaround for some decoders not properly reporting
// end-of-output-stream. If we own all input buffers and also own
@@ -2805,7 +2867,7 @@ void OMXCodec::fillOutputBuffers() {
}
void OMXCodec::drainInputBuffers() {
- CHECK(mState == EXECUTING || mState == RECONFIGURING);
+ CHECK(mState == EXECUTING || mState == RECONFIGURING || mState == FLUSHING);
if (mFlags & kUseSecureInputBuffers) {
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
@@ -3552,6 +3614,11 @@ void OMXCodec::clearCodecSpecificData() {
status_t OMXCodec::start(MetaData *meta) {
Mutex::Autolock autoLock(mLock);
+ if (mPaused) {
+ status_t err = resumeLocked(true);
+ return err;
+ }
+
if (mState != LOADED) {
CODEC_LOGE("called start in the unexpected state: %d", mState);
return UNKNOWN_ERROR;
@@ -3662,6 +3729,7 @@ status_t OMXCodec::stopOmxComponent_l() {
isError = true;
}
+ case PAUSED:
case EXECUTING:
{
setState(EXECUTING_TO_IDLE);
@@ -3733,6 +3801,14 @@ status_t OMXCodec::read(
Mutex::Autolock autoLock(mLock);
+ if (mPaused) {
+ err = resumeLocked(false);
+ if(err != OK) {
+ CODEC_LOGE("Failed to restart codec err= %d", err);
+ return err;
+ }
+ }
+
if (mState != EXECUTING && mState != RECONFIGURING) {
return UNKNOWN_ERROR;
}
@@ -3789,6 +3865,8 @@ status_t OMXCodec::read(
mFilledBuffers.clear();
CHECK_EQ((int)mState, (int)EXECUTING);
+ // The DSP flushes both ports simultaneously; flushing an individual port is not supported.
+ setState(FLUSHING);
bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
@@ -3818,6 +3896,11 @@ status_t OMXCodec::read(
return UNKNOWN_ERROR;
}
+ if (seeking) {
+ CHECK_EQ((int)mState, (int)FLUSHING);
+ setState(EXECUTING);
+ }
+
if (mFilledBuffers.empty()) {
return mSignalledEOS ? mFinalStatus : ERROR_END_OF_STREAM;
}
@@ -4251,11 +4334,60 @@ void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
}
status_t OMXCodec::pause() {
- Mutex::Autolock autoLock(mLock);
+ CODEC_LOGV("pause mState=%d", mState);
+
+ Mutex::Autolock autoLock(mLock);
+
+ if (mState != EXECUTING) {
+ return UNKNOWN_ERROR;
+ }
+
+ while (isIntermediateState(mState)) {
+ mAsyncCompletion.wait(mLock);
+ }
+ if (!strncmp(mComponentName, "OMX.qcom.", 9)) {
+ status_t err = mOMX->sendCommand(mNode,
+ OMX_CommandStateSet, OMX_StatePause);
+ CHECK_EQ(err, (status_t)OK);
+ setState(PAUSING);
+
+ mPaused = true;
+ while (mState != PAUSED && mState != ERROR) {
+ mAsyncCompletion.wait(mLock);
+ }
+ return mState == ERROR ? UNKNOWN_ERROR : OK;
+ } else {
+ mPaused = true;
+ return OK;
+ }
+
+}
- mPaused = true;
+status_t OMXCodec::resumeLocked(bool drainInputBuf) {
+ CODEC_LOGV("resume mState=%d", mState);
- return OK;
+ if (!strncmp(mComponentName, "OMX.qcom.", 9)) {
+ while (isIntermediateState(mState)) {
+ mAsyncCompletion.wait(mLock);
+ }
+ CHECK_EQ(mState, (status_t)PAUSED);
+ status_t err = mOMX->sendCommand(mNode,
+ OMX_CommandStateSet, OMX_StateExecuting);
+ CHECK_EQ(err, (status_t)OK);
+ setState(IDLE_TO_EXECUTING);
+ mPaused = false;
+ while (mState != EXECUTING && mState != ERROR) {
+ mAsyncCompletion.wait(mLock);
+ }
+ if(drainInputBuf)
+ drainInputBuffers();
+ return mState == ERROR ? UNKNOWN_ERROR : OK;
+ } else { // SW Codec
+ mPaused = false;
+ if(drainInputBuf)
+ drainInputBuffers();
+ return OK;
+ }
}
////////////////////////////////////////////////////////////////////////////////
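
The new pause()/resumeLocked() paths only drive the component through OMX_StatePause for OMX.qcom.* decoders and fall back to a soft mPaused flag otherwise; both rely on the usual "send the command, then wait on the condition variable until the async callback flips the state" idiom. A condensed illustration of that wait loop, with std::mutex/std::condition_variable standing in for the framework's Mutex/Condition:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    enum State { EXECUTING, PAUSING, PAUSED, ERROR_STATE };

    std::mutex gLock;
    std::condition_variable gStateChanged;
    State gState = EXECUTING;

    // Plays the role of the OMX callback thread delivering OMX_EventCmdComplete.
    void componentThread() {
        std::unique_lock<std::mutex> lock(gLock);
        gStateChanged.wait(lock, [] { return gState == PAUSING; });  // wait for the command
        gState = PAUSED;                        // command complete
        gStateChanged.notify_all();
    }

    bool pauseBlocking() {
        std::unique_lock<std::mutex> lock(gLock);
        gState = PAUSING;                       // setState(PAUSING) after sendCommand()
        gStateChanged.notify_all();             // wake the component "callback" thread
        gStateChanged.wait(lock, [] { return gState == PAUSED || gState == ERROR_STATE; });
        return gState == PAUSED;
    }

    int main() {
        std::thread omx(componentThread);
        std::cout << (pauseBlocking() ? "paused" : "error") << "\n";
        omx.join();
        return 0;
    }
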
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index c438d3c..d63ac96 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -23,6 +23,7 @@
#include <cutils/properties.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
@@ -178,6 +179,9 @@ struct MyVorbisExtractor : public MyOggExtractor {
protected:
virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
+ if (granulePos > INT64_MAX / 1000000ll) {
+ return INT64_MAX;
+ }
return granulePos * 1000000ll / mVi.rate;
}
@@ -770,8 +774,13 @@ status_t MyOggExtractor::_readNextPacket(MediaBuffer **out, bool calcVorbisTimes
return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
}
- mCurrentPageSamples =
- mCurrentPage.mGranulePosition - mPrevGranulePosition;
+ // Prevent a harmless unsigned integer overflow by clamping to 0
+ if (mCurrentPage.mGranulePosition >= mPrevGranulePosition) {
+ mCurrentPageSamples =
+ mCurrentPage.mGranulePosition - mPrevGranulePosition;
+ } else {
+ mCurrentPageSamples = 0;
+ }
mFirstPacketInPage = true;
mPrevGranulePosition = mCurrentPage.mGranulePosition;
@@ -916,6 +925,9 @@ int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const {
if (granulePos > mCodecDelay) {
pcmSamplePosition = granulePos - mCodecDelay;
}
+ if (pcmSamplePosition > INT64_MAX / 1000000ll) {
+ return INT64_MAX;
+ }
return pcmSamplePosition * 1000000ll / kOpusSampleRate;
}
@@ -1202,84 +1214,18 @@ void parseVorbisComment(
}
-// The returned buffer should be free()d.
-static uint8_t *DecodeBase64(const char *s, size_t size, size_t *outSize) {
- *outSize = 0;
-
- if ((size % 4) != 0) {
- return NULL;
- }
-
- size_t n = size;
- size_t padding = 0;
- if (n >= 1 && s[n - 1] == '=') {
- padding = 1;
-
- if (n >= 2 && s[n - 2] == '=') {
- padding = 2;
- }
- }
-
- size_t outLen = 3 * size / 4 - padding;
-
- *outSize = outLen;
-
- void *buffer = malloc(outLen);
- CHECK(buffer != NULL);
-
- uint8_t *out = (uint8_t *)buffer;
- size_t j = 0;
- uint32_t accum = 0;
- for (size_t i = 0; i < n; ++i) {
- char c = s[i];
- unsigned value;
- if (c >= 'A' && c <= 'Z') {
- value = c - 'A';
- } else if (c >= 'a' && c <= 'z') {
- value = 26 + c - 'a';
- } else if (c >= '0' && c <= '9') {
- value = 52 + c - '0';
- } else if (c == '+') {
- value = 62;
- } else if (c == '/') {
- value = 63;
- } else if (c != '=') {
- return NULL;
- } else {
- if (i < n - padding) {
- return NULL;
- }
-
- value = 0;
- }
-
- accum = (accum << 6) | value;
-
- if (((i + 1) % 4) == 0) {
- out[j++] = (accum >> 16);
-
- if (j < outLen) { out[j++] = (accum >> 8) & 0xff; }
- if (j < outLen) { out[j++] = accum & 0xff; }
-
- accum = 0;
- }
- }
-
- return (uint8_t *)buffer;
-}
-
static void extractAlbumArt(
const sp<MetaData> &fileMeta, const void *data, size_t size) {
ALOGV("extractAlbumArt from '%s'", (const char *)data);
- size_t flacSize;
- uint8_t *flac = DecodeBase64((const char *)data, size, &flacSize);
-
- if (flac == NULL) {
+ sp<ABuffer> flacBuffer = decodeBase64(AString((const char *)data, size));
+ if (flacBuffer == NULL) {
ALOGE("malformed base64 encoded data.");
return;
}
+ size_t flacSize = flacBuffer->size();
+ uint8_t *flac = flacBuffer->data();
ALOGV("got flac of size %zu", flacSize);
uint32_t picType;
@@ -1289,24 +1235,24 @@ static void extractAlbumArt(
char type[128];
if (flacSize < 8) {
- goto exit;
+ return;
}
picType = U32_AT(flac);
if (picType != 3) {
// This is not a front cover.
- goto exit;
+ return;
}
typeLen = U32_AT(&flac[4]);
if (typeLen > sizeof(type) - 1) {
- goto exit;
+ return;
}
// we've already checked above that flacSize >= 8
if (flacSize - 8 < typeLen) {
- goto exit;
+ return;
}
memcpy(type, &flac[8], typeLen);
@@ -1316,7 +1262,7 @@ static void extractAlbumArt(
if (!strcmp(type, "-->")) {
// This is not inline cover art, but an external url instead.
- goto exit;
+ return;
}
descLen = U32_AT(&flac[8 + typeLen]);
@@ -1324,7 +1270,7 @@ static void extractAlbumArt(
if (flacSize < 32 ||
flacSize - 32 < typeLen ||
flacSize - 32 - typeLen < descLen) {
- goto exit;
+ return;
}
dataLen = U32_AT(&flac[8 + typeLen + 4 + descLen + 16]);
@@ -1332,7 +1278,7 @@ static void extractAlbumArt(
// we've already checked above that (flacSize - 32 - typeLen - descLen) >= 0
if (flacSize - 32 - typeLen - descLen < dataLen) {
- goto exit;
+ return;
}
ALOGV("got image data, %zu trailing bytes",
@@ -1342,10 +1288,6 @@ static void extractAlbumArt(
kKeyAlbumArt, 0, &flac[8 + typeLen + 4 + descLen + 20], dataLen);
fileMeta->setCString(kKeyAlbumArtMIME, type);
-
-exit:
- free(flac);
- flac = NULL;
}
////////////////////////////////////////////////////////////////////////////////
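
Both getTimeUsOfGranule() overrides now clamp instead of computing granulePos * 1000000ll / rate directly, since a hostile granule position above INT64_MAX / 1,000,000 (about 9.2e12) overflows the multiplication before the division can help. A short numeric illustration of the same guard:

    #include <cstdint>
    #include <iostream>

    static int64_t granuleToTimeUs(uint64_t granulePos, uint64_t rate) {
        if (granulePos > INT64_MAX / 1000000ll) {
            return INT64_MAX;                  // clamp: the multiply below would overflow
        }
        return (int64_t)(granulePos * 1000000ll / rate);
    }

    int main() {
        std::cout << granuleToTimeUs(48000ull * 3600, 48000) << " us for 1 hour\n";
        std::cout << granuleToTimeUs(UINT64_MAX, 48000) << " us (clamped)\n";
        return 0;
    }
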
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
index 2748349..032bbb9 100644
--- a/media/libstagefright/SampleIterator.cpp
+++ b/media/libstagefright/SampleIterator.cpp
@@ -166,6 +166,13 @@ status_t SampleIterator::findChunkRange(uint32_t sampleIndex) {
if (mSampleToChunkIndex + 1 < mTable->mNumSampleToChunkOffsets) {
mStopChunk = entry[1].startChunk;
+ if (mStopChunk < mFirstChunk ||
+ (mStopChunk - mFirstChunk) > UINT32_MAX / mSamplesPerChunk ||
+ ((mStopChunk - mFirstChunk) * mSamplesPerChunk >
+ UINT32_MAX - mFirstChunkSampleIndex)) {
+
+ return ERROR_OUT_OF_RANGE;
+ }
mStopChunkSampleIndex =
mFirstChunkSampleIndex
+ (mStopChunk - mFirstChunk) * mSamplesPerChunk;
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 97dff43..02b20c4 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -194,11 +194,11 @@ status_t SampleTable::setChunkOffsetParams(
mNumChunkOffsets = U32_AT(&header[4]);
if (mChunkOffsetType == kChunkOffsetType32) {
- if (data_size < 8 + mNumChunkOffsets * 4) {
+ if ((data_size - 8) / 4 < mNumChunkOffsets) {
return ERROR_MALFORMED;
}
} else {
- if (data_size < 8 + mNumChunkOffsets * 8) {
+ if ((data_size - 8) / 8 < mNumChunkOffsets) {
return ERROR_MALFORMED;
}
}
@@ -231,7 +231,7 @@ status_t SampleTable::setSampleToChunkParams(
mNumSampleToChunkOffsets = U32_AT(&header[4]);
- if (data_size < 8 + mNumSampleToChunkOffsets * 12) {
+ if ((data_size - 8) / 12 < mNumSampleToChunkOffsets) {
return ERROR_MALFORMED;
}
@@ -245,6 +245,11 @@ status_t SampleTable::setSampleToChunkParams(
for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
uint8_t buffer[12];
+
+ if ((off64_t)((SIZE_MAX / 12) - 8 - i) < mSampleToChunkOffset) {
+ return ERROR_MALFORMED;
+ }
+
if (mDataSource->readAt(
mSampleToChunkOffset + 8 + i * 12, buffer, sizeof(buffer))
!= (ssize_t)sizeof(buffer)) {
@@ -386,7 +391,7 @@ status_t SampleTable::setCompositionTimeToSampleParams(
size_t numEntries = U32_AT(&header[4]);
- if (data_size != (numEntries + 1) * 8) {
+ if (((SIZE_MAX / 8) - 1 < numEntries) || (data_size != (numEntries + 1) * 8)) {
return ERROR_MALFORMED;
}
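
The sample-table size checks are rewritten from `data_size < 8 + n * entrySize` to `(data_size - 8) / entrySize < n` because n is a 32-bit count read from the file and the multiplication can wrap, letting a malformed table pass the bounds check. The snippet below contrasts the two forms; it assumes data_size is already known to be at least 8, as it is in the callers.

    #include <cstdint>
    #include <iostream>

    int main() {
        const uint32_t dataSize   = 1024;        // bytes available for the box payload
        const uint32_t entrySize  = 12;          // stsc entry size
        const uint32_t numEntries = 0x40000000;  // hostile count: 0x40000000 * 12 wraps to 0

        // Unsafe form: 8 + numEntries * entrySize overflows uint32_t and becomes
        // a small number, so the malformed table is accepted.
        bool unsafeRejects = dataSize < 8u + numEntries * entrySize;

        // Safe form (as in the patch): divide first, no overflow, table rejected.
        bool safeRejects = (dataSize - 8u) / entrySize < numEntries;

        std::cout << "unsafe check rejects: " << unsafeRejects << "\n"
                  << "safe check rejects:   " << safeRejects << "\n";
        return 0;
    }
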
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index a757181..c5018ae 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -42,7 +42,11 @@ static bool FileHasAcceptableExtension(const char *extension) {
".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
- ".avi", ".mpeg", ".mpg", ".awb", ".mpga"
+ ".adts", ".dm", ".m2ts", ".mp3d", ".wmv", ".asf", ".flv",
+ ".mov", ".ra", ".rm", ".rmvb", ".ac3", ".ape", ".dts",
+ ".mp1", ".mp2", ".f4v", "hlv", "nrg", "m2v", ".swf",
+ ".avi", ".mpg", ".mpeg", ".awb", ".vc1", ".vob", ".divx",
+ ".mpga", ".mov", ".qcp", ".ec3"
};
static const size_t kNumValidExtensions =
sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 6c795ac..c3adac4 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -156,7 +156,10 @@ static VideoFrame *extractVideoFrame(
sp<MetaData> format = source->getFormat();
sp<AMessage> videoFormat;
- convertMetaDataToMessage(trackMeta, &videoFormat);
+ if (convertMetaDataToMessage(trackMeta, &videoFormat) != OK) {
+ ALOGW("Failed to convert meta data to message");
+ return NULL;
+ }
// TODO: Use Flexible color instead
videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
@@ -455,6 +458,10 @@ VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
for (i = 0; i < n; ++i) {
sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ if (meta == NULL) {
+ continue;
+ }
+
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -625,6 +632,10 @@ void StagefrightMetadataRetriever::parseMetaData() {
size_t numTracks = mExtractor->countTracks();
+ if (numTracks == 0) { // no tracks available: the stream is corrupt or invalid
+ return;
+ }
+
char tmp[32];
sprintf(tmp, "%zu", numTracks);
@@ -648,6 +659,9 @@ void StagefrightMetadataRetriever::parseMetaData() {
String8 timedTextLang;
for (size_t i = 0; i < numTracks; ++i) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (trackMeta == NULL) {
+ continue;
+ }
int64_t durationUs;
if (trackMeta->findInt64(kKeyDuration, &durationUs)) {
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index e7d36dc..0231655 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -36,6 +36,7 @@
#include <media/AudioParameter.h>
#include <stagefright/AVExtensions.h>
+#include <media/stagefright/FFMPEGSoftCodec.h>
namespace android {
@@ -106,7 +107,7 @@ status_t convertMetaDataToMessage(
int avgBitRate;
if (meta->findInt32(kKeyBitRate, &avgBitRate)) {
- msg->setInt32("bit-rate", avgBitRate);
+ msg->setInt32("bitrate", avgBitRate);
}
int32_t isSync;
@@ -201,10 +202,15 @@ status_t convertMetaDataToMessage(
}
int32_t fps;
- if (meta->findInt32(kKeyFrameRate, &fps)) {
+ if (meta->findInt32(kKeyFrameRate, &fps) && fps > 0) {
msg->setInt32("frame-rate", fps);
}
+ int32_t bitsPerSample;
+ if (meta->findInt32(kKeyBitsPerSample, &bitsPerSample)) {
+ msg->setInt32("bit-width", bitsPerSample);
+ }
+
uint32_t type;
const void *data;
size_t size;
@@ -213,8 +219,10 @@ status_t convertMetaDataToMessage(
const uint8_t *ptr = (const uint8_t *)data;
- CHECK(size >= 7);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+ if (size < 7 || ptr[0] != 1) { // configurationVersion == 1
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
uint8_t profile __unused = ptr[1];
uint8_t level __unused = ptr[3];
@@ -240,7 +248,10 @@ status_t convertMetaDataToMessage(
buffer->setRange(0, 0);
for (size_t i = 0; i < numSeqParameterSets; ++i) {
- CHECK(size >= 2);
+ if (size < 2) {
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
size_t length = U16_AT(ptr);
ptr += 2;
@@ -269,13 +280,19 @@ status_t convertMetaDataToMessage(
}
buffer->setRange(0, 0);
- CHECK(size >= 1);
+ if (size < 1) {
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
size_t numPictureParameterSets = *ptr;
++ptr;
--size;
for (size_t i = 0; i < numPictureParameterSets; ++i) {
- CHECK(size >= 2);
+ if (size < 2) {
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
size_t length = U16_AT(ptr);
ptr += 2;
@@ -299,8 +316,10 @@ status_t convertMetaDataToMessage(
} else if (meta->findData(kKeyHVCC, &type, &data, &size)) {
const uint8_t *ptr = (const uint8_t *)data;
- CHECK(size >= 7);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+ if (size < 23 || ptr[0] != 1) { // configurationVersion == 1
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
uint8_t profile __unused = ptr[1] & 31;
uint8_t level __unused = ptr[12];
ptr += 22;
@@ -319,6 +338,10 @@ status_t convertMetaDataToMessage(
buffer->setRange(0, 0);
for (i = 0; i < numofArrays; i++) {
+ if (size < 3) {
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
ptr += 1;
size -= 1;
@@ -329,7 +352,10 @@ status_t convertMetaDataToMessage(
size -= 2;
for (j = 0; j < numofNals; j++) {
- CHECK(size >= 2);
+ if (size < 2) {
+ ALOGE("b/23680780");
+ return BAD_VALUE;
+ }
size_t length = U16_AT(ptr);
ptr += 2;
@@ -436,8 +462,16 @@ status_t convertMetaDataToMessage(
}
AVUtils::get()->convertMetaDataToMessage(meta, &msg);
+
+ FFMPEGSoftCodec::convertMetaDataToMessageFF(meta, &msg);
*format = msg;
+#if 0
+ ALOGI("convertMetaDataToMessage from:");
+ meta->dumpToLog();
+ ALOGI(" to: %s", msg->debugString(0).c_str());
+#endif
+
return OK;
}
@@ -629,6 +663,11 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
if (msg->findInt32("is-adts", &isADTS)) {
meta->setInt32(kKeyIsADTS, isADTS);
}
+
+ int32_t bitsPerSample;
+ if (msg->findInt32("bit-width", &bitsPerSample)) {
+ meta->setInt32(kKeyBitsPerSample, bitsPerSample);
+ }
}
int32_t maxInputSize;
@@ -647,7 +686,7 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
}
int32_t fps;
- if (msg->findInt32("frame-rate", &fps)) {
+ if (msg->findInt32("frame-rate", &fps) && fps > 0) {
meta->setInt32(kKeyFrameRate, fps);
}
@@ -680,8 +719,10 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
// XXX TODO add whatever other keys there are
+ FFMPEGSoftCodec::convertMessageToMetaDataFF(msg, meta);
+
#if 0
- ALOGI("converted %s to:", msg->debugString(0).c_str());
+ ALOGI("convertMessageToMetaData from %s to:", msg->debugString(0).c_str());
meta->dumpToLog();
#endif
}
@@ -829,6 +870,7 @@ bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
if (AVUtils::get()->canOffloadAPE(meta) != true) {
return false;
}
+ ALOGV("Mime type \"%s\" mapped to audio_format %d", mime, info.format);
// Redefine aac format according to its profile
// Offloading depends on audio DSP capabilities.
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 335ac84..b988f19 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -29,6 +29,7 @@
#include <media/stagefright/MetaData.h>
#include <utils/String8.h>
#include <cutils/bitops.h>
+#include <system/audio.h>
#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
@@ -194,12 +195,14 @@ status_t WAVExtractor::init() {
mNumChannels = U16_LE_AT(&formatSpec[2]);
if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
- if (mNumChannels != 1 && mNumChannels != 2) {
+ if (mNumChannels == 0) {
+ return ERROR_UNSUPPORTED;
+ } else if (mNumChannels != 1 && mNumChannels != 2) {
ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
mNumChannels);
}
} else {
- if (mNumChannels < 1 && mNumChannels > 8) {
+ if (mNumChannels < 1 || mNumChannels > 8) {
return ERROR_UNSUPPORTED;
}
}
@@ -284,6 +287,7 @@ status_t WAVExtractor::init() {
case WAVE_FORMAT_PCM:
mTrackMeta->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ mTrackMeta->setInt32(kKeyBitsPerSample, mBitsPerSample);
break;
case WAVE_FORMAT_ALAW:
mTrackMeta->setCString(
@@ -359,15 +363,16 @@ WAVSource::~WAVSource() {
}
status_t WAVSource::start(MetaData * /* params */) {
- ALOGV("WAVSource::start");
- CHECK(!mStarted);
+ if (mStarted) {
+ return OK;
+ }
mGroup = new MediaBufferGroup;
mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
- if (mBitsPerSample == 8) {
- // As a temporary buffer for 8->16 bit conversion.
+ if (mBitsPerSample == 8 || mBitsPerSample == 24) {
+ // As a temporary buffer for 8->16/24->32 bit conversion.
mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
}
@@ -427,9 +432,15 @@ status_t WAVSource::read(
}
// make sure that maxBytesToRead is multiple of 3, in 24-bit case
- size_t maxBytesToRead =
- mBitsPerSample == 8 ? kMaxFrameSize / 2 :
- (mBitsPerSample == 24 ? 3*(kMaxFrameSize/3): kMaxFrameSize);
+ size_t maxBytesToRead;
+ if(8 == mBitsPerSample)
+ maxBytesToRead = kMaxFrameSize / 2;
+ else if (24 == mBitsPerSample) {
+ maxBytesToRead = 3*(kMaxFrameSize/4);
+ } else
+ maxBytesToRead = kMaxFrameSize;
+ ALOGV("%s mBitsPerSample %d, kMaxFrameSize %zu, ",
+ __func__, mBitsPerSample, kMaxFrameSize);
size_t maxBytesAvailable =
(mCurrentPos - mOffset >= (off64_t)mSize)
@@ -488,23 +499,24 @@ status_t WAVSource::read(
buffer->release();
buffer = tmp;
} else if (mBitsPerSample == 24) {
- // Convert 24-bit signed samples to 16-bit signed.
-
- const uint8_t *src =
- (const uint8_t *)buffer->data() + buffer->range_offset();
- int16_t *dst = (int16_t *)src;
-
- size_t numSamples = buffer->range_length() / 3;
- for (size_t i = 0; i < numSamples; ++i) {
- int32_t x = (int32_t)(src[0] | src[1] << 8 | src[2] << 16);
- x = (x << 8) >> 8; // sign extension
-
- x = x >> 8;
- *dst++ = (int16_t)x;
- src += 3;
+ // Padding done here to convert to 32-bit samples
+ MediaBuffer *tmp;
+ CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
+ ssize_t numBytes = buffer->range_length() / 3;
+ tmp->set_range(0, 4 * numBytes);
+ int8_t *dst = (int8_t *)tmp->data();
+ const uint8_t *src = (const uint8_t *)buffer->data();
+ ALOGV("numBytes = %zd", numBytes);
+ while(numBytes-- > 0) {
+ *dst++ = 0x0;
+ *dst++ = src[0];
+ *dst++ = src[1];
+ *dst++ = src[2];
+ src += 3;
}
-
- buffer->set_range(buffer->range_offset(), 2 * numSamples);
+ buffer->release();
+ buffer = tmp;
+ ALOGV("length = %zu", buffer->range_length());
}
}
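
The rewritten 24-bit path in WAVSource::read() no longer downconverts to 16-bit; it left-justifies each little-endian 3-byte sample into a 32-bit slot by writing a zero low byte followed by the original three bytes (i.e. sample << 8 in little-endian memory order) and lets the rest of the audio pipeline treat the result as 32-bit PCM. A standalone version of that packing loop:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static std::vector<int32_t> pack24to32(const uint8_t *src, size_t numSamples) {
        std::vector<int32_t> out(numSamples);
        uint8_t *dst = reinterpret_cast<uint8_t *>(out.data());
        for (size_t i = 0; i < numSamples; ++i) {
            *dst++ = 0x00;        // low byte: zero padding
            *dst++ = src[0];      // original 24-bit little-endian sample
            *dst++ = src[1];
            *dst++ = src[2];
            src += 3;
        }
        return out;               // on little-endian targets each entry equals sample << 8
    }

    int main() {
        // One positive and one negative 24-bit sample, little-endian: 0x000001 and -1.
        const uint8_t raw[6] = {0x01, 0x00, 0x00, 0xFF, 0xFF, 0xFF};
        std::vector<int32_t> pcm = pack24to32(raw, 2);
        std::printf("0x%08X 0x%08X\n", (unsigned)pcm[0], (unsigned)pcm[1]);  // 0x00000100 0xFFFFFF00
        return 0;
    }
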
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 415702e..21109d9 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -99,7 +99,7 @@ LOCAL_STATIC_LIBRARIES := \
libstagefright_amrnbdec libsndfile
LOCAL_SHARED_LIBRARIES := \
- libstagefright_amrnb_common libaudioutils
+ libstagefright_amrnb_common libaudioutils liblog
LOCAL_MODULE := libstagefright_amrnbdec_test
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/amrnb/dec/src/a_refl.cpp b/media/libstagefright/codecs/amrnb/dec/src/a_refl.cpp
index fb7cff3..696d2da 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/a_refl.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/src/a_refl.cpp
@@ -59,6 +59,8 @@ terms listed above has been obtained from the copyright holder.
/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/
+#include <log/log.h>
+
#include "a_refl.h"
#include "typedef.h"
#include "cnst.h"
@@ -291,7 +293,8 @@ void A_Refl(
{
refl[i] = 0;
}
- break;
+ ALOGE("b/23609206");
+ return;
}
bState[j] = extract_l(L_temp);
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index e083315..afbe230 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -38,10 +38,10 @@ namespace android {
/** Function and structure definitions to keep code similar for each codec */
#define ivdec_api_function ih264d_api_function
-#define ivdext_init_ip_t ih264d_init_ip_t
-#define ivdext_init_op_t ih264d_init_op_t
-#define ivdext_fill_mem_rec_ip_t ih264d_fill_mem_rec_ip_t
-#define ivdext_fill_mem_rec_op_t ih264d_fill_mem_rec_op_t
+#define ivdext_create_ip_t ih264d_create_ip_t
+#define ivdext_create_op_t ih264d_create_op_t
+#define ivdext_delete_ip_t ih264d_delete_ip_t
+#define ivdext_delete_op_t ih264d_delete_op_t
#define ivdext_ctl_set_num_cores_ip_t ih264d_ctl_set_num_cores_ip_t
#define ivdext_ctl_set_num_cores_op_t ih264d_ctl_set_num_cores_op_t
@@ -115,15 +115,12 @@ SoftAVC::SoftAVC(
320 /* width */, 240 /* height */, callbacks,
appData, component),
mCodecCtx(NULL),
- mMemRecords(NULL),
mFlushOutBuffer(NULL),
mOmxColorFormat(OMX_COLOR_FormatYUV420Planar),
mIvColorFormat(IV_YUV_420P),
- mNewWidth(mWidth),
- mNewHeight(mHeight),
- mNewLevel(0),
mChangingResolution(false),
- mSignalledError(false) {
+ mSignalledError(false),
+ mStride(mWidth){
initPorts(
kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
@@ -132,14 +129,23 @@ SoftAVC::SoftAVC(
// If input dump is enabled, then open create an empty file
GENERATE_FILE_NAMES();
CREATE_DUMP_FILE(mInFile);
-
- CHECK_EQ(initDecoder(mWidth, mHeight), (status_t)OK);
}
SoftAVC::~SoftAVC() {
CHECK_EQ(deInitDecoder(), (status_t)OK);
}
+static void *ivd_aligned_malloc(void *ctxt, WORD32 alignment, WORD32 size) {
+ UNUSED(ctxt);
+ return memalign(alignment, size);
+}
+
+static void ivd_aligned_free(void *ctxt, void *buf) {
+ UNUSED(ctxt);
+ free(buf);
+ return;
+}
+
static size_t GetCPUCoreCount() {
long cpuCoreCount = 1;
#if defined(_SC_NPROCESSORS_ONLN)
@@ -149,7 +155,7 @@ static size_t GetCPUCoreCount() {
cpuCoreCount = sysconf(_SC_NPROC_ONLN);
#endif
CHECK(cpuCoreCount >= 1);
- ALOGD("Number of CPU cores: %ld", cpuCoreCount);
+ ALOGV("Number of CPU cores: %ld", cpuCoreCount);
return (size_t)cpuCoreCount;
}
@@ -235,12 +241,10 @@ status_t SoftAVC::resetDecoder() {
}
mSignalledError = false;
- /* Set the run-time (dynamic) parameters */
- setParams(outputBufferWidth());
-
/* Set number of cores/threads to be used by the codec */
setNumCores();
+ mStride = 0;
return OK;
}
@@ -287,160 +291,41 @@ status_t SoftAVC::setFlushMode() {
return OK;
}
-status_t SoftAVC::initDecoder(uint32_t width, uint32_t height) {
+status_t SoftAVC::initDecoder() {
IV_API_CALL_STATUS_T status;
- UWORD32 u4_num_reorder_frames;
- UWORD32 u4_num_ref_frames;
- UWORD32 u4_share_disp_buf;
- WORD32 i4_level;
-
mNumCores = GetCPUCoreCount();
mCodecCtx = NULL;
- /* Initialize number of ref and reorder modes (for H264) */
- u4_num_reorder_frames = 16;
- u4_num_ref_frames = 16;
- u4_share_disp_buf = 0;
-
- uint32_t displayStride = mIsAdaptive ? mAdaptiveMaxWidth : width;
- uint32_t displayHeight = mIsAdaptive ? mAdaptiveMaxHeight : height;
- uint32_t displaySizeY = displayStride * displayHeight;
-
- if(mNewLevel == 0){
- if (displaySizeY > (1920 * 1088)) {
- i4_level = 50;
- } else if (displaySizeY > (1280 * 720)) {
- i4_level = 40;
- } else if (displaySizeY > (720 * 576)) {
- i4_level = 31;
- } else if (displaySizeY > (624 * 320)) {
- i4_level = 30;
- } else if (displaySizeY > (352 * 288)) {
- i4_level = 21;
- } else {
- i4_level = 20;
- }
- } else {
- i4_level = mNewLevel;
- }
-
- {
- iv_num_mem_rec_ip_t s_num_mem_rec_ip;
- iv_num_mem_rec_op_t s_num_mem_rec_op;
-
- s_num_mem_rec_ip.u4_size = sizeof(s_num_mem_rec_ip);
- s_num_mem_rec_op.u4_size = sizeof(s_num_mem_rec_op);
- s_num_mem_rec_ip.e_cmd = IV_CMD_GET_NUM_MEM_REC;
-
- ALOGV("Get number of mem records");
- status = ivdec_api_function(
- mCodecCtx, (void *)&s_num_mem_rec_ip, (void *)&s_num_mem_rec_op);
- if (IV_SUCCESS != status) {
- ALOGE("Error in getting mem records: 0x%x",
- s_num_mem_rec_op.u4_error_code);
- return UNKNOWN_ERROR;
- }
-
- mNumMemRecords = s_num_mem_rec_op.u4_num_mem_rec;
- }
-
- mMemRecords = (iv_mem_rec_t *)ivd_aligned_malloc(
- 128, mNumMemRecords * sizeof(iv_mem_rec_t));
- if (mMemRecords == NULL) {
- ALOGE("Allocation failure");
- return NO_MEMORY;
- }
-
- memset(mMemRecords, 0, mNumMemRecords * sizeof(iv_mem_rec_t));
-
- {
- size_t i;
- ivdext_fill_mem_rec_ip_t s_fill_mem_ip;
- ivdext_fill_mem_rec_op_t s_fill_mem_op;
- iv_mem_rec_t *ps_mem_rec;
-
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_size =
- sizeof(ivdext_fill_mem_rec_ip_t);
- s_fill_mem_ip.i4_level = i4_level;
- s_fill_mem_ip.u4_num_reorder_frames = u4_num_reorder_frames;
- s_fill_mem_ip.u4_num_ref_frames = u4_num_ref_frames;
- s_fill_mem_ip.u4_share_disp_buf = u4_share_disp_buf;
- s_fill_mem_ip.u4_num_extra_disp_buf = 0;
- s_fill_mem_ip.e_output_format = mIvColorFormat;
-
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.e_cmd = IV_CMD_FILL_NUM_MEM_REC;
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.pv_mem_rec_location = mMemRecords;
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = displayStride;
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = displayHeight;
- s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_size =
- sizeof(ivdext_fill_mem_rec_op_t);
-
- ps_mem_rec = mMemRecords;
- for (i = 0; i < mNumMemRecords; i++) {
- ps_mem_rec[i].u4_size = sizeof(iv_mem_rec_t);
- }
-
- status = ivdec_api_function(
- mCodecCtx, (void *)&s_fill_mem_ip, (void *)&s_fill_mem_op);
-
- if (IV_SUCCESS != status) {
- ALOGE("Error in filling mem records: 0x%x",
- s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_error_code);
- return UNKNOWN_ERROR;
- }
- mNumMemRecords =
- s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_num_mem_rec_filled;
-
- ps_mem_rec = mMemRecords;
-
- for (i = 0; i < mNumMemRecords; i++) {
- ps_mem_rec->pv_base = ivd_aligned_malloc(
- ps_mem_rec->u4_mem_alignment, ps_mem_rec->u4_mem_size);
- if (ps_mem_rec->pv_base == NULL) {
- ALOGE("Allocation failure for memory record #%zu of size %u",
- i, ps_mem_rec->u4_mem_size);
- status = IV_FAIL;
- return NO_MEMORY;
- }
-
- ps_mem_rec++;
- }
- }
+ mStride = outputBufferWidth();
/* Initialize the decoder */
{
- ivdext_init_ip_t s_init_ip;
- ivdext_init_op_t s_init_op;
+ ivdext_create_ip_t s_create_ip;
+ ivdext_create_op_t s_create_op;
void *dec_fxns = (void *)ivdec_api_function;
- s_init_ip.s_ivd_init_ip_t.u4_size = sizeof(ivdext_init_ip_t);
- s_init_ip.s_ivd_init_ip_t.e_cmd = (IVD_API_COMMAND_TYPE_T)IV_CMD_INIT;
- s_init_ip.s_ivd_init_ip_t.pv_mem_rec_location = mMemRecords;
- s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = displayStride;
- s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = displayHeight;
-
- s_init_ip.i4_level = i4_level;
- s_init_ip.u4_num_reorder_frames = u4_num_reorder_frames;
- s_init_ip.u4_num_ref_frames = u4_num_ref_frames;
- s_init_ip.u4_share_disp_buf = u4_share_disp_buf;
- s_init_ip.u4_num_extra_disp_buf = 0;
-
- s_init_op.s_ivd_init_op_t.u4_size = sizeof(s_init_op);
+ s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
+ s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
+ s_create_ip.s_ivd_create_ip_t.u4_share_disp_buf = 0;
+ s_create_op.s_ivd_create_op_t.u4_size = sizeof(ivdext_create_op_t);
+ s_create_ip.s_ivd_create_ip_t.e_output_format = mIvColorFormat;
+ s_create_ip.s_ivd_create_ip_t.pf_aligned_alloc = ivd_aligned_malloc;
+ s_create_ip.s_ivd_create_ip_t.pf_aligned_free = ivd_aligned_free;
+ s_create_ip.s_ivd_create_ip_t.pv_mem_ctxt = NULL;
- s_init_ip.s_ivd_init_ip_t.u4_num_mem_rec = mNumMemRecords;
- s_init_ip.s_ivd_init_ip_t.e_output_format = mIvColorFormat;
+ status = ivdec_api_function(mCodecCtx, (void *)&s_create_ip, (void *)&s_create_op);
- mCodecCtx = (iv_obj_t *)mMemRecords[0].pv_base;
+ mCodecCtx = (iv_obj_t*)s_create_op.s_ivd_create_op_t.pv_handle;
mCodecCtx->pv_fxns = dec_fxns;
mCodecCtx->u4_size = sizeof(iv_obj_t);
- status = ivdec_api_function(mCodecCtx, (void *)&s_init_ip, (void *)&s_init_op);
if (status != IV_SUCCESS) {
+ ALOGE("Error in create: 0x%x",
+ s_create_op.s_ivd_create_op_t.u4_error_code);
+ deInitDecoder();
mCodecCtx = NULL;
- ALOGE("Error in init: 0x%x",
- s_init_op.s_ivd_init_op_t.u4_error_code);
return UNKNOWN_ERROR;
}
}
@@ -449,7 +334,7 @@ status_t SoftAVC::initDecoder(uint32_t width, uint32_t height) {
resetPlugin();
/* Set the run time (dynamic) parameters */
- setParams(displayStride);
+ setParams(mStride);
/* Set number of cores/threads to be used by the codec */
setNumCores();
@@ -457,61 +342,37 @@ status_t SoftAVC::initDecoder(uint32_t width, uint32_t height) {
/* Get codec version */
logVersion();
- /* Allocate internal picture buffer */
- uint32_t bufferSize = displaySizeY * 3 / 2;
- mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize);
- if (NULL == mFlushOutBuffer) {
- ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
- return NO_MEMORY;
- }
-
- mInitNeeded = false;
mFlushNeeded = false;
return OK;
}
status_t SoftAVC::deInitDecoder() {
size_t i;
+ IV_API_CALL_STATUS_T status;
- if (mMemRecords) {
- iv_mem_rec_t *ps_mem_rec;
+ if (mCodecCtx) {
+ ivdext_delete_ip_t s_delete_ip;
+ ivdext_delete_op_t s_delete_op;
- ps_mem_rec = mMemRecords;
- for (i = 0; i < mNumMemRecords; i++) {
- if (ps_mem_rec->pv_base) {
- ivd_aligned_free(ps_mem_rec->pv_base);
- }
- ps_mem_rec++;
+ s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
+ s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
+
+ s_delete_op.s_ivd_delete_op_t.u4_size = sizeof(ivdext_delete_op_t);
+
+ status = ivdec_api_function(mCodecCtx, (void *)&s_delete_ip, (void *)&s_delete_op);
+ if (status != IV_SUCCESS) {
+ ALOGE("Error in delete: 0x%x",
+ s_delete_op.s_ivd_delete_op_t.u4_error_code);
+ return UNKNOWN_ERROR;
}
- ivd_aligned_free(mMemRecords);
- mMemRecords = NULL;
}
- if (mFlushOutBuffer) {
- ivd_aligned_free(mFlushOutBuffer);
- mFlushOutBuffer = NULL;
- }
- mInitNeeded = true;
mChangingResolution = false;
return OK;
}
-status_t SoftAVC::reInitDecoder(uint32_t width, uint32_t height) {
- status_t ret;
-
- deInitDecoder();
-
- ret = initDecoder(width, height);
- if (OK != ret) {
- ALOGE("Create failure");
- deInitDecoder();
- return NO_MEMORY;
- }
- return OK;
-}
-
void SoftAVC::onReset() {
SoftVideoDecoderOMXComponent::onReset();
@@ -520,23 +381,6 @@ void SoftAVC::onReset() {
resetPlugin();
}
-OMX_ERRORTYPE SoftAVC::internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params) {
- const uint32_t oldWidth = mWidth;
- const uint32_t oldHeight = mHeight;
- OMX_ERRORTYPE ret = SoftVideoDecoderOMXComponent::internalSetParameter(index, params);
- if (mWidth != oldWidth || mHeight != oldHeight) {
- mNewWidth = mWidth;
- mNewHeight = mHeight;
- status_t err = reInitDecoder(mNewWidth, mNewHeight);
- if (err != OK) {
- notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
- mSignalledError = true;
- return OMX_ErrorUnsupportedSetting;
- }
- }
- return ret;
-}
-
void SoftAVC::setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
@@ -587,6 +431,17 @@ void SoftAVC::onPortFlushCompleted(OMX_U32 portIndex) {
if (kOutputPortIndex == portIndex) {
setFlushMode();
+ /* Allocate a picture buffer to hold the flushed data */
+ uint32_t displayStride = outputBufferWidth();
+ uint32_t displayHeight = outputBufferHeight();
+
+ uint32_t bufferSize = displayStride * displayHeight * 3 / 2;
+ mFlushOutBuffer = (uint8_t *)memalign(128, bufferSize);
+ if (NULL == mFlushOutBuffer) {
+ ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
+ return;
+ }
+
while (true) {
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
@@ -601,6 +456,12 @@ void SoftAVC::onPortFlushCompleted(OMX_U32 portIndex) {
break;
}
}
+
+ if (mFlushOutBuffer) {
+ free(mFlushOutBuffer);
+ mFlushOutBuffer = NULL;
+ }
+
}
}
@@ -614,6 +475,20 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
return;
}
+ if (NULL == mCodecCtx) {
+ if (OK != initDecoder()) {
+ ALOGE("Failed to initialize decoder");
+ notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+ }
+ if (outputBufferWidth() != mStride) {
+ /* Set the run-time (dynamic) parameters */
+ mStride = outputBufferWidth();
+ setParams(mStride);
+ }
+
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
@@ -676,22 +551,6 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
}
}
- // When there is an init required and the decoder is not in flush mode,
- // update output port's definition and reinitialize decoder.
- if (mInitNeeded && !mIsInFlush) {
- bool portWillReset = false;
-
- status_t err = reInitDecoder(mNewWidth, mNewHeight);
- if (err != OK) {
- notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
- mSignalledError = true;
- return;
- }
-
- handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
- return;
- }
-
/* Get a free slot in timestamp array to hold input timestamp */
{
size_t i;
@@ -726,10 +585,26 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
IV_API_CALL_STATUS_T status;
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
- bool unsupportedDimensions =
+ bool unsupportedResolution =
(IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_dec_op.u4_error_code & 0xFF));
+
+ /* Check for unsupported dimensions */
+ if (unsupportedResolution) {
+ ALOGE("Unsupported resolution : %dx%d", mWidth, mHeight);
+ notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
+ bool allocationFailed = (IVD_MEM_ALLOC_FAILED == (s_dec_op.u4_error_code & 0xFF));
+ if (allocationFailed) {
+ ALOGE("Allocation failure in decoder");
+ notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
- bool unsupportedLevel = (IH264D_UNSUPPORTED_LEVEL == (s_dec_op.u4_error_code & 0xFF));
GETTIME(&mTimeEnd, NULL);
/* Compute time taken for decode() */
@@ -747,46 +622,6 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
mTimeStampsValid[timeStampIx] = false;
}
-
- // This is needed to handle CTS DecoderTest testCodecResetsH264WithoutSurface,
- // which is not sending SPS/PPS after port reconfiguration and flush to the codec.
- if (unsupportedDimensions && !mFlushNeeded) {
- bool portWillReset = false;
- mNewWidth = s_dec_op.u4_pic_wd;
- mNewHeight = s_dec_op.u4_pic_ht;
-
- status_t err = reInitDecoder(mNewWidth, mNewHeight);
- if (err != OK) {
- notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
- mSignalledError = true;
- return;
- }
-
- handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
-
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
-
- ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
- return;
- }
-
- if (unsupportedLevel && !mFlushNeeded) {
-
- mNewLevel = 51;
-
- status_t err = reInitDecoder(mNewWidth, mNewHeight);
- if (err != OK) {
- notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
- mSignalledError = true;
- return;
- }
-
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
-
- ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
- return;
- }
-
// If the decoder is in the changing resolution mode and there is no output present,
// that means the switching is done and it's ready to reset the decoder and the plugin.
if (mChangingResolution && !s_dec_op.u4_output_present) {
@@ -796,28 +631,11 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
continue;
}
- if (unsupportedDimensions || resChanged) {
+ if (resChanged) {
mChangingResolution = true;
if (mFlushNeeded) {
setFlushMode();
}
-
- if (unsupportedDimensions) {
- mNewWidth = s_dec_op.u4_pic_wd;
- mNewHeight = s_dec_op.u4_pic_ht;
- mInitNeeded = true;
- }
- continue;
- }
-
- if (unsupportedLevel) {
-
- if (mFlushNeeded) {
- setFlushMode();
- }
-
- mNewLevel = 51;
- mInitNeeded = true;
continue;
}
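
Taken together, the SoftAVCDec changes move to a lazy-initialization pattern: the codec context is created on the first onQueueFilled() call rather than in the constructor, and the stride is pushed to the library only when the output width actually changes. A generic sketch of that pattern (hypothetical Decoder/createDecoder names, not the ih264d API):

    #include <cstdint>
    #include <memory>

    struct Decoder { void setStride(uint32_t) {} };   // stand-in for the codec context
    static std::unique_ptr<Decoder> createDecoder() { return std::unique_ptr<Decoder>(new Decoder); }

    class LazyCodec {
    public:
        // Called per buffer: create the decoder on first use, and only
        // re-apply the stride when the output width changes.
        bool onBuffer(uint32_t outputWidth) {
            if (!mCtx) {
                mCtx = createDecoder();
                if (!mCtx) return false;     // init failure -> caller signals an error
                mStride = 0;                 // force a setStride() below
            }
            if (outputWidth != mStride) {
                mStride = outputWidth;
                mCtx->setStride(mStride);
            }
            return true;
        }

    private:
        std::unique_ptr<Decoder> mCtx;
        uint32_t mStride = 0;
    };
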
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index 1ec8991..9dcabb4 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -23,9 +23,6 @@
namespace android {
-#define ivd_aligned_malloc(alignment, size) memalign(alignment, size)
-#define ivd_aligned_free(buf) free(buf)
-
/** Number of entries in the time-stamp array */
#define MAX_TIME_STAMPS 64
@@ -62,7 +59,6 @@ protected:
virtual void onQueueFilled(OMX_U32 portIndex);
virtual void onPortFlushCompleted(OMX_U32 portIndex);
virtual void onReset();
- virtual OMX_ERRORTYPE internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params);
private:
// Number of input and output buffers
enum {
@@ -70,8 +66,6 @@ private:
};
iv_obj_t *mCodecCtx; // Codec context
- iv_mem_rec_t *mMemRecords; // Memory records requested by the codec
- size_t mNumMemRecords; // Number of memory records requested by the codec
size_t mNumCores; // Number of cores to be uesd by the codec
@@ -97,17 +91,15 @@ private:
bool mIsInFlush; // codec is flush mode
bool mReceivedEOS; // EOS is receieved on input port
- bool mInitNeeded;
- uint32_t mNewWidth;
- uint32_t mNewHeight;
- uint32_t mNewLevel;
+
// The input stream has changed to a different resolution, which is still supported by the
// codec. So the codec is switching to decode the new resolution.
bool mChangingResolution;
bool mFlushNeeded;
bool mSignalledError;
+ size_t mStride;
- status_t initDecoder(uint32_t width, uint32_t height);
+ status_t initDecoder();
status_t deInitDecoder();
status_t setFlushMode();
status_t setParams(size_t stride);
@@ -115,7 +107,7 @@ private:
status_t setNumCores();
status_t resetDecoder();
status_t resetPlugin();
- status_t reInitDecoder(uint32_t width, uint32_t height);
+
void setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/conceal.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/conceal.cpp
index e9ead01..03e4119 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/conceal.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/conceal.cpp
@@ -19,6 +19,7 @@
#include "vlc_decode.h"
#include "bitstream.h"
#include "scaling.h"
+#include "log/log.h"
/* ====================================================================== /
Function : ConcealTexture_I()
@@ -137,6 +138,10 @@ Modified: 6/04/2001 rewrote the function
****************************************************************************/
void CopyVopMB(Vop *curr, uint8 *prevFrame, int mbnum, int width_Y, int height)
{
+ if (curr == NULL || prevFrame == NULL) {
+ ALOGE("b/24630158");
+ return;
+ }
int width_C = width_Y >> 1;
int row = MB_SIZE;
uint8 *y1, *y2, *u1, *u2, *v1, *v2;
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index b78b36f..0d80098 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -42,7 +42,8 @@ SoftRaw::SoftRaw(
: SimpleSoftOMXComponent(name, callbacks, appData, component),
mSignalledError(false),
mChannelCount(2),
- mSampleRate(44100) {
+ mSampleRate(44100),
+ mBitsPerSample(16) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
}
@@ -110,7 +111,7 @@ OMX_ERRORTYPE SoftRaw::internalGetParameter(
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
- pcmParams->nBitPerSample = 16;
+ pcmParams->nBitPerSample = mBitsPerSample;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
@@ -154,6 +155,7 @@ OMX_ERRORTYPE SoftRaw::internalSetParameter(
mChannelCount = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
+ mBitsPerSample = pcmParams->nBitPerSample;
return OMX_ErrorNone;
}
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
index 94b0ef1..894889f 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.h
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -50,6 +50,7 @@ private:
int32_t mChannelCount;
int32_t mSampleRate;
+ int32_t mBitsPerSample;
void initPorts();
status_t initDecoder();
diff --git a/media/libstagefright/data/media_codecs_google_tv.xml b/media/libstagefright/data/media_codecs_google_tv.xml
new file mode 100644
index 0000000..330c6fb
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_tv.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="OMX.google.mpeg2.decoder" type="video/mpeg2">
+ <!-- profiles and levels: ProfileMain : LevelHL -->
+ <Limit name="size" min="16x16" max="1920x1088" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-244800" />
+ <Limit name="bitrate" range="1-20000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index a83789c..07e26a2 100755..100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,15 +16,6 @@
<Included>
<Decoders>
- <MediaCodec name="OMX.google.mpeg2.decoder" type="video/mpeg2">
- <!-- profiles and levels: ProfileMain : LevelHL -->
- <Limit name="size" min="16x16" max="1920x1088" />
- <Limit name="alignment" value="2x2" />
- <Limit name="block-size" value="16x16" />
- <Limit name="blocks-per-second" range="1-244800" />
- <Limit name="bitrate" range="1-20000000" />
- <Feature name="adaptive-playback" />
- </MediaCodec>
<MediaCodec name="OMX.google.mpeg4.decoder">
<Type name="video/mp4v-es" />
<!--
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index e549ff6..725a574 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -601,13 +601,24 @@ sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
msg->setWhat(what);
msg->mNumItems = static_cast<size_t>(parcel.readInt32());
+ if (msg->mNumItems > kMaxNumItems) {
+ ALOGE("Too large number of items clipped.");
+ msg->mNumItems = kMaxNumItems;
+ }
+
for (size_t i = 0; i < msg->mNumItems; ++i) {
Item *item = &msg->mItems[i];
const char *name = parcel.readCString();
- item->setName(name, strlen(name));
- item->mType = static_cast<Type>(parcel.readInt32());
+ if (name == NULL) {
+ ALOGE("Failed reading name for an item. Parsing aborted.");
+ msg->mNumItems = i;
+ break;
+ }
+ item->mType = static_cast<Type>(parcel.readInt32());
+ // setName() happens below so that we don't leak memory when parsing
+ // is aborted in the middle.
switch (item->mType) {
case kTypeInt32:
{
@@ -641,7 +652,16 @@ sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
case kTypeString:
{
- item->u.stringValue = new AString(parcel.readCString());
+ const char *stringValue = parcel.readCString();
+ if (stringValue == NULL) {
+ ALOGE("Failed reading string value from a parcel. "
+ "Parsing aborted.");
+ msg->mNumItems = i;
+ continue;
+ // The loop will terminate subsequently.
+ } else {
+ item->u.stringValue = new AString(stringValue);
+ }
break;
}
@@ -660,6 +680,8 @@ sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
TRESPASS();
}
}
+
+ item->setName(name, strlen(name));
}
return msg;
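
The FromParcel() hardening follows a general rule for untrusted serialized input: clamp any attacker-controlled count before looping over it, and stop parsing (rather than dereference NULL) when a string read fails. A small generic illustration of the same pattern, not the Parcel API itself:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Item { std::string name; int32_t value; };

    // Deserialize at most kMaxItems items; readString() returning false
    // mirrors a NULL readCString() result and aborts the loop cleanly.
    template <typename ReadI32, typename ReadStr>
    static std::vector<Item> readItems(ReadI32 readInt32, ReadStr readString,
                                       size_t kMaxItems) {
        std::vector<Item> items;
        size_t count = static_cast<size_t>(readInt32());
        if (count > kMaxItems) {
            fprintf(stderr, "clamping oversized item count\n");
            count = kMaxItems;               // never trust the wire value
        }
        for (size_t i = 0; i < count; ++i) {
            std::string name;
            if (!readString(&name)) {
                break;                       // keep the items parsed so far
            }
            items.push_back({name, readInt32()});
        }
        return items;
    }
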
diff --git a/media/libstagefright/foundation/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
index b230400..4bcb1f6 100644
--- a/media/libstagefright/foundation/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -1318,7 +1318,8 @@ void ANetworkSession::threadLoop() {
List<sp<Session> > sessionsToAdd;
- for (size_t i = mSessions.size(); res > 0 && i-- > 0;) {
+ for (size_t i = mSessions.size(); res > 0 && i > 0;) {
+ i--;
const sp<Session> &session = mSessions.valueAt(i);
int s = session->socket();
@@ -1409,4 +1410,3 @@ void ANetworkSession::threadLoop() {
}
} // namespace android
-
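
Several hunks in this patch (ANetworkSession above, PlaylistFetcher and OMXNodeInstance below) replace the `i-- > 0` loop condition with a decrement at the top of the body. Both forms walk a container back to front with an unsigned index without underflow; the rewritten form simply keeps the side effect out of the condition. A tiny sketch of the idiom:

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> v = {1, 2, 3, 4};
        // Visit elements from last to first with a size_t index.
        for (size_t i = v.size(); i > 0;) {
            i--;                 // decrement first so v[i] is always in range
            printf("%zu -> %d\n", i, v[i]);
        }
        return 0;
    }
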
diff --git a/media/libstagefright/foundation/base64.cpp b/media/libstagefright/foundation/base64.cpp
index dcf5bef..7da7db9 100644
--- a/media/libstagefright/foundation/base64.cpp
+++ b/media/libstagefright/foundation/base64.cpp
@@ -22,11 +22,11 @@
namespace android {
sp<ABuffer> decodeBase64(const AString &s) {
- if ((s.size() % 4) != 0) {
+ size_t n = s.size();
+ if ((n % 4) != 0) {
return NULL;
}
- size_t n = s.size();
size_t padding = 0;
if (n >= 1 && s.c_str()[n - 1] == '=') {
padding = 1;
@@ -40,11 +40,16 @@ sp<ABuffer> decodeBase64(const AString &s) {
}
}
- size_t outLen = 3 * s.size() / 4 - padding;
+ // We divide first to avoid overflow. It's OK to do this because we
+ // already made sure that n % 4 == 0.
+ size_t outLen = (n / 4) * 3 - padding;
sp<ABuffer> buffer = new ABuffer(outLen);
uint8_t *out = buffer->data();
+ if (out == NULL || buffer->size() < outLen) {
+ return NULL;
+ }
size_t j = 0;
uint32_t accum = 0;
for (size_t i = 0; i < n; ++i) {
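
The reordered length computation matters on 32-bit builds: `3 * s.size() / 4` can wrap around when the input is huge, while `(n / 4) * 3` can never exceed n, and it stays exact because n is already known to be a multiple of 4. A quick sketch of the difference:

    #include <cstdint>
    #include <cstdio>

    int main() {
        size_t n = SIZE_MAX - 3;        // pathological but 4-aligned input length
        size_t risky = 3 * n / 4;       // 3 * n wraps before the division
        size_t safe  = (n / 4) * 3;     // never exceeds n; exact when n % 4 == 0
        printf("risky=%zu safe=%zu\n", risky, safe);
        return 0;
    }
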
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 4667c71..0d504e4 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -276,7 +276,7 @@ protected:
float getAbortThreshold(
ssize_t currentBWIndex, ssize_t targetBWIndex) const;
void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
- size_t getBandwidthIndex(int32_t bandwidthBps);
+ virtual size_t getBandwidthIndex(int32_t bandwidthBps);
ssize_t getLowestValidBandwidthIndex() const;
HLSTime latestMediaSegmentStartTime() const;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 5ad29c3..52be368 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -975,7 +975,9 @@ bool PlaylistFetcher::initDownloadState(
if (mSegmentStartTimeUs < 0) {
if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
// If this is a live session, start 3 segments from the end on connect
- mSeqNumber = lastSeqNumberInPlaylist - 3;
+ if (!getSeqNumberInLiveStreaming()) {
+ mSeqNumber = lastSeqNumberInPlaylist - 3;
+ }
if (mSeqNumber < firstSeqNumberInPlaylist) {
mSeqNumber = firstSeqNumberInPlaylist;
}
@@ -1632,7 +1634,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
if (mSegmentFirstPTS < 0ll) {
// get the smallest first PTS from all streams present in this parser
- for (size_t i = mPacketSources.size(); i-- > 0;) {
+ for (size_t i = mPacketSources.size(); i > 0;) {
+ i--;
const LiveSession::StreamType stream = mPacketSources.keyAt(i);
if (stream == LiveSession::STREAMTYPE_SUBTITLES) {
ALOGE("MPEG2 Transport streams do not contain subtitles.");
@@ -1687,7 +1690,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
}
status_t err = OK;
- for (size_t i = mPacketSources.size(); i-- > 0;) {
+ for (size_t i = mPacketSources.size(); i > 0;) {
+ i--;
sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
const LiveSession::StreamType stream = mPacketSources.keyAt(i);
@@ -1811,7 +1815,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
}
if (err != OK) {
- for (size_t i = mPacketSources.size(); i-- > 0;) {
+ for (size_t i = mPacketSources.size(); i > 0;) {
+ i--;
sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
packetSource->clear();
}
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 74cc0dd..6b60b65 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -249,6 +249,7 @@ protected:
void updateDuration();
void updateTargetDuration();
virtual bool checkSwitchBandwidth() { return false; }
+ virtual bool getSeqNumberInLiveStreaming() { return false; }
DISALLOW_EVIL_CONSTRUCTORS(PlaylistFetcher);
};
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index d9a198d..76d65f0 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -194,6 +194,13 @@ struct id3_header {
if (header.version_major == 4) {
void *copy = malloc(size);
+ if (copy == NULL) {
+ free(mData);
+ mData = NULL;
+ ALOGE("b/24623447, no more memory");
+ return false;
+ }
+
memcpy(copy, mData, size);
bool success = removeUnsynchronizationV2_4(false /* iTunesHack */);
@@ -234,7 +241,14 @@ struct id3_header {
return false;
}
- size_t extendedHeaderSize = U32_AT(&mData[0]) + 4;
+ size_t extendedHeaderSize = U32_AT(&mData[0]);
+ if (extendedHeaderSize > SIZE_MAX - 4) {
+ free(mData);
+ mData = NULL;
+ ALOGE("b/24623447, extendedHeaderSize is too large");
+ return false;
+ }
+ extendedHeaderSize += 4;
if (extendedHeaderSize > mSize) {
free(mData);
@@ -252,7 +266,10 @@ struct id3_header {
if (extendedHeaderSize >= 10) {
size_t paddingSize = U32_AT(&mData[6]);
- if (mFirstFrameOffset + paddingSize > mSize) {
+ if (paddingSize > SIZE_MAX - mFirstFrameOffset) {
+ ALOGE("b/24623447, paddingSize is too large");
+ }
+ if (paddingSize > mSize - mFirstFrameOffset) {
free(mData);
mData = NULL;
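
Both ID3 checks use the standard pattern for handling untrusted sizes: compare against SIZE_MAX (or against the remaining budget) before doing the addition, so the overflow never happens in the first place. A minimal sketch:

    #include <cstdint>
    #include <cstdio>

    // Returns true if base + extra fits in size_t and stays within limit.
    static bool fitsWithin(size_t base, size_t extra, size_t limit) {
        if (extra > SIZE_MAX - base) {
            return false;                // base + extra would overflow
        }
        return base + extra <= limit;
    }

    int main() {
        printf("%d\n", fitsWithin(100, 4, 1024));               // 1
        printf("%d\n", fitsWithin(SIZE_MAX - 2, 4, SIZE_MAX));  // 0, caught before overflowing
        return 0;
    }
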
diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h
index e98ca82..9a0ba2f 100644
--- a/media/libstagefright/include/AACExtractor.h
+++ b/media/libstagefright/include/AACExtractor.h
@@ -21,6 +21,7 @@
#include <media/stagefright/MediaExtractor.h>
#include <utils/Vector.h>
+#include "include/APE.h"
namespace android {
@@ -48,6 +49,9 @@ private:
Vector<uint64_t> mOffsetVector;
int64_t mFrameDurationUs;
+ APE ape;
+ sp<MetaData> mApeMeta;
+
AACExtractor(const AACExtractor &);
AACExtractor &operator=(const AACExtractor &);
};
diff --git a/media/libstagefright/include/APE.h b/media/libstagefright/include/APE.h
new file mode 100644
index 0000000..db49bb0
--- /dev/null
+++ b/media/libstagefright/include/APE.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) Texas Instruments - http://www.ti.com/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef APE_TAG_H_
+
+#define APE_TAG_H_
+
+#include <utils/RefBase.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+class APE{
+public:
+ APE();
+ ~APE();
+ bool isAPE(uint8_t *apeTag) const;
+ bool parseAPE(const sp<DataSource> &source, off64_t offset,
+ sp<MetaData> &meta);
+
+private:
+ uint32_t itemNumber;
+ uint32_t itemFlags;
+ size_t lenValue;
+};
+
+} //namespace android
+
+#endif //APE_TAG_H_
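
A hedged sketch of how AACExtractor can drive this helper, based only on the declarations above; the 32-byte probe size is an assumption, not something this header specifies:

    #include "include/APE.h"
    #include <media/stagefright/DataSource.h>
    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Probe 'source' at 'offset' for an APE tag and, if one is found, parse it
    // into freshly allocated metadata. Returns NULL when no tag is present.
    static sp<MetaData> maybeParseApeTag(const sp<DataSource> &source, off64_t offset) {
        uint8_t header[32];                          // assumed probe size
        if (source->readAt(offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
            return NULL;
        }
        APE ape;
        if (!ape.isAPE(header)) {
            return NULL;
        }
        sp<MetaData> meta = new MetaData;
        if (!ape.parseAPE(source, offset, meta)) {
            return NULL;
        }
        return meta;
    }
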
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 758b2c9..1a8e6c8 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -109,6 +109,9 @@ struct AwesomePlayer {
void postAudioTearDown();
status_t dump(int fd, const Vector<String16> &args) const;
+ status_t suspend();
+ status_t resume();
+
private:
friend struct AwesomeEvent;
friend struct PreviewPlayer;
@@ -193,6 +196,7 @@ private:
uint32_t mFlags;
uint32_t mExtractorFlags;
uint32_t mSinceLastDropped;
+ bool mDropFramesDisable; // hevc test
int64_t mTimeSourceDeltaUs;
int64_t mVideoTimeUs;
@@ -355,6 +359,8 @@ private:
bool mAudioTearDownWasPlaying;
int64_t mAudioTearDownPosition;
+ bool mIsFirstFrameAfterResume;
+
status_t setVideoScalingMode(int32_t mode);
status_t setVideoScalingMode_l(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index d36da6a..1f282ca 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -28,7 +28,7 @@ struct ALooper;
struct PageCache;
struct NuCachedSource2 : public DataSource {
- NuCachedSource2(
+ static sp<NuCachedSource2> Create(
const sp<DataSource> &source,
const char *cacheConfig = NULL,
bool disconnectAtHighwatermark = false);
@@ -66,12 +66,20 @@ struct NuCachedSource2 : public DataSource {
String8 *cacheConfig,
bool *disconnectAtHighwatermark);
+ virtual status_t disconnectWhileSuspend();
+ virtual status_t connectWhileResume();
+
protected:
virtual ~NuCachedSource2();
protected:
friend struct AHandlerReflector<NuCachedSource2>;
+ NuCachedSource2(
+ const sp<DataSource> &source,
+ const char *cacheConfig,
+ bool disconnectAtHighwatermark);
+
enum {
kPageSize = 65536,
kDefaultHighWaterThreshold = 20 * 1024 * 1024,
@@ -118,6 +126,8 @@ protected:
bool mDisconnectAtHighwatermark;
+ bool mSuspended;
+
void onMessageReceived(const sp<AMessage> &msg);
void onFetch();
void onRead(const sp<AMessage> &msg);
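
Hiding the constructor behind a static Create() is the usual way to run fallible setup (here, parsing the cache config and hooking the source up to its looper) before the object is handed out. A generic sketch of the shape, not the NuCachedSource2 internals:

    #include <memory>
    #include <string>

    class CachedSource {
    public:
        // Sole way to obtain an instance; returns nullptr if setup fails.
        static std::shared_ptr<CachedSource> Create(const std::string &config) {
            std::shared_ptr<CachedSource> src(new CachedSource(config));
            if (!src->init()) {
                return nullptr;          // construction succeeded, setup did not
            }
            return src;
        }

    protected:
        explicit CachedSource(const std::string &config) : mConfig(config) {}

    private:
        bool init() { return !mConfig.empty(); }   // placeholder for the real setup
        std::string mConfig;
    };
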
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index d468dfc..e7c4f6d 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -140,6 +140,8 @@ public:
virtual void binderDied(const wp<IBinder> &the_late_who);
+ virtual bool isSecure(IOMX::node_id node);
+
OMX_ERRORTYPE OnEvent(
node_id node,
OMX_IN OMX_EVENTTYPE eEvent,
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index f68e0a9..e5fb45b 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -125,6 +125,10 @@ struct OMXNodeInstance {
const void *data,
size_t size);
+ bool isSecure() const {
+ return mIsSecure;
+ }
+
// handles messages and removes them from the list
void onMessages(std::list<omx_message> &messages);
void onMessage(const omx_message &msg);
@@ -142,6 +146,7 @@ private:
OMX_HANDLETYPE mHandle;
sp<IOMXObserver> mObserver;
bool mDying;
+ bool mIsSecure;
// Lock only covers mGraphicBufferSource. We can't always use mLock
// because of rare instances where we'd end up locking it recursively.
diff --git a/media/libstagefright/include/SampleIterator.h b/media/libstagefright/include/SampleIterator.h
index 7053247..2ef41ae 100644
--- a/media/libstagefright/include/SampleIterator.h
+++ b/media/libstagefright/include/SampleIterator.h
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#ifndef SAMPLE_ITERATOR_H_
+
+#define SAMPLE_ITERATOR_H_
+
#include <utils/Vector.h>
namespace android {
@@ -75,3 +79,4 @@ private:
} // namespace android
+#endif // SAMPLE_ITERATOR_H_
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index ecc2573..06057d0 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -31,6 +31,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <utils/String8.h>
+#include <media/stagefright/foundation/ABitReader.h>
#include <inttypes.h>
@@ -136,6 +137,7 @@ private:
enum Type {
AVC,
AAC,
+ HEVC,
OTHER
};
@@ -234,6 +236,17 @@ MatroskaSource::MatroskaSource(
mNALSizeLen = 1 + (avcc[4] & 3);
ALOGV("mNALSizeLen = %zu", mNALSizeLen);
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ mType = HEVC;
+
+ uint32_t type;
+ const uint8_t *data;
+ size_t size;
+ CHECK(meta->findData(kKeyHVCC, &type, (const void **)&data, &size));
+
+ CHECK(size >= 22); // byte 21 of the hvcC record is read below
+ mNALSizeLen = 1 + (data[14 + 7] & 3);
+ ALOGV("mNALSizeLen = %zu", mNALSizeLen);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
mType = AAC;
}
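
The `data[14 + 7]` read picks byte 21 of the HEVCDecoderConfigurationRecord, whose low two bits carry lengthSizeMinusOne, so the per-NAL length prefix in the stream is that value plus one. A small sketch (assumes a complete hvcC blob):

    #include <stddef.h>
    #include <stdint.h>

    // Returns the NAL length-prefix size (1..4 bytes) encoded in an hvcC
    // record, or 0 if the record is too short to contain byte 21.
    static size_t nalLengthSizeFromHvcc(const uint8_t *hvcc, size_t size) {
        if (size < 22) {
            return 0;
        }
        return 1 + (hvcc[21] & 3);       // lengthSizeMinusOne + 1
    }
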
@@ -521,8 +534,9 @@ status_t MatroskaSource::readBlock() {
const mkvparser::Block *block = mBlockIter.block();
int64_t timeUs = mBlockIter.blockTimeUs();
+ int frameCount = block->GetFrameCount();
- for (int i = 0; i < block->GetFrameCount(); ++i) {
+ for (int i = 0; i < frameCount; ++i) {
const mkvparser::Block::Frame &frame = block->GetFrame(i);
MediaBuffer *mbuf = new MediaBuffer(frame.len);
@@ -534,6 +548,7 @@ status_t MatroskaSource::readBlock() {
mPendingFrames.clear();
mBlockIter.advance();
+ mbuf->release();
return ERROR_IO;
}
@@ -542,6 +557,27 @@ status_t MatroskaSource::readBlock() {
mBlockIter.advance();
+ if (!mBlockIter.eos() && frameCount > 1) {
+ // For files with lacing enabled, we need to amend the kKeyTime of
+ // each frame so that their kKeyTime are advanced accordingly (instead
+ // of being set to the same value). To do this, we need to find out
+ // the duration of the block using the start time of the next block.
+ int64_t duration = mBlockIter.blockTimeUs() - timeUs;
+ int64_t durationPerFrame = duration / frameCount;
+ int64_t durationRemainder = duration % frameCount;
+
+ // We split duration to each of the frame, distributing the remainder (if any)
+ // to the later frames. The later frames are processed first due to the
+ // use of the iterator for the doubly linked list
+ List<MediaBuffer *>::iterator it = mPendingFrames.end();
+ for (int i = frameCount - 1; i >= 0; --i) {
+ --it;
+ int64_t frameRemainder = durationRemainder >= frameCount - i ? 1 : 0;
+ int64_t frameTimeUs = timeUs + durationPerFrame * i + frameRemainder;
+ (*it)->meta_data()->setInt64(kKeyTime, frameTimeUs);
+ }
+ }
+
return OK;
}
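
The timestamp fix amounts to splitting the block duration evenly across the laced frames and handing the leftover microseconds, one each, to the last frames. The same arithmetic as a standalone function:

    #include <cstdint>
    #include <vector>

    // Spread blockDurationUs over frameCount frames starting at blockTimeUs,
    // giving the remainder microseconds (one each) to the later frames.
    static std::vector<int64_t> laceTimestamps(int64_t blockTimeUs,
                                               int64_t blockDurationUs,
                                               int frameCount) {
        std::vector<int64_t> ts(frameCount);
        int64_t perFrame = blockDurationUs / frameCount;
        int64_t remainder = blockDurationUs % frameCount;
        for (int i = 0; i < frameCount; ++i) {
            int64_t extra = (remainder >= frameCount - i) ? 1 : 0;
            ts[i] = blockTimeUs + perFrame * i + extra;
        }
        return ts;
    }
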
@@ -581,7 +617,7 @@ status_t MatroskaSource::read(
MediaBuffer *frame = *mPendingFrames.begin();
mPendingFrames.erase(mPendingFrames.begin());
- if (mType != AVC) {
+ if (mType != AVC && mType != HEVC) {
if (targetSampleTimeUs >= 0ll) {
frame->meta_data()->setInt64(
kKeyTargetTime, targetSampleTimeUs);
@@ -633,9 +669,11 @@ status_t MatroskaSource::read(
if (pass == 1) {
memcpy(&dstPtr[dstOffset], "\x00\x00\x00\x01", 4);
- memcpy(&dstPtr[dstOffset + 4],
- &srcPtr[srcOffset + mNALSizeLen],
- NALsize);
+ if (frame != buffer) {
+ memcpy(&dstPtr[dstOffset + 4],
+ &srcPtr[srcOffset + mNALSizeLen],
+ NALsize);
+ }
}
dstOffset += 4; // 0x00 00 00 01
@@ -657,7 +695,13 @@ status_t MatroskaSource::read(
if (pass == 0) {
dstSize = dstOffset;
- buffer = new MediaBuffer(dstSize);
+ if (dstSize == srcSize && mNALSizeLen == 4) {
+ // In this special case we can re-use the input buffer by substituting
+ // each 4-byte nal size with a 4-byte start code
+ buffer = frame;
+ } else {
+ buffer = new MediaBuffer(dstSize);
+ }
int64_t timeUs;
CHECK(frame->meta_data()->findInt64(kKeyTime, &timeUs));
@@ -671,8 +715,10 @@ status_t MatroskaSource::read(
}
}
- frame->release();
- frame = NULL;
+ if (frame != buffer) {
+ frame->release();
+ frame = NULL;
+ }
if (targetSampleTimeUs >= 0ll) {
buffer->meta_data()->setInt64(
@@ -819,6 +865,17 @@ static void addESDSFromCodecPrivate(
const sp<MetaData> &meta,
bool isAudio, const void *priv, size_t privSize) {
+ if (isAudio) {
+ ABitReader br((const uint8_t *)priv, privSize);
+ uint32_t objectType = br.getBits(5);
+
+ if (objectType == 31) { // escape value (e.g. AAC-ELD) => additional 6 bits
+ objectType = 32 + br.getBits(6);
+ }
+
+ meta->setInt32(kKeyAACAOT, objectType);
+ }
+
int privSizeBytesRequired = bytesForSize(privSize);
int esdsSize2 = 14 + privSizeBytesRequired + privSize;
int esdsSize2BytesRequired = bytesForSize(esdsSize2);
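
The added block reads the audioObjectType field at the start of the AudioSpecificConfig: five bits, where 31 is an escape value meaning "add 32 to the next six bits", which is how extended types such as AAC-ELD are signalled. The same extraction in plain bit arithmetic, without ABitReader:

    #include <stddef.h>
    #include <stdint.h>

    // Extract the MPEG-4 audioObjectType from the first bytes of an
    // AudioSpecificConfig. Returns 0 if the buffer is too small.
    static uint32_t aacObjectType(const uint8_t *csd, size_t size) {
        if (size < 1) {
            return 0;
        }
        uint32_t aot = csd[0] >> 3;                    // first 5 bits
        if (aot == 31) {                               // escape value
            if (size < 2) {
                return 0;
            }
            // next 6 bits straddle the byte boundary: low 3 bits of byte 0,
            // high 3 bits of byte 1
            aot = 32 + (((csd[0] & 0x07) << 3) | (csd[1] >> 5));
        }
        return aot;
    }
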
@@ -979,6 +1036,10 @@ void MatroskaExtractor::addTracks() {
codecID);
continue;
}
+ } else if (!strcmp("V_MPEGH/ISO/HEVC", codecID)) {
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
+ meta->setData(kKeyHVCC, kTypeHVCC, codecPrivate, codecPrivateSize);
+
} else if (!strcmp("V_VP8", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
} else if (!strcmp("V_VP9", codecID)) {
@@ -1000,7 +1061,9 @@ void MatroskaExtractor::addTracks() {
if (!strcmp("A_AAC", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
- CHECK(codecPrivateSize >= 2);
+ if (codecPrivateSize < 2) {
+ return;
+ }
addESDSFromCodecPrivate(
meta, true, codecPrivate, codecPrivateSize);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 2ef30e3..efb1a1c 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -194,6 +194,11 @@ void OMX::binderDied(const wp<IBinder> &the_late_who) {
instance->onObserverDied(mMaster);
}
+bool OMX::isSecure(node_id node) {
+ OMXNodeInstance *instance = findInstance(node);
+ return (instance == NULL ? false : instance->isSecure());
+}
+
bool OMX::livesLocally(node_id /* node */, pid_t pid) {
return pid == getpid();
}
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index ae3cb33..f7bb733 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -25,52 +25,58 @@
#include <dlfcn.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <cutils/properties.h>
namespace android {
-OMXMaster::OMXMaster()
- : mVendorLibHandle(NULL) {
+OMXMaster::OMXMaster() {
addVendorPlugin();
addPlugin(new SoftOMXPlugin);
+ addUserPlugin();
}
OMXMaster::~OMXMaster() {
clearPlugins();
-
- if (mVendorLibHandle != NULL) {
- dlclose(mVendorLibHandle);
- mVendorLibHandle = NULL;
- }
}
void OMXMaster::addVendorPlugin() {
addPlugin("libstagefrighthw.so");
}
+void OMXMaster::addUserPlugin() {
+ char plugin[PROPERTY_VALUE_MAX];
+ if (property_get("media.sf.omx-plugin", plugin, NULL)) {
+ addPlugin(plugin);
+ }
+}
+
void OMXMaster::addPlugin(const char *libname) {
- mVendorLibHandle = dlopen(libname, RTLD_NOW);
+ void* handle = dlopen(libname, RTLD_NOW);
- if (mVendorLibHandle == NULL) {
+ if (handle == NULL) {
return;
}
typedef OMXPluginBase *(*CreateOMXPluginFunc)();
CreateOMXPluginFunc createOMXPlugin =
(CreateOMXPluginFunc)dlsym(
- mVendorLibHandle, "createOMXPlugin");
+ handle, "createOMXPlugin");
if (!createOMXPlugin)
createOMXPlugin = (CreateOMXPluginFunc)dlsym(
- mVendorLibHandle, "_ZN7android15createOMXPluginEv");
+ handle, "_ZN7android15createOMXPluginEv");
if (createOMXPlugin) {
- addPlugin((*createOMXPlugin)());
+ addPlugin((*createOMXPlugin)(), handle);
}
}
-void OMXMaster::addPlugin(OMXPluginBase *plugin) {
+void OMXMaster::addPlugin(OMXPluginBase *plugin, void *handle) {
+ if (plugin == 0) {
+ return;
+ }
Mutex::Autolock autoLock(mLock);
- mPlugins.push_back(plugin);
+ mPlugins.add(plugin, handle);
OMX_U32 index = 0;
@@ -100,21 +106,32 @@ void OMXMaster::clearPlugins() {
Mutex::Autolock autoLock(mLock);
typedef void (*DestroyOMXPluginFunc)(OMXPluginBase*);
- DestroyOMXPluginFunc destroyOMXPlugin =
- (DestroyOMXPluginFunc)dlsym(
- mVendorLibHandle, "destroyOMXPlugin");
- mPluginByComponentName.clear();
+ for (unsigned int i = 0; i < mPlugins.size(); i++) {
+ OMXPluginBase *plugin = mPlugins.keyAt(i);
+ if (plugin != NULL) {
+ void *handle = mPlugins.valueAt(i);
+
+ if (handle != NULL) {
+ DestroyOMXPluginFunc destroyOMXPlugin =
+ (DestroyOMXPluginFunc)dlsym(
+ handle, "destroyOMXPlugin");
+
+ if (destroyOMXPlugin)
+ destroyOMXPlugin(plugin);
+ else
+ delete plugin;
- for (List<OMXPluginBase *>::iterator it = mPlugins.begin();
- it != mPlugins.end(); ++it) {
- if (destroyOMXPlugin)
- destroyOMXPlugin(*it);
- else
- delete *it;
- *it = NULL;
+ dlclose(handle);
+ } else {
+ delete plugin;
+ }
+
+ plugin = NULL;
+ }
}
+ mPluginByComponentName.clear();
mPlugins.clear();
}
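
Keeping the dlopen() handle next to the plugin it produced is what allows clearPlugins() to resolve each library's own destroy entry point and then dlclose() that exact library. A generic sketch of the pairing (hypothetical Plugin type and symbol names, not OMXPluginBase):

    #include <dlfcn.h>
    #include <map>

    struct Plugin;                                  // opaque plugin object
    typedef Plugin *(*CreateFn)();
    typedef void (*DestroyFn)(Plugin *);

    static std::map<Plugin *, void *> gPlugins;     // plugin -> dlopen handle

    static void loadPlugin(const char *libname) {
        void *handle = dlopen(libname, RTLD_NOW);
        if (handle == NULL) {
            return;
        }
        CreateFn create = (CreateFn)dlsym(handle, "createPlugin");
        Plugin *plugin = create ? create() : NULL;
        if (plugin == NULL) {
            dlclose(handle);
            return;
        }
        gPlugins[plugin] = handle;                  // remember which library owns it
    }

    static void clearPlugins() {
        for (std::map<Plugin *, void *>::iterator it = gPlugins.begin();
                it != gPlugins.end(); ++it) {
            DestroyFn destroy = (DestroyFn)dlsym(it->second, "destroyPlugin");
            if (destroy != NULL) {
                destroy(it->first);                 // let the owning library free it
            }
            dlclose(it->second);
        }
        gPlugins.clear();
    }
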
diff --git a/media/libstagefright/omx/OMXMaster.h b/media/libstagefright/omx/OMXMaster.h
index 6069741..c07fed3 100644
--- a/media/libstagefright/omx/OMXMaster.h
+++ b/media/libstagefright/omx/OMXMaster.h
@@ -51,15 +51,14 @@ struct OMXMaster : public OMXPluginBase {
private:
Mutex mLock;
- List<OMXPluginBase *> mPlugins;
+ KeyedVector<OMXPluginBase *, void *> mPlugins;
KeyedVector<String8, OMXPluginBase *> mPluginByComponentName;
KeyedVector<OMX_COMPONENTTYPE *, OMXPluginBase *> mPluginByInstance;
- void *mVendorLibHandle;
-
void addVendorPlugin();
+ void addUserPlugin();
void addPlugin(const char *libname);
- void addPlugin(OMXPluginBase *plugin);
+ void addPlugin(OMXPluginBase *plugin, void *handle = NULL);
void clearPlugins();
OMXMaster(const OMXMaster &);
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index bdd1039..8687ba3 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -206,6 +206,7 @@ OMXNodeInstance::OMXNodeInstance(
mDebugLevelBumpPendingBuffers[1] = 0;
mMetadataType[0] = kMetadataBufferTypeInvalid;
mMetadataType[1] = kMetadataBufferTypeInvalid;
+ mIsSecure = AString(name).endsWith(".secure");
}
OMXNodeInstance::~OMXNodeInstance() {
@@ -1593,7 +1594,8 @@ void OMXNodeInstance::removeActiveBuffer(
void OMXNodeInstance::freeActiveBuffers() {
// Make sure to count down here, as freeBuffer will in turn remove
// the active buffer from the vector...
- for (size_t i = mActiveBuffers.size(); i--;) {
+ for (size_t i = mActiveBuffers.size(); i;) {
+ i--;
freeBuffer(mActiveBuffers[i].mPortIndex, mActiveBuffers[i].mID);
}
}
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 9389f67..4afd5d5 100755
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -77,6 +77,7 @@ OMX_ERRORTYPE SoftOMXPlugin::makeComponentInstance(
OMX_COMPONENTTYPE **component) {
ALOGV("makeComponentInstance '%s'", name);
+ dlerror(); // clear any existing error
for (size_t i = 0; i < kNumComponents; ++i) {
if (strcmp(name, kComponents[i].mName)) {
continue;
@@ -94,6 +95,8 @@ OMX_ERRORTYPE SoftOMXPlugin::makeComponentInstance(
return OMX_ErrorComponentNotFound;
}
+ ALOGV("load component %s for %s", libName.c_str(), name);
+
typedef SoftOMXComponent *(*CreateSoftOMXComponentFunc)(
const char *, const OMX_CALLBACKTYPE *,
OMX_PTR, OMX_COMPONENTTYPE **);
@@ -104,7 +107,8 @@ OMX_ERRORTYPE SoftOMXPlugin::makeComponentInstance(
"_Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPE"
"PvPP17OMX_COMPONENTTYPE");
- if (createSoftOMXComponent == NULL) {
+ if (const char *error = dlerror()) {
+ ALOGE("unable to dlsym %s: %s", libName.c_str(), error);
dlclose(libHandle);
libHandle = NULL;
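
dlsym() can legitimately return NULL for a symbol whose value is NULL, so the reliable failure test is the one used above: clear dlerror() before the lookup and check it afterwards. A compact sketch:

    #include <dlfcn.h>
    #include <stdio.h>

    static void *lookup(void *handle, const char *symbol) {
        dlerror();                              // clear any stale error
        void *addr = dlsym(handle, symbol);
        const char *err = dlerror();
        if (err != NULL) {
            fprintf(stderr, "dlsym(%s) failed: %s\n", symbol, err);
            return NULL;
        }
        return addr;                            // NULL only if the symbol's value is NULL
    }
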
diff --git a/media/libstagefright/timedtext/TextDescriptions.cpp b/media/libstagefright/timedtext/TextDescriptions.cpp
index f9c1fe0..c762a74 100644
--- a/media/libstagefright/timedtext/TextDescriptions.cpp
+++ b/media/libstagefright/timedtext/TextDescriptions.cpp
@@ -30,9 +30,9 @@ status_t TextDescriptions::getParcelOfDescriptions(
if (flags & IN_BAND_TEXT_3GPP) {
if (flags & GLOBAL_DESCRIPTIONS) {
- return extract3GPPGlobalDescriptions(data, size, parcel, 0);
+ return extract3GPPGlobalDescriptions(data, size, parcel);
} else if (flags & LOCAL_DESCRIPTIONS) {
- return extract3GPPLocalDescriptions(data, size, timeMs, parcel, 0);
+ return extract3GPPLocalDescriptions(data, size, timeMs, parcel);
}
} else if (flags & OUT_OF_BAND_TEXT_SRT) {
if (flags & LOCAL_DESCRIPTIONS) {
@@ -69,314 +69,437 @@ status_t TextDescriptions::extractSRTLocalDescriptions(
// styles, and 'krok' box contains karaoke timing and positions.
status_t TextDescriptions::extract3GPPLocalDescriptions(
const uint8_t *data, ssize_t size,
- int timeMs, Parcel *parcel, int depth) {
- if (depth == 0) {
- parcel->writeInt32(KEY_LOCAL_SETTING);
-
- // write start time to display this text sample
- parcel->writeInt32(KEY_START_TIME);
- parcel->writeInt32(timeMs);
-
- ssize_t textLen = (*data) << 8 | (*(data + 1));
-
- // write text sample length and text sample itself
- parcel->writeInt32(KEY_STRUCT_TEXT);
- parcel->writeInt32(textLen);
- parcel->writeInt32(textLen);
- parcel->write(data + 2, textLen);
-
- if (size > textLen) {
- data += (textLen + 2);
- size -= (textLen + 2);
- } else {
- return OK;
- }
+ int timeMs, Parcel *parcel) {
+
+ parcel->writeInt32(KEY_LOCAL_SETTING);
+
+ // write start time to display this text sample
+ parcel->writeInt32(KEY_START_TIME);
+ parcel->writeInt32(timeMs);
+
+ if (size < 2) {
+ return OK;
}
+ ssize_t textLen = (*data) << 8 | (*(data + 1));
- const uint8_t *tmpData = data;
- ssize_t chunkSize = U32_AT(tmpData);
- uint32_t chunkType = U32_AT(tmpData + 4);
+ if (size < textLen + 2) {
+ return OK;
+ }
- if (chunkSize <= 0) {
+ // write text sample length and text sample itself
+ parcel->writeInt32(KEY_STRUCT_TEXT);
+ parcel->writeInt32(textLen);
+ parcel->writeInt32(textLen);
+ parcel->write(data + 2, textLen);
+
+ if (size > textLen + 2) {
+ data += (textLen + 2);
+ size -= (textLen + 2);
+ } else {
return OK;
}
- tmpData += 8;
+ while (size >= 8) {
+ const uint8_t *tmpData = data;
+ ssize_t chunkSize = U32_AT(tmpData); // chunkSize counts the 8-byte size and type fields too
+ uint32_t chunkType = U32_AT(tmpData + 4);
- switch(chunkType) {
- // 'styl' box specifies the style of the text.
- case FOURCC('s', 't', 'y', 'l'):
- {
- uint16_t count = U16_AT(tmpData);
+ if (chunkSize <= 8 || chunkSize > size) {
+ return OK;
+ }
- tmpData += 2;
+ size_t remaining = chunkSize - 8;
- for (int i = 0; i < count; i++) {
- parcel->writeInt32(KEY_STRUCT_STYLE_LIST);
- parcel->writeInt32(KEY_START_CHAR);
- parcel->writeInt32(U16_AT(tmpData));
+ tmpData += 8;
- parcel->writeInt32(KEY_END_CHAR);
- parcel->writeInt32(U16_AT(tmpData + 2));
+ switch(chunkType) {
+ // 'styl' box specifies the style of the text.
+ case FOURCC('s', 't', 'y', 'l'):
+ {
+ if (remaining < 2) {
+ return OK;
+ }
+ size_t dataPos = parcel->dataPosition();
+ uint16_t count = U16_AT(tmpData);
- parcel->writeInt32(KEY_FONT_ID);
- parcel->writeInt32(U16_AT(tmpData + 4));
+ tmpData += 2;
+ remaining -= 2;
- parcel->writeInt32(KEY_STYLE_FLAGS);
- parcel->writeInt32(*(tmpData + 6));
+ for (int i = 0; i < count; i++) {
+ if (remaining < 12) {
+ // roll back
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
+ parcel->writeInt32(KEY_STRUCT_STYLE_LIST);
+ parcel->writeInt32(KEY_START_CHAR);
+ parcel->writeInt32(U16_AT(tmpData));
- parcel->writeInt32(KEY_FONT_SIZE);
- parcel->writeInt32(*(tmpData + 7));
+ parcel->writeInt32(KEY_END_CHAR);
+ parcel->writeInt32(U16_AT(tmpData + 2));
- parcel->writeInt32(KEY_TEXT_COLOR_RGBA);
- uint32_t rgba = *(tmpData + 8) << 24 | *(tmpData + 9) << 16
- | *(tmpData + 10) << 8 | *(tmpData + 11);
- parcel->writeInt32(rgba);
+ parcel->writeInt32(KEY_FONT_ID);
+ parcel->writeInt32(U16_AT(tmpData + 4));
- tmpData += 12;
+ parcel->writeInt32(KEY_STYLE_FLAGS);
+ parcel->writeInt32(*(tmpData + 6));
+
+ parcel->writeInt32(KEY_FONT_SIZE);
+ parcel->writeInt32(*(tmpData + 7));
+
+ parcel->writeInt32(KEY_TEXT_COLOR_RGBA);
+ uint32_t rgba = *(tmpData + 8) << 24 | *(tmpData + 9) << 16
+ | *(tmpData + 10) << 8 | *(tmpData + 11);
+ parcel->writeInt32(rgba);
+
+ tmpData += 12;
+ remaining -= 12;
+ }
+
+ break;
+ }
+ // 'krok' box. The number of highlight events is specified, and each
+ // event is specified by a starting and ending char offset and an end
+ // time for the event.
+ case FOURCC('k', 'r', 'o', 'k'):
+ {
+ if (remaining < 6) {
+ return OK;
+ }
+ size_t dataPos = parcel->dataPosition();
+
+ parcel->writeInt32(KEY_STRUCT_KARAOKE_LIST);
+
+ int startTime = U32_AT(tmpData);
+ uint16_t count = U16_AT(tmpData + 4);
+ parcel->writeInt32(count);
+
+ tmpData += 6;
+ remaining -= 6;
+ int lastEndTime = 0;
+
+ for (int i = 0; i < count; i++) {
+ if (remaining < 8) {
+ // roll back
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
+ parcel->writeInt32(startTime + lastEndTime);
+
+ lastEndTime = U32_AT(tmpData);
+ parcel->writeInt32(lastEndTime);
+
+ parcel->writeInt32(U16_AT(tmpData + 4));
+ parcel->writeInt32(U16_AT(tmpData + 6));
+
+ tmpData += 8;
+ remaining -= 8;
+ }
+
+ break;
}
+ // 'hlit' box specifies highlighted text
+ case FOURCC('h', 'l', 'i', 't'):
+ {
+ if (remaining < 4) {
+ return OK;
+ }
- break;
- }
- // 'krok' box. The number of highlight events is specified, and each
- // event is specified by a starting and ending char offset and an end
- // time for the event.
- case FOURCC('k', 'r', 'o', 'k'):
- {
+ parcel->writeInt32(KEY_STRUCT_HIGHLIGHT_LIST);
- parcel->writeInt32(KEY_STRUCT_KARAOKE_LIST);
+ // the start char offset to highlight
+ parcel->writeInt32(U16_AT(tmpData));
+ // the last char offset to highlight
+ parcel->writeInt32(U16_AT(tmpData + 2));
- int startTime = U32_AT(tmpData);
- uint16_t count = U16_AT(tmpData + 4);
- parcel->writeInt32(count);
+ tmpData += 4;
+ remaining -= 4;
+ break;
+ }
+ // 'hclr' box specifies the RGBA color: 8 bits each of
+ // red, green, blue, and an alpha(transparency) value
+ case FOURCC('h', 'c', 'l', 'r'):
+ {
+ if (remaining < 4) {
+ return OK;
+ }
+ parcel->writeInt32(KEY_HIGHLIGHT_COLOR_RGBA);
+
+ uint32_t rgba = *(tmpData) << 24 | *(tmpData + 1) << 16
+ | *(tmpData + 2) << 8 | *(tmpData + 3);
+ parcel->writeInt32(rgba);
+
+ tmpData += 4;
+ remaining -= 4;
+ break;
+ }
+ // 'dlay' box specifies a delay after a scroll in and/or
+ // before scroll out.
+ case FOURCC('d', 'l', 'a', 'y'):
+ {
+ if (remaining < 4) {
+ return OK;
+ }
+ parcel->writeInt32(KEY_SCROLL_DELAY);
+
+ uint32_t delay = *(tmpData) << 24 | *(tmpData + 1) << 16
+ | *(tmpData + 2) << 8 | *(tmpData + 3);
+ parcel->writeInt32(delay);
+
+ tmpData += 4;
+ remaining -= 4;
+ break;
+ }
+ // 'href' box for hyper text link
+ case FOURCC('h', 'r', 'e', 'f'):
+ {
+ if (remaining < 5) {
+ return OK;
+ }
- tmpData += 6;
- int lastEndTime = 0;
+ size_t dataPos = parcel->dataPosition();
- for (int i = 0; i < count; i++) {
- parcel->writeInt32(startTime + lastEndTime);
+ parcel->writeInt32(KEY_STRUCT_HYPER_TEXT_LIST);
- lastEndTime = U32_AT(tmpData);
- parcel->writeInt32(lastEndTime);
+ // the start offset of the text to be linked
+ parcel->writeInt32(U16_AT(tmpData));
+ // the end offset of the text
+ parcel->writeInt32(U16_AT(tmpData + 2));
+ // the number of bytes in the following URL
+ size_t len = *(tmpData + 4);
+ parcel->writeInt32(len);
+
+ remaining -= 5;
+
+ if (remaining < len) {
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
+ // the linked-to URL
+ parcel->writeInt32(len);
+ parcel->write(tmpData + 5, len);
+
+ tmpData += (5 + len);
+ remaining -= len;
+
+ if (remaining < 1) {
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
+
+ // the number of bytes in the following "alt" string
+ len = *tmpData;
+ parcel->writeInt32(len);
+
+ tmpData += 1;
+ remaining -= 1;
+ if (remaining < len) {
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
+
+ // an "alt" string for user display
+ parcel->writeInt32(len);
+ parcel->write(tmpData, len);
+
+ tmpData += 1;
+ remaining -= 1;
+ break;
+ }
+ // 'tbox' box to indicate the position of the text with values
+ // of top, left, bottom and right
+ case FOURCC('t', 'b', 'o', 'x'):
+ {
+ if (remaining < 8) {
+ return OK;
+ }
+ parcel->writeInt32(KEY_STRUCT_TEXT_POS);
+ parcel->writeInt32(U16_AT(tmpData));
+ parcel->writeInt32(U16_AT(tmpData + 2));
parcel->writeInt32(U16_AT(tmpData + 4));
parcel->writeInt32(U16_AT(tmpData + 6));
tmpData += 8;
+ remaining -= 8;
+ break;
}
+ // 'blnk' to specify the char range to be blinked
+ case FOURCC('b', 'l', 'n', 'k'):
+ {
+ if (remaining < 4) {
+ return OK;
+ }
- break;
- }
- // 'hlit' box specifies highlighted text
- case FOURCC('h', 'l', 'i', 't'):
- {
- parcel->writeInt32(KEY_STRUCT_HIGHLIGHT_LIST);
+ parcel->writeInt32(KEY_STRUCT_BLINKING_TEXT_LIST);
- // the start char offset to highlight
- parcel->writeInt32(U16_AT(tmpData));
- // the last char offset to highlight
- parcel->writeInt32(U16_AT(tmpData + 2));
+ // start char offset
+ parcel->writeInt32(U16_AT(tmpData));
+ // end char offset
+ parcel->writeInt32(U16_AT(tmpData + 2));
- break;
+ tmpData += 4;
+ remaining -= 4;
+ break;
+ }
+ // 'twrp' box specifies text wrap behavior. If the value is 0x00,
+ // then no wrap. If it's 0x01, then automatic 'soft' wrap is enabled.
+ // 0x02-0xff are reserved.
+ case FOURCC('t', 'w', 'r', 'p'):
+ {
+ if (remaining < 1) {
+ return OK;
+ }
+ parcel->writeInt32(KEY_WRAP_TEXT);
+ parcel->writeInt32(*tmpData);
+
+ tmpData += 1;
+ remaining -= 1;
+ break;
+ }
+ default:
+ {
+ break;
+ }
}
- // 'hclr' box specifies the RGBA color: 8 bits each of
- // red, green, blue, and an alpha(transparency) value
- case FOURCC('h', 'c', 'l', 'r'):
- {
- parcel->writeInt32(KEY_HIGHLIGHT_COLOR_RGBA);
- uint32_t rgba = *(tmpData) << 24 | *(tmpData + 1) << 16
- | *(tmpData + 2) << 8 | *(tmpData + 3);
- parcel->writeInt32(rgba);
+ data += chunkSize;
+ size -= chunkSize;
+ }
+
+ return OK;
+}
- break;
- }
- // 'dlay' box specifies a delay after a scroll in and/or
- // before scroll out.
- case FOURCC('d', 'l', 'a', 'y'):
- {
- parcel->writeInt32(KEY_SCROLL_DELAY);
+// To extract box 'tx3g' defined in 3GPP TS 26.245, and store it in a Parcel
+status_t TextDescriptions::extract3GPPGlobalDescriptions(
+ const uint8_t *data, ssize_t size, Parcel *parcel) {
+
+ parcel->writeInt32(KEY_GLOBAL_SETTING);
- uint32_t delay = *(tmpData) << 24 | *(tmpData + 1) << 16
- | *(tmpData + 2) << 8 | *(tmpData + 3);
- parcel->writeInt32(delay);
+ while (size >= 8) {
+ ssize_t chunkSize = U32_AT(data);
+ uint32_t chunkType = U32_AT(data + 4);
+ const uint8_t *tmpData = data;
+ tmpData += 8;
+ size_t remaining = size - 8;
- break;
+ if (size < chunkSize) {
+ return OK;
}
- // 'href' box for hyper text link
- case FOURCC('h', 'r', 'e', 'f'):
- {
- parcel->writeInt32(KEY_STRUCT_HYPER_TEXT_LIST);
+ switch(chunkType) {
+ case FOURCC('t', 'x', '3', 'g'):
+ {
+ if (remaining < 18) { // 8 just below, and another 10 a little further down
+ return OK;
+ }
+ tmpData += 8; // skip the first 8 bytes
+ remaining -= 8;
+ parcel->writeInt32(KEY_DISPLAY_FLAGS);
+ parcel->writeInt32(U32_AT(tmpData));
+
+ parcel->writeInt32(KEY_STRUCT_JUSTIFICATION);
+ parcel->writeInt32(tmpData[4]);
+ parcel->writeInt32(tmpData[5]);
+
+ parcel->writeInt32(KEY_BACKGROUND_COLOR_RGBA);
+ uint32_t rgba = *(tmpData + 6) << 24 | *(tmpData + 7) << 16
+ | *(tmpData + 8) << 8 | *(tmpData + 9);
+ parcel->writeInt32(rgba);
- // the start offset of the text to be linked
- parcel->writeInt32(U16_AT(tmpData));
- // the end offset of the text
- parcel->writeInt32(U16_AT(tmpData + 2));
+ tmpData += 10;
+ remaining -= 10;
- // the number of bytes in the following URL
- int len = *(tmpData + 4);
- parcel->writeInt32(len);
+ if (remaining < 8) {
+ return OK;
+ }
+ parcel->writeInt32(KEY_STRUCT_TEXT_POS);
+ parcel->writeInt32(U16_AT(tmpData));
+ parcel->writeInt32(U16_AT(tmpData + 2));
+ parcel->writeInt32(U16_AT(tmpData + 4));
+ parcel->writeInt32(U16_AT(tmpData + 6));
- // the linked-to URL
- parcel->writeInt32(len);
- parcel->write(tmpData + 5, len);
+ tmpData += 8;
+ remaining -= 8;
- tmpData += (5 + len);
+ if (remaining < 12) {
+ return OK;
+ }
+ parcel->writeInt32(KEY_STRUCT_STYLE_LIST);
+ parcel->writeInt32(KEY_START_CHAR);
+ parcel->writeInt32(U16_AT(tmpData));
- // the number of bytes in the following "alt" string
- len = *tmpData;
- parcel->writeInt32(len);
+ parcel->writeInt32(KEY_END_CHAR);
+ parcel->writeInt32(U16_AT(tmpData + 2));
- // an "alt" string for user display
- parcel->writeInt32(len);
- parcel->write(tmpData + 1, len);
+ parcel->writeInt32(KEY_FONT_ID);
+ parcel->writeInt32(U16_AT(tmpData + 4));
- break;
- }
- // 'tbox' box to indicate the position of the text with values
- // of top, left, bottom and right
- case FOURCC('t', 'b', 'o', 'x'):
- {
- parcel->writeInt32(KEY_STRUCT_TEXT_POS);
- parcel->writeInt32(U16_AT(tmpData));
- parcel->writeInt32(U16_AT(tmpData + 2));
- parcel->writeInt32(U16_AT(tmpData + 4));
- parcel->writeInt32(U16_AT(tmpData + 6));
-
- break;
- }
- // 'blnk' to specify the char range to be blinked
- case FOURCC('b', 'l', 'n', 'k'):
- {
- parcel->writeInt32(KEY_STRUCT_BLINKING_TEXT_LIST);
+ parcel->writeInt32(KEY_STYLE_FLAGS);
+ parcel->writeInt32(*(tmpData + 6));
- // start char offset
- parcel->writeInt32(U16_AT(tmpData));
- // end char offset
- parcel->writeInt32(U16_AT(tmpData + 2));
+ parcel->writeInt32(KEY_FONT_SIZE);
+ parcel->writeInt32(*(tmpData + 7));
- break;
- }
- // 'twrp' box specifies text wrap behavior. If the value if 0x00,
- // then no wrap. If it's 0x01, then automatic 'soft' wrap is enabled.
- // 0x02-0xff are reserved.
- case FOURCC('t', 'w', 'r', 'p'):
- {
- parcel->writeInt32(KEY_WRAP_TEXT);
- parcel->writeInt32(*tmpData);
-
- break;
- }
- default:
- {
- break;
- }
- }
+ parcel->writeInt32(KEY_TEXT_COLOR_RGBA);
+ rgba = *(tmpData + 8) << 24 | *(tmpData + 9) << 16
+ | *(tmpData + 10) << 8 | *(tmpData + 11);
+ parcel->writeInt32(rgba);
- if (size > chunkSize) {
- data += chunkSize;
- size -= chunkSize;
- // continue to parse next box
- return extract3GPPLocalDescriptions(data, size, 0, parcel, 1);
- }
+ tmpData += 12;
+ remaining -= 12;
- return OK;
-}
+ if (remaining < 2) {
+ return OK;
+ }
-// To extract box 'tx3g' defined in 3GPP TS 26.245, and store it in a Parcel
-status_t TextDescriptions::extract3GPPGlobalDescriptions(
- const uint8_t *data, ssize_t size, Parcel *parcel, int depth) {
+ size_t dataPos = parcel->dataPosition();
- ssize_t chunkSize = U32_AT(data);
- uint32_t chunkType = U32_AT(data + 4);
- const uint8_t *tmpData = data;
- tmpData += 8;
+ parcel->writeInt32(KEY_STRUCT_FONT_LIST);
+ uint16_t count = U16_AT(tmpData);
+ parcel->writeInt32(count);
- if (size < chunkSize) {
- return OK;
- }
+ tmpData += 2;
+ remaining -= 2;
- if (depth == 0) {
- parcel->writeInt32(KEY_GLOBAL_SETTING);
- }
- switch(chunkType) {
- case FOURCC('t', 'x', '3', 'g'):
- {
- tmpData += 8; // skip the first 8 bytes
- parcel->writeInt32(KEY_DISPLAY_FLAGS);
- parcel->writeInt32(U32_AT(tmpData));
-
- parcel->writeInt32(KEY_STRUCT_JUSTIFICATION);
- parcel->writeInt32(tmpData[4]);
- parcel->writeInt32(tmpData[5]);
-
- parcel->writeInt32(KEY_BACKGROUND_COLOR_RGBA);
- uint32_t rgba = *(tmpData + 6) << 24 | *(tmpData + 7) << 16
- | *(tmpData + 8) << 8 | *(tmpData + 9);
- parcel->writeInt32(rgba);
-
- tmpData += 10;
- parcel->writeInt32(KEY_STRUCT_TEXT_POS);
- parcel->writeInt32(U16_AT(tmpData));
- parcel->writeInt32(U16_AT(tmpData + 2));
- parcel->writeInt32(U16_AT(tmpData + 4));
- parcel->writeInt32(U16_AT(tmpData + 6));
-
- tmpData += 8;
- parcel->writeInt32(KEY_STRUCT_STYLE_LIST);
- parcel->writeInt32(KEY_START_CHAR);
- parcel->writeInt32(U16_AT(tmpData));
-
- parcel->writeInt32(KEY_END_CHAR);
- parcel->writeInt32(U16_AT(tmpData + 2));
-
- parcel->writeInt32(KEY_FONT_ID);
- parcel->writeInt32(U16_AT(tmpData + 4));
-
- parcel->writeInt32(KEY_STYLE_FLAGS);
- parcel->writeInt32(*(tmpData + 6));
-
- parcel->writeInt32(KEY_FONT_SIZE);
- parcel->writeInt32(*(tmpData + 7));
-
- parcel->writeInt32(KEY_TEXT_COLOR_RGBA);
- rgba = *(tmpData + 8) << 24 | *(tmpData + 9) << 16
- | *(tmpData + 10) << 8 | *(tmpData + 11);
- parcel->writeInt32(rgba);
-
- tmpData += 12;
- parcel->writeInt32(KEY_STRUCT_FONT_LIST);
- uint16_t count = U16_AT(tmpData);
- parcel->writeInt32(count);
-
- tmpData += 2;
- for (int i = 0; i < count; i++) {
- // font ID
- parcel->writeInt32(U16_AT(tmpData));
+ for (int i = 0; i < count; i++) {
+ if (remaining < 3) {
+ // roll back
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
+ // font ID
+ parcel->writeInt32(U16_AT(tmpData));
- // font name length
- parcel->writeInt32(*(tmpData + 2));
+ // font name length
+ parcel->writeInt32(*(tmpData + 2));
- int len = *(tmpData + 2);
+ size_t len = *(tmpData + 2);
- parcel->write(tmpData + 3, len);
- tmpData += 3 + len;
- }
+ tmpData += 3;
+ remaining -= 3;
- break;
- }
- default:
- {
- break;
- }
- }
+ if (remaining < len) {
+ // roll back
+ parcel->setDataPosition(dataPos);
+ return OK;
+ }
- data += chunkSize;
- size -= chunkSize;
+ parcel->write(tmpData, len);
+ tmpData += len;
+ remaining -= len;
+ }
- if (size > 0) {
- // continue to extract next 'tx3g'
- return extract3GPPGlobalDescriptions(data, size, parcel, 1);
+ // there is a "DisparityBox" after this according to the spec, but we ignore it
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+
+ data += chunkSize;
+ size -= chunkSize;
}
return OK;
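
The rewritten parsers above all follow the same defensive pattern: check remaining before every read, and when a multi-part record (a hyper-text link, a font-list entry) turns out to be truncated, rewind the Parcel to the position saved before the record began so that no half-written key/value pairs reach the reader. A minimal sketch of that rollback idiom, assuming <binder/Parcel.h>; KEY_EXAMPLE_STRING is a hypothetical constant, not one from this patch:

    // Sketch only: bounds-checked write of a 1-byte-length-prefixed string,
    // rolled back if the payload is truncated. KEY_EXAMPLE_STRING is hypothetical.
    static status_t writeBoundedString(const uint8_t *tmpData, size_t remaining,
                                       Parcel *parcel) {
        if (remaining < 1) {
            return OK;                               // nothing usable, write nothing
        }
        size_t dataPos = parcel->dataPosition();     // remember where this record starts
        size_t len = *tmpData;                       // length prefix
        parcel->writeInt32(KEY_EXAMPLE_STRING);
        parcel->writeInt32(len);
        if (remaining - 1 < len) {
            parcel->setDataPosition(dataPos);        // truncated: drop the partial record
            return OK;
        }
        parcel->write(tmpData + 1, len);             // payload follows the length byte
        return OK;
    }

Returning OK on truncation mirrors the patch itself: a malformed box is silently skipped rather than failing the whole description.
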
diff --git a/media/libstagefright/timedtext/TextDescriptions.h b/media/libstagefright/timedtext/TextDescriptions.h
index 0144917..bf67f3f 100644
--- a/media/libstagefright/timedtext/TextDescriptions.h
+++ b/media/libstagefright/timedtext/TextDescriptions.h
@@ -72,10 +72,10 @@ private:
int timeMs, Parcel *parcel);
static status_t extract3GPPGlobalDescriptions(
const uint8_t *data, ssize_t size,
- Parcel *parcel, int depth);
+ Parcel *parcel);
static status_t extract3GPPLocalDescriptions(
const uint8_t *data, ssize_t size,
- int timeMs, Parcel *parcel, int depth);
+ int timeMs, Parcel *parcel);
DISALLOW_EVIL_CONSTRUCTORS(TextDescriptions);
};
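
Dropping the depth parameter from both extractors goes hand in hand with the change above: instead of tail-recursing into themselves for each following box, the functions now walk sibling boxes in a while (size >= 8) loop. A minimal sketch of that iterative walk, assuming the same U32_AT helper; the chunkSize lower-bound guard is an extra defensive check in this sketch, not something the patch introduces:

    // Sketch only: iterate over consecutive size-prefixed boxes instead of recursing.
    while (size >= 8) {
        ssize_t chunkSize = U32_AT(data);            // total box size, including its 8-byte header
        uint32_t chunkType = U32_AT(data + 4);       // FOURCC identifying the box
        if (chunkSize < 8 || size < chunkSize) {
            break;                                   // malformed or truncated box: stop parsing
        }
        handleBox(chunkType, data + 8, chunkSize - 8);  // handleBox() is a placeholder
        data += chunkSize;
        size -= chunkSize;
    }
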
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index b6de0d9..33766b5 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -50,6 +50,16 @@ LOCAL_C_INCLUDES := \
frameworks/av/services/radio \
external/sonic
+ifneq ($(BOARD_NUMBER_OF_CAMERAS),)
+ LOCAL_CFLAGS += -DMAX_CAMERAS=$(BOARD_NUMBER_OF_CAMERAS)
+endif
+
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_LISTEN)),true)
+ LOCAL_SHARED_LIBRARIES += liblisten
+ LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio/audio-listen
+ LOCAL_CFLAGS += -DAUDIO_LISTEN_ENABLED
+endif
+
LOCAL_MODULE:= mediaserver
LOCAL_32_BIT_ONLY := true
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 4a485ed..c16e646 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -39,6 +39,10 @@
#include "SoundTriggerHwService.h"
#include "RadioService.h"
+#ifdef AUDIO_LISTEN_ENABLED
+#include "ListenService.h"
+#endif
+
using namespace android;
int main(int argc __unused, char** argv)
@@ -133,6 +137,10 @@ int main(int argc __unused, char** argv)
MediaPlayerService::instantiate();
ResourceManagerService::instantiate();
CameraService::instantiate();
+#ifdef AUDIO_LISTEN_ENABLED
+ ALOGI("ListenService instantiated");
+ ListenService::instantiate();
+#endif
AudioPolicyService::instantiate();
SoundTriggerHwService::instantiate();
RadioService::instantiate();
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 07199e3..6a334e6 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -95,6 +95,7 @@ static const MtpEventCode kSupportedEventCodes[] = {
MTP_EVENT_STORE_ADDED,
MTP_EVENT_STORE_REMOVED,
MTP_EVENT_DEVICE_PROP_CHANGED,
+ MTP_EVENT_OBJECT_PROP_CHANGED,
};
MtpServer::MtpServer(int fd, MtpDatabase* database, bool ptp,
@@ -253,6 +254,11 @@ void MtpServer::sendObjectRemoved(MtpObjectHandle handle) {
sendEvent(MTP_EVENT_OBJECT_REMOVED, handle);
}
+void MtpServer::sendObjectUpdated(MtpObjectHandle handle) {
+ ALOGV("sendObjectUpdated %d\n", handle);
+ sendEvent(MTP_EVENT_OBJECT_PROP_CHANGED, handle);
+}
+
void MtpServer::sendStoreAdded(MtpStorageID id) {
ALOGV("sendStoreAdded %08X\n", id);
sendEvent(MTP_EVENT_STORE_ADDED, id);
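
sendObjectUpdated() mirrors the existing add/remove notifications: it emits MTP_EVENT_OBJECT_PROP_CHANGED with the object handle as the event parameter, and that event code is now advertised in kSupportedEventCodes so initiators know they may receive it. A hypothetical call site (the real caller sits outside this diff):

    // Hypothetical: after an object's metadata is rewritten, tell connected MTP
    // initiators to re-read its properties.
    void onObjectMetadataChanged(MtpServer* server, MtpObjectHandle handle) {
        if (server != NULL) {
            server->sendObjectUpdated(handle);       // emits MTP_EVENT_OBJECT_PROP_CHANGED
        }
    }
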
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index b3a11e0..8bd0472 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -105,6 +105,7 @@ public:
void sendObjectAdded(MtpObjectHandle handle);
void sendObjectRemoved(MtpObjectHandle handle);
void sendDevicePropertyChanged(MtpDeviceProperty property);
+ void sendObjectUpdated(MtpObjectHandle handle);
private:
void sendStoreAdded(MtpStorageID id);
diff --git a/media/utils/Android.mk b/media/utils/Android.mk
index dfadbc8..54d22b1 100644
--- a/media/utils/Android.mk
+++ b/media/utils/Android.mk
@@ -18,6 +18,8 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
BatteryNotifier.cpp \
+ ISchedulingPolicyService.cpp \
+ SchedulingPolicyService.cpp
LOCAL_SHARED_LIBRARIES := \
libbinder \
diff --git a/services/audioflinger/ISchedulingPolicyService.cpp b/media/utils/ISchedulingPolicyService.cpp
index f55bc02..f55bc02 100644
--- a/services/audioflinger/ISchedulingPolicyService.cpp
+++ b/media/utils/ISchedulingPolicyService.cpp
diff --git a/services/audioflinger/ISchedulingPolicyService.h b/media/utils/ISchedulingPolicyService.h
index b94b191..b94b191 100644
--- a/services/audioflinger/ISchedulingPolicyService.h
+++ b/media/utils/ISchedulingPolicyService.h
diff --git a/services/audioflinger/SchedulingPolicyService.cpp b/media/utils/SchedulingPolicyService.cpp
index 70a3f1a..17ee9bc 100644
--- a/services/audioflinger/SchedulingPolicyService.cpp
+++ b/media/utils/SchedulingPolicyService.cpp
@@ -20,7 +20,7 @@
#include <binder/IServiceManager.h>
#include <utils/Mutex.h>
#include "ISchedulingPolicyService.h"
-#include "SchedulingPolicyService.h"
+#include "mediautils/SchedulingPolicyService.h"
namespace android {
diff --git a/services/audioflinger/SchedulingPolicyService.h b/media/utils/include/mediautils/SchedulingPolicyService.h
index a9870d4..a9870d4 100644
--- a/services/audioflinger/SchedulingPolicyService.h
+++ b/media/utils/include/mediautils/SchedulingPolicyService.h
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 4791ce4..474fb46 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -3,17 +3,6 @@ LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
- ISchedulingPolicyService.cpp \
- SchedulingPolicyService.cpp
-
-# FIXME Move this library to frameworks/native
-LOCAL_MODULE := libscheduling_policy
-
-include $(BUILD_STATIC_LIBRARY)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
ServiceUtilities.cpp
# FIXME Move this library to frameworks/native
@@ -64,10 +53,10 @@ LOCAL_SHARED_LIBRARIES := \
libeffects \
libpowermanager \
libserviceutility \
- libsonic
+ libsonic \
+ libmediautils
LOCAL_STATIC_LIBRARIES := \
- libscheduling_policy \
libcpustats \
libmedia_helper
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 7165c6c..f0ae4ec 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -137,6 +137,7 @@ public:
case AUDIO_FORMAT_PCM_8_BIT:
case AUDIO_FORMAT_PCM_16_BIT:
case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_8_24_BIT:
case AUDIO_FORMAT_PCM_32_BIT:
case AUDIO_FORMAT_PCM_FLOAT:
return true;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 8dfdca6..cdf8b1e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -60,7 +60,7 @@
#include "FastMixer.h"
#include "FastCapture.h"
#include "ServiceUtilities.h"
-#include "SchedulingPolicyService.h"
+#include "mediautils/SchedulingPolicyService.h"
#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
@@ -3498,6 +3498,12 @@ ssize_t AudioFlinger::MixerThread::threadLoop_write()
if (state->mCommand != FastMixerState::MIX_WRITE &&
(kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
if (state->mCommand == FastMixerState::COLD_IDLE) {
+
+ // FIXME workaround for first HAL write being CPU bound on some devices
+ ATRACE_BEGIN("write");
+ mOutput->write((char *)mSinkBuffer, 0);
+ ATRACE_END();
+
int32_t old = android_atomic_inc(&mFastMixerFutex);
if (old == -1) {
(void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
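
The COLD_IDLE workaround issues a zero-length write to the output stream before waking the fast mixer, so any one-time CPU-bound setup the HAL performs on its first write is paid here rather than on the latency-critical fast path. A minimal sketch of the idea, where output and sinkBuffer stand in for the thread's mOutput and mSinkBuffer and write() is assumed to accept a zero byte count:

    // Sketch only: prime the HAL with an empty write so first-write setup cost
    // is absorbed before deterministic fast-mixer timing matters.
    ATRACE_BEGIN("write");
    ssize_t written = output->write(sinkBuffer, 0 /* bytes */);   // warm-up write
    ATRACE_END();
    if (written < 0) {
        ALOGW("warm-up write returned %zd", written);             // harmless; real writes follow
    }
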
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index a45a6f8..98eb87f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -433,7 +433,10 @@ AudioFlinger::PlaybackThread::Track::Track(
}
// only allocate a fast track index if we were able to allocate a normal track name
if (flags & IAudioFlinger::TRACK_FAST) {
- mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
+ // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
+ // race with setSyncEvent(). However, if we call it, we cannot properly start
+ // static fast tracks (SoundPool) immediately after stopping.
+ //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
int i = __builtin_ctz(thread->mFastTrackAvailMask);
ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 18bcfdb..48d09ed 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -56,9 +56,21 @@ public:
const struct audio_port_config *srcConfig = NULL) const;
virtual sp<AudioPort> getAudioPort() const { return mProfile; }
void toAudioPort(struct audio_port *port) const;
+ void setPreemptedSessions(const SortedVector<audio_session_t>& sessions);
+ SortedVector<audio_session_t> getPreemptedSessions() const;
+ bool hasPreemptedSession(audio_session_t session) const;
+ void clearPreemptedSessions();
private:
audio_port_handle_t mId;
+ // Because a preemptible capture session can preempt another one, we end up in an endless loop
+ // situation where each session is allowed to restart after being preempted,
+ // thus preempting the other one which restarts and so on.
+ // To avoid this situation, we store which audio session was preempted when
+ // a particular input started and prevent preemption of this active input by this session.
+ // We also inherit sessions from the preempted input to avoid a 3-way preemption loop etc...
+ SortedVector<audio_session_t> mPreemptedSessions;
+
};
class AudioInputCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
index 4a394bb..03b45c2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
@@ -99,6 +99,9 @@ const StringToEnum sDeviceTypeToEnumTable[] = {
STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
STRING_TO_ENUM(AUDIO_DEVICE_IN_IP),
+#ifdef LEGACY_ALSA_AUDIO
+ STRING_TO_ENUM(AUDIO_DEVICE_IN_COMMUNICATION),
+#endif
};
const StringToEnum sDeviceNameToEnumTable[] = {
@@ -248,6 +251,11 @@ const StringToEnum sInChannelsNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
STRING_TO_ENUM(AUDIO_CHANNEL_IN_5POINT1),
+#ifdef LEGACY_ALSA_AUDIO
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
+#endif
};
const StringToEnum sIndexChannelsNameToEnumTable[] = {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 937160b..626fdae 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -93,6 +93,26 @@ void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
port->ext.mix.latency_class = AUDIO_LATENCY_NORMAL;
}
+void AudioInputDescriptor::setPreemptedSessions(const SortedVector<audio_session_t>& sessions)
+{
+ mPreemptedSessions = sessions;
+}
+
+SortedVector<audio_session_t> AudioInputDescriptor::getPreemptedSessions() const
+{
+ return mPreemptedSessions;
+}
+
+bool AudioInputDescriptor::hasPreemptedSession(audio_session_t session) const
+{
+ return (mPreemptedSessions.indexOf(session) >= 0);
+}
+
+void AudioInputDescriptor::clearPreemptedSessions()
+{
+ mPreemptedSessions.clear();
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
index b682e2c..4ca27c2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
@@ -35,7 +35,10 @@ namespace android {
StreamDescriptor::StreamDescriptor()
: mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
{
- mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
+ // Initialize the current stream's index to mIndexMax so volume isn't 0 in
+ // cases where the Java layer doesn't call into the audio policy service to
+ // set the default volume.
+ mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, mIndexMax);
}
int StreamDescriptor::getVolumeIndex(audio_devices_t device) const
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 8b4a085..71f6b51 100755
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -355,7 +355,11 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
// - cannot route from voice call RX OR
// - audio HAL version is < 3.0 and TX device is on the primary HW module
if (getPhoneState() == AUDIO_MODE_IN_CALL) {
+#ifdef LEGACY_ALSA_AUDIO
+ audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_CALL);
+#else
audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+#endif
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
audio_devices_t availPrimaryInputDevices =
availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
@@ -647,6 +651,9 @@ audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) cons
break;
case AUDIO_SOURCE_VOICE_COMMUNICATION:
+#ifdef LEGACY_ALSA_AUDIO
+ device = AUDIO_DEVICE_IN_COMMUNICATION;
+#else
// Allow only use of devices on primary input if in call and HAL does not support routing
// to voice call path.
if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
@@ -684,6 +691,7 @@ audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) cons
}
break;
}
+#endif
break;
case AUDIO_SOURCE_VOICE_RECOGNITION:
diff --git a/services/audiopolicy/enginedefault/src/Gains.cpp b/services/audiopolicy/enginedefault/src/Gains.cpp
index 78f2909..d06365c 100644
--- a/services/audiopolicy/enginedefault/src/Gains.cpp
+++ b/services/audiopolicy/enginedefault/src/Gains.cpp
@@ -171,10 +171,10 @@ const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
},
{ // AUDIO_STREAM_TTS
// "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
- Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
- Gains::sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
- Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sSilentVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
+ Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+ Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+ Gains::sSilentVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_ACCESSIBILITY
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 483855f..ee3b72e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -88,6 +88,14 @@ status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
}
ALOGV("setDeviceConnectionState() connecting device %x", device);
+#ifdef LEGACY_ALSA_AUDIO
+ if (device & AUDIO_DEVICE_OUT_USB_ACCESSORY) {
+ AudioParameter param;
+ param.add(String8("usb_connected"), String8("true"));
+ mpClientInterface->setParameters(0, param.toString());
+ }
+#endif
+
// register new device as available
index = mAvailableOutputDevices.add(devDesc);
if (index >= 0) {
@@ -139,6 +147,14 @@ status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
// remove device from available output devices
mAvailableOutputDevices.remove(devDesc);
+#ifdef LEGACY_ALSA_AUDIO
+ if (device & AUDIO_DEVICE_OUT_USB_ACCESSORY) {
+ AudioParameter param;
+ param.add(String8("usb_connected"), String8("false"));
+ mpClientInterface->setParameters(0, param.toString());
+ }
+#endif
+
checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
// Propagate device availability to Engine
@@ -305,7 +321,11 @@ void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs
if(!hasPrimaryOutput()) {
return;
}
+#ifdef LEGACY_ALSA_AUDIO
+ audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_CALL);
+#else
audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+#endif
ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
// release existing RX patch if any
@@ -1074,7 +1094,7 @@ status_t AudioPolicyManager::startSource(sp<AudioOutputDescriptor> outputDesc,
*delayMs = 0;
if (stream == AUDIO_STREAM_TTS) {
ALOGV("\t found BEACON stream");
- if (mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+ if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
return INVALID_OPERATION;
} else {
beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
@@ -1370,6 +1390,22 @@ status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
} else {
*inputType = API_INPUT_LEGACY;
}
+#ifdef LEGACY_ALSA_AUDIO
+ // adapt channel selection to input source
+ switch (inputSource) {
+ case AUDIO_SOURCE_VOICE_UPLINK:
+ channelMask |= AUDIO_CHANNEL_IN_VOICE_UPLINK;
+ break;
+ case AUDIO_SOURCE_VOICE_DOWNLINK:
+ channelMask |= AUDIO_CHANNEL_IN_VOICE_DNLINK;
+ break;
+ case AUDIO_SOURCE_VOICE_CALL:
+ channelMask |= AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
+ break;
+ default:
+ break;
+ }
+#endif
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
@@ -1485,10 +1521,15 @@ status_t AudioPolicyManager::startInput(audio_io_handle_t input,
// If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
// otherwise the active input continues and the new input cannot be started.
sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
+ if ((activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) &&
+ !activeDesc->hasPreemptedSession(session)) {
ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
- stopInput(activeInput, activeDesc->mSessions.itemAt(0));
- releaseInput(activeInput, activeDesc->mSessions.itemAt(0));
+ audio_session_t activeSession = activeDesc->mSessions.itemAt(0);
+ SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+ sessions.add(activeSession);
+ inputDesc->setPreemptedSessions(sessions);
+ stopInput(activeInput, activeSession);
+ releaseInput(activeInput, activeSession);
} else {
ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
return INVALID_OPERATION;
@@ -1592,6 +1633,7 @@ status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
if (mInputs.activeInputsCount() == 0) {
SoundTrigger::setCaptureState(false);
}
+ inputDesc->clearPreemptedSessions();
}
return NO_ERROR;
}
@@ -1718,7 +1760,9 @@ status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
status = volStatus;
}
}
- if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)) {
+ if ((accessibilityDevice != AUDIO_DEVICE_NONE) &&
+ ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)))
+ {
status_t volStatus = checkAndSetVolume(AUDIO_STREAM_ACCESSIBILITY,
index, desc, curDevice);
}
@@ -2007,6 +2051,9 @@ status_t AudioPolicyManager::dump(int fd)
snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
result.append(buffer);
+ snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
+ result.append(buffer);
+
write(fd, result.string(), result.size());
mAvailableOutputDevices.dump(fd, String8("output"));
@@ -2687,7 +2734,8 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa
mAudioPortGeneration(1),
mBeaconMuteRefCount(0),
mBeaconPlayingRefCount(0),
- mBeaconMuted(false)
+ mBeaconMuted(false),
+ mTtsOutputAvailable(false)
{
audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
if (!engineInstance) {
@@ -2744,6 +2792,9 @@ AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterfa
ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName);
continue;
}
+ if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_TTS) != 0) {
+ mTtsOutputAvailable = true;
+ }
if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
continue;
@@ -4043,6 +4094,12 @@ void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t
}
uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
+
+ // skip beacon mute management if a dedicated TTS output is available
+ if (mTtsOutputAvailable) {
+ return 0;
+ }
+
switch(event) {
case STARTING_OUTPUT:
mBeaconMuteRefCount++;
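
The preempted-session bookkeeping added to AudioInputDescriptor is what breaks the ping-pong described in its header comment: when startInput() evicts an active AUDIO_SOURCE_HOTWORD input, the winner records the victim's session plus every session the victim had itself preempted, hasPreemptedSession() then refuses to let any of those sessions evict the winner in turn, and stopInput() clears the set once the input goes inactive. Condensed from the hunks above:

    // Sketch only: inherit the victim's preemption history so "A evicts B,
    // B evicts A, ..." cycles cannot form.
    if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD &&
            !activeDesc->hasPreemptedSession(session)) {
        audio_session_t activeSession = activeDesc->mSessions.itemAt(0);
        SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
        sessions.add(activeSession);                 // remember everyone we displaced
        inputDesc->setPreemptedSessions(sessions);
        stopInput(activeInput, activeSession);
        releaseInput(activeInput, activeSession);
    }
    // ... later, once this input stops and no session keeps it active:
    //     inputDesc->clearPreemptedSessions();
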
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 80c41c8..c40a435 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -548,6 +548,7 @@ protected:
uint32_t mBeaconMuteRefCount; // ref count for stream that would mute beacon
uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
bool mBeaconMuted; // has STREAM_TTS been muted
+ bool mTtsOutputAvailable; // true if a dedicated output for TTS stream is available
AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 58ecb11..a228798 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -76,10 +76,14 @@ status_t AudioPolicyService::setPhoneState(audio_mode_t state)
ALOGV("setPhoneState()");
+ // acquire lock before calling setMode() so that setMode() + setPhoneState() are an atomic
+ // operation from policy manager standpoint (no other operation (e.g track start or stop)
+ // can be interleaved).
+ Mutex::Autolock _l(mLock);
+
// TODO: check if it is more appropriate to do it in platform specific policy manager
AudioSystem::setMode(state);
- Mutex::Autolock _l(mLock);
mAudioPolicyManager->setPhoneState(state);
mPhoneState = state;
return NO_ERROR;
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index e8ef24e..ab09cb3 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -53,7 +53,7 @@ LOCAL_SRC_FILES:= \
device3/StatusTracker.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
- utils/AutoConditionLock.cpp \
+ utils/AutoConditionLock.cpp
LOCAL_SHARED_LIBRARIES:= \
libui \
@@ -79,6 +79,14 @@ LOCAL_C_INCLUDES += \
LOCAL_CFLAGS += -Wall -Wextra
+ifeq ($(BOARD_NEEDS_MEMORYHEAPION),true)
+ LOCAL_CFLAGS += -DUSE_MEMORY_HEAP_ION
+endif
+
+ifneq ($(BOARD_NUMBER_OF_CAMERAS),)
+ LOCAL_CFLAGS += -DMAX_CAMERAS=$(BOARD_NUMBER_OF_CAMERAS)
+endif
+
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 3d706c1..861e519 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -15,6 +15,7 @@
*/
#define LOG_TAG "CameraService"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
#include <algorithm>
@@ -33,7 +34,6 @@
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <binder/ProcessInfoService.h>
-#include <camera/ICameraServiceProxy.h>
#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <gui/Surface.h>
@@ -157,7 +157,6 @@ void CameraService::onFirstRef()
}
mModule = new CameraModule(rawModule);
- ALOGI("Loaded \"%s\" camera module", mModule->getModuleName());
err = mModule->init();
if (err != OK) {
ALOGE("Could not initialize camera HAL module: %d (%s)", err,
@@ -169,6 +168,7 @@ void CameraService::onFirstRef()
mModule = nullptr;
return;
}
+ ALOGI("Loaded \"%s\" camera module", mModule->getModuleName());
mNumberOfCameras = mModule->getNumberOfCameras();
mNumberOfNormalCameras = mNumberOfCameras;
@@ -250,13 +250,19 @@ void CameraService::onFirstRef()
CameraService::pingCameraServiceProxy();
}
-void CameraService::pingCameraServiceProxy() {
+sp<ICameraServiceProxy> CameraService::getCameraServiceProxy() {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.camera.proxy"));
if (binder == nullptr) {
- return;
+ return nullptr;
}
sp<ICameraServiceProxy> proxyBinder = interface_cast<ICameraServiceProxy>(binder);
+ return proxyBinder;
+}
+
+void CameraService::pingCameraServiceProxy() {
+ sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
+ if (proxyBinder == nullptr) return;
proxyBinder->pingForUserUpdate();
}
@@ -308,8 +314,10 @@ void CameraService::onDeviceStatusChanged(camera_device_status_t cameraId,
clientToDisconnect = removeClientLocked(id);
// Notify the client of disconnection
- clientToDisconnect->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
- CaptureResultExtras{});
+ if (clientToDisconnect != nullptr) {
+ clientToDisconnect->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+ CaptureResultExtras{});
+ }
}
ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
@@ -398,10 +406,12 @@ void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
}
int32_t CameraService::getNumberOfCameras() {
+ ATRACE_CALL();
return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
}
int32_t CameraService::getNumberOfCameras(int type) {
+ ATRACE_CALL();
switch (type) {
case CAMERA_TYPE_BACKWARD_COMPATIBLE:
return mNumberOfNormalCameras;
@@ -416,6 +426,7 @@ int32_t CameraService::getNumberOfCameras(int type) {
status_t CameraService::getCameraInfo(int cameraId,
struct CameraInfo* cameraInfo) {
+ ATRACE_CALL();
if (!mModule) {
return -ENODEV;
}
@@ -443,6 +454,7 @@ int CameraService::cameraIdToInt(const String8& cameraId) {
}
status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
+ ATRACE_CALL();
status_t ret = OK;
struct CameraInfo info;
if ((ret = getCameraInfo(cameraId, &info)) != OK) {
@@ -529,6 +541,7 @@ status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata
status_t CameraService::getCameraCharacteristics(int cameraId,
CameraMetadata* cameraInfo) {
+ ATRACE_CALL();
if (!cameraInfo) {
ALOGE("%s: cameraInfo is NULL", __FUNCTION__);
return BAD_VALUE;
@@ -597,10 +610,16 @@ int CameraService::getCameraPriorityFromProcState(int procState) {
procState);
return -1;
}
+ // Treat sleeping TOP processes the same as regular TOP processes, for
+ // access priority. This is important for lock-screen camera launch scenarios
+ if (procState == PROCESS_STATE_TOP_SLEEPING) {
+ procState = PROCESS_STATE_TOP;
+ }
return INT_MAX - procState;
}
status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
+ ATRACE_CALL();
if (!mModule) {
ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
return -ENODEV;
@@ -611,6 +630,7 @@ status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescript
}
int CameraService::getDeviceVersion(int cameraId, int* facing) {
+ ATRACE_CALL();
struct camera_info info;
if (mModule->getCameraInfo(cameraId, &info) != OK) {
return -1;
@@ -642,6 +662,7 @@ status_t CameraService::filterGetInfoErrorCode(status_t err) {
}
bool CameraService::setUpVendorTags() {
+ ATRACE_CALL();
vendor_tag_ops_t vOps = vendor_tag_ops_t();
// Check if vendor operations have been implemented
@@ -650,9 +671,7 @@ bool CameraService::setUpVendorTags() {
return false;
}
- ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
mModule->getVendorTagOps(&vOps);
- ATRACE_END();
// Ensure all vendor operations are present
if (vOps.get_tag_count == NULL || vOps.get_all_tags == NULL ||
@@ -952,7 +971,7 @@ status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clien
/*out*/
sp<BasicClient>* client,
std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial) {
-
+ ATRACE_CALL();
status_t ret = NO_ERROR;
std::vector<DescriptorPtr> evictedClients;
DescriptorPtr clientDescriptor;
@@ -1141,6 +1160,7 @@ status_t CameraService::connect(
/*out*/
sp<ICamera>& device) {
+ ATRACE_CALL();
status_t ret = NO_ERROR;
String8 id = String8::format("%d", cameraId);
sp<Client> client = nullptr;
@@ -1165,6 +1185,7 @@ status_t CameraService::connectLegacy(
/*out*/
sp<ICamera>& device) {
+ ATRACE_CALL();
String8 id = String8::format("%d", cameraId);
int apiVersion = mModule->getModuleApiVersion();
if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
@@ -1205,6 +1226,7 @@ status_t CameraService::connectDevice(
/*out*/
sp<ICameraDeviceUser>& device) {
+ ATRACE_CALL();
status_t ret = NO_ERROR;
String8 id = String8::format("%d", cameraId);
sp<CameraDeviceClient> client = nullptr;
@@ -1224,6 +1246,8 @@ status_t CameraService::connectDevice(
status_t CameraService::setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder) {
+
+ ATRACE_CALL();
if (enabled && clientBinder == nullptr) {
ALOGE("%s: torch client binder is NULL", __FUNCTION__);
return -EINVAL;
@@ -1312,6 +1336,8 @@ status_t CameraService::setTorchMode(const String16& cameraId, bool enabled,
}
void CameraService::notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) {
+ ATRACE_CALL();
+
switch(eventId) {
case ICameraService::USER_SWITCHED: {
doUserSwitch(/*newUserIds*/args, /*length*/length);
@@ -1327,6 +1353,8 @@ void CameraService::notifySystemEvent(int32_t eventId, const int32_t* args, size
}
status_t CameraService::addListener(const sp<ICameraServiceListener>& listener) {
+ ATRACE_CALL();
+
ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
if (listener == nullptr) {
@@ -1375,6 +1403,8 @@ status_t CameraService::addListener(const sp<ICameraServiceListener>& listener)
}
status_t CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
+ ATRACE_CALL();
+
ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get());
if (listener == 0) {
@@ -1401,6 +1431,8 @@ status_t CameraService::removeListener(const sp<ICameraServiceListener>& listene
}
status_t CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
+
+ ATRACE_CALL();
ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
if (parameters == NULL) {
@@ -1425,6 +1457,8 @@ status_t CameraService::getLegacyParameters(int cameraId, /*out*/String16* param
}
status_t CameraService::supportsCameraApi(int cameraId, int apiVersion) {
+ ATRACE_CALL();
+
ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
switch (apiVersion) {
@@ -1792,6 +1826,8 @@ MediaPlayer* CameraService::newMediaPlayer(const char *file) {
}
void CameraService::loadSound() {
+ ATRACE_CALL();
+
Mutex::Autolock lock(mSoundLock);
LOG1("CameraService::loadSound ref=%d", mSoundRef);
if (mSoundRef++) return;
@@ -1814,6 +1850,8 @@ void CameraService::releaseSound() {
}
void CameraService::playSound(sound_kind kind) {
+ ATRACE_CALL();
+
LOG1("playSound(%d)", kind);
Mutex::Autolock lock(mSoundLock);
sp<MediaPlayer> player = mSoundPlayer[kind];
@@ -1923,6 +1961,8 @@ bool CameraService::BasicClient::canCastToApiClient(apiLevel level) const {
}
status_t CameraService::BasicClient::startCameraOps() {
+ ATRACE_CALL();
+
int32_t res;
// Notify app ops that the camera is not available
mOpsCallback = new OpsCallback(this);
@@ -1956,10 +1996,16 @@ status_t CameraService::BasicClient::startCameraOps() {
mCameraService->updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
String8::format("%d", mCameraId));
+ // Transition device state to OPEN
+ mCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
+ String8::format("%d", mCameraId));
+
return OK;
}
status_t CameraService::BasicClient::finishCameraOps() {
+ ATRACE_CALL();
+
// Check if startCameraOps succeeded, and if so, finish the camera op
if (mOpsActive) {
// Notify app ops that the camera is available again
@@ -1974,6 +2020,10 @@ status_t CameraService::BasicClient::finishCameraOps() {
mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
String8::format("%d", mCameraId), rejected);
+ // Transition device state to CLOSED
+ mCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_CLOSED,
+ String8::format("%d", mCameraId));
+
// Notify flashlight that a camera device is closed.
mCameraService->mFlashlight->deviceClosed(
String8::format("%d", mCameraId));
@@ -1988,6 +2038,8 @@ status_t CameraService::BasicClient::finishCameraOps() {
}
void CameraService::BasicClient::opChanged(int32_t op, const String16& packageName) {
+ ATRACE_CALL();
+
String8 name(packageName);
String8 myName(mClientPackageName);
@@ -2215,9 +2267,11 @@ static bool tryLock(Mutex& mutex)
}
status_t CameraService::dump(int fd, const Vector<String16>& args) {
+ ATRACE_CALL();
+
String8 result("Dump of the Camera Service:\n");
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- result.appendFormat("Permission Denial: "
+ result = result.format("Permission Denial: "
"can't dump CameraService from pid=%d, uid=%d\n",
getCallingPid(),
getCallingUid());
@@ -2478,6 +2532,14 @@ void CameraService::updateStatus(ICameraServiceListener::Status status, const St
});
}
+void CameraService::updateProxyDeviceState(ICameraServiceProxy::CameraState newState,
+ const String8& cameraId) {
+ sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
+ if (proxyBinder == nullptr) return;
+ String16 id(cameraId);
+ proxyBinder->notifyCameraState(id, newState);
+}
+
status_t CameraService::getTorchStatusLocked(
const String8& cameraId,
ICameraServiceListener::TorchStatus *status) const {
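
All proxy traffic now funnels through getCameraServiceProxy(), which returns nullptr when the "media.camera.proxy" service is not (or not yet) registered, so pingCameraServiceProxy() and the new updateProxyDeviceState() quietly become no-ops instead of dereferencing a missing binder. The shape of the pattern, in a hypothetical CameraService helper:

    // Sketch only: notifyDeviceOpened() is a hypothetical member; the lookup and
    // null check follow getCameraServiceProxy()/updateProxyDeviceState() above.
    void CameraService::notifyDeviceOpened(const String8& cameraId) {
        sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
        if (proxyBinder == nullptr) return;          // proxy absent (e.g. early boot): skip
        proxyBinder->notifyCameraState(String16(cameraId),
                ICameraServiceProxy::CAMERA_STATE_OPEN);
    }
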
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index b29317e..b3903d4 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -24,6 +24,7 @@
#include <binder/BinderService.h>
#include <binder/IAppOpsCallback.h>
#include <camera/ICameraService.h>
+#include <camera/ICameraServiceProxy.h>
#include <hardware/camera.h>
#include <camera/ICamera.h>
@@ -48,6 +49,10 @@
#include <memory>
#include <utility>
+#ifndef MAX_CAMERAS
+#define MAX_CAMERAS 2
+#endif
+
namespace android {
extern volatile int32_t gLogLevel;
@@ -74,6 +79,8 @@ public:
// Process state (mirrors frameworks/base/core/java/android/app/ActivityManager.java)
static const int PROCESS_STATE_NONEXISTENT = -1;
+ static const int PROCESS_STATE_TOP = 2;
+ static const int PROCESS_STATE_TOP_SLEEPING = 5;
// 3 second busy timeout when other clients are connecting
static const nsecs_t DEFAULT_CONNECT_TIMEOUT_NS = 3000000000;
@@ -164,6 +171,14 @@ public:
void playSound(sound_kind kind);
void releaseSound();
+ /**
+ * Update the state of a given camera device (open/close/active/idle) with
+ * the camera proxy service in the system service
+ */
+ static void updateProxyDeviceState(
+ ICameraServiceProxy::CameraState newState,
+ const String8& cameraId);
+
/////////////////////////////////////////////////////////////////////
// CameraDeviceFactory functionality
int getDeviceVersion(int cameraId, int* facing = NULL);
@@ -730,6 +745,7 @@ private:
static String8 toString(std::set<userid_t> intSet);
+ static sp<ICameraServiceProxy> getCameraServiceProxy();
static void pingCameraServiceProxy();
};
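
MAX_CAMERAS gets a header-level default of 2 here, and the BOARD_NUMBER_OF_CAMERAS blocks added to the mediaserver and libcameraservice makefiles override it with -DMAX_CAMERAS=<n> at build time. The diff does not show where the macro is consumed; a typical compile-time use would look like this illustrative sketch:

    // Illustrative only: a fixed-size per-camera table capped by the board config.
    #ifndef MAX_CAMERAS
    #define MAX_CAMERAS 2                            // fallback when the board does not set it
    #endif

    static bool torchEnabled[MAX_CAMERAS] = { false };   // hypothetical bookkeeping

    static bool isValidCameraId(int id) {
        return id >= 0 && id < MAX_CAMERAS;
    }
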
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 36e99dd..48b5a26 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1912,6 +1912,8 @@ void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
__FUNCTION__, resultExtras.requestId, timestamp);
mCaptureSequencer->notifyShutter(resultExtras, timestamp);
+
+ Camera2ClientBase::notifyShutter(resultExtras, timestamp);
}
camera2::SharedParameters& Camera2Client::getParameters() {
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index f3a7988..1bb2910 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -254,6 +254,9 @@ void CameraClient::disconnect() {
// Turn off all messages.
disableMsgType(CAMERA_MSG_ALL_MSGS);
mHardware->stopPreview();
+ mCameraService->updateProxyDeviceState(
+ ICameraServiceProxy::CAMERA_STATE_IDLE,
+ String8::format("%d", mCameraId));
mHardware->cancelPicture();
// Release the hardware resources.
mHardware->release();
@@ -360,12 +363,14 @@ status_t CameraClient::setPreviewCallbackTarget(
// start preview mode
status_t CameraClient::startPreview() {
+ Mutex::Autolock lock(mLock);
LOG1("startPreview (pid %d)", getCallingPid());
return startCameraMode(CAMERA_PREVIEW_MODE);
}
// start recording mode
status_t CameraClient::startRecording() {
+ Mutex::Autolock lock(mLock);
LOG1("startRecording (pid %d)", getCallingPid());
return startCameraMode(CAMERA_RECORDING_MODE);
}
@@ -373,7 +378,6 @@ status_t CameraClient::startRecording() {
// start preview or recording
status_t CameraClient::startCameraMode(camera_mode mode) {
LOG1("startCameraMode(%d)", mode);
- Mutex::Autolock lock(mLock);
status_t result = checkPidAndHardware();
if (result != NO_ERROR) return result;
@@ -412,7 +416,11 @@ status_t CameraClient::startPreviewMode() {
}
mHardware->setPreviewWindow(mPreviewWindow);
result = mHardware->startPreview();
-
+ if (result == NO_ERROR) {
+ mCameraService->updateProxyDeviceState(
+ ICameraServiceProxy::CAMERA_STATE_ACTIVE,
+ String8::format("%d", mCameraId));
+ }
return result;
}
@@ -452,7 +460,9 @@ void CameraClient::stopPreview() {
disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
mHardware->stopPreview();
-
+ mCameraService->updateProxyDeviceState(
+ ICameraServiceProxy::CAMERA_STATE_IDLE,
+ String8::format("%d", mCameraId));
mPreviewBuffer.clear();
}
@@ -813,6 +823,12 @@ void CameraClient::handleShutter(void) {
disableMsgType(CAMERA_MSG_SHUTTER);
}
+ // Shutters only happen in response to takePicture, so mark device as
+ // idle now, until preview is restarted
+ mCameraService->updateProxyDeviceState(
+ ICameraServiceProxy::CAMERA_STATE_IDLE,
+ String8::format("%d", mCameraId));
+
mLock.unlock();
}
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 442eb75..44447b4 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -214,8 +214,8 @@ status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) {
supportedPreviewFormats);
}
- previewFpsRange[0] = availableFpsRanges.data.i32[0];
- previewFpsRange[1] = availableFpsRanges.data.i32[1];
+ previewFpsRange[0] = fastInfo.bestStillCaptureFpsRange[0];
+ previewFpsRange[1] = fastInfo.bestStillCaptureFpsRange[1];
// PREVIEW_FRAME_RATE / SUPPORTED_PREVIEW_FRAME_RATES are deprecated, but
// still have to do something sane for them
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index c717a56..0c531c3 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -719,6 +719,43 @@ status_t CameraDeviceClient::prepare(int streamId) {
return res;
}
+status_t CameraDeviceClient::prepare2(int maxCount, int streamId) {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = OK;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ // Guard against trying to prepare non-created streams
+ ssize_t index = NAME_NOT_FOUND;
+ for (size_t i = 0; i < mStreamMap.size(); ++i) {
+ if (streamId == mStreamMap.valueAt(i)) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index == NAME_NOT_FOUND) {
+ ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream created yet",
+ __FUNCTION__, mCameraId, streamId);
+ return BAD_VALUE;
+ }
+
+ if (maxCount <= 0) {
+ ALOGE("%s: Camera %d: Invalid maxCount (%d) specified, must be greater than 0.",
+ __FUNCTION__, mCameraId, maxCount);
+ return BAD_VALUE;
+ }
+
+ // Also returns BAD_VALUE if stream ID was not valid, or stream already
+ // has been used
+ res = mDevice->prepare(maxCount, streamId);
+
+ return res;
+}
+
status_t CameraDeviceClient::tearDown(int streamId) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
@@ -799,6 +836,7 @@ void CameraDeviceClient::notifyIdle() {
if (remoteCb != 0) {
remoteCb->onDeviceIdle();
}
+ Camera2ClientBase::notifyIdle();
}
void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras,
@@ -808,6 +846,7 @@ void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras,
if (remoteCb != 0) {
remoteCb->onCaptureStarted(resultExtras, timestamp);
}
+ Camera2ClientBase::notifyShutter(resultExtras, timestamp);
}
void CameraDeviceClient::notifyPrepared(int streamId) {
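
prepare2() validates its inputs before touching the device: the stream id must map to a stream this client created (the mStreamMap scan) and maxCount must be positive; only then does it delegate to CameraDeviceBase::prepare(maxCount, streamId), which, per the CameraDeviceBase.h comment later in this diff, preallocates buffers asynchronously and reports completion through notifyPrepared(). A hedged caller-side sketch, assuming the ICameraDeviceUser binder interface gained a matching prepare2() (that interface change is not shown in this excerpt):

    // Hypothetical usage: preallocate up to 4 buffers for a stream before a burst.
    status_t warmUpStream(const sp<ICameraDeviceUser>& device, int streamId) {
        status_t res = device->prepare2(/*maxCount*/ 4, streamId);
        if (res != OK) {
            ALOGW("prepare2 failed for stream %d: %d", streamId, res);
        }
        return res;   // completion arrives asynchronously via the prepared callback
    }
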
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 1f8b39d..d1e692c 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -114,6 +114,9 @@ public:
// Tear down stream resources by freeing its unused buffers
virtual status_t tearDown(int streamId);
+ // Prepare stream by preallocating up to maxCount of its buffers
+ virtual status_t prepare2(int maxCount, int streamId);
+
/**
* Interface used by CameraService
*/
@@ -189,6 +192,7 @@ private:
Vector<int> mStreamingRequestList;
int32_t mRequestIdCounter;
+
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ba0b264..5732f80 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -55,7 +55,8 @@ Camera2ClientBase<TClientBase>::Camera2ClientBase(
TClientBase(cameraService, remoteCallback, clientPackageName,
cameraId, cameraFacing, clientPid, clientUid, servicePid),
mSharedCameraCallbacks(remoteCallback),
- mDeviceVersion(cameraService->getDeviceVersion(cameraId))
+ mDeviceVersion(cameraService->getDeviceVersion(cameraId)),
+ mDeviceActive(false)
{
ALOGI("Camera %d: Opened. Client: %s (PID %d, UID %d)", cameraId,
String8(clientPackageName).string(), clientPid, clientUid);
@@ -235,6 +236,13 @@ void Camera2ClientBase<TClientBase>::notifyError(
template <typename TClientBase>
void Camera2ClientBase<TClientBase>::notifyIdle() {
+ if (mDeviceActive) {
+ getCameraService()->updateProxyDeviceState(
+ ICameraServiceProxy::CAMERA_STATE_IDLE,
+ String8::format("%d", TClientBase::mCameraId));
+ }
+ mDeviceActive = false;
+
ALOGV("Camera device is now idle");
}
@@ -244,6 +252,13 @@ void Camera2ClientBase<TClientBase>::notifyShutter(const CaptureResultExtras& re
(void)resultExtras;
(void)timestamp;
+ if (!mDeviceActive) {
+ getCameraService()->updateProxyDeviceState(
+ ICameraServiceProxy::CAMERA_STATE_ACTIVE,
+ String8::format("%d", TClientBase::mCameraId));
+ }
+ mDeviceActive = true;
+
ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
__FUNCTION__, resultExtras.requestId, timestamp);
}
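
mDeviceActive makes the proxy updates edge-triggered: notifyShutter() reports CAMERA_STATE_ACTIVE only on the idle-to-active transition and notifyIdle() reports CAMERA_STATE_IDLE only on the active-to-idle transition, so per-frame shutter callbacks do not flood the proxy with duplicate state changes. The shape of the pattern, with reportState() as a placeholder:

    // Sketch only: report a state only when it changes.
    void onShutter() {                               // called for every capture
        if (!mDeviceActive) {
            reportState(CAMERA_STATE_ACTIVE);        // idle -> active edge
        }
        mDeviceActive = true;
    }

    void onIdle() {                                  // called when the request queue drains
        if (mDeviceActive) {
            reportState(CAMERA_STATE_IDLE);          // active -> idle edge
        }
        mDeviceActive = false;
    }
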
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index f1cacdf..220c5ad 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -136,6 +136,8 @@ protected:
status_t checkPid(const char *checkLocation) const;
virtual void detachDevice();
+
+ bool mDeviceActive;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index cd25949..7b083a3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -294,6 +294,12 @@ class CameraDeviceBase : public virtual RefBase {
virtual status_t tearDown(int streamId) = 0;
/**
+ * Prepare stream by preallocating up to maxCount buffers for it asynchronously.
+ * Calls notifyPrepared() once allocation is complete.
+ */
+ virtual status_t prepare(int maxCount, int streamId) = 0;
+
+ /**
* Get the HAL device version.
*/
virtual uint32_t getDeviceVersion() = 0;
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index 6a4dfe0..16b8aba 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -15,14 +15,18 @@
*/
#define LOG_TAG "CameraModule"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
+#include <utils/Trace.h>
+
#include "CameraModule.h"
namespace android {
void CameraModule::deriveCameraCharacteristicsKeys(
uint32_t deviceVersion, CameraMetadata &chars) {
+ ATRACE_CALL();
// HAL1 devices should not reach here
if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
ALOGV("%s: Cannot derive keys for HAL version < 2.0");
@@ -150,9 +154,7 @@ CameraModule::CameraModule(camera_module_t *module) {
ALOGE("%s: camera hardware module must not be null", __FUNCTION__);
assert(0);
}
-
mModule = module;
- mCameraInfoMap.setCapacity(getNumberOfCameras());
}
CameraModule::~CameraModule()
@@ -168,14 +170,20 @@ CameraModule::~CameraModule()
}
int CameraModule::init() {
+ ATRACE_CALL();
+ int res = OK;
if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4 &&
mModule->init != NULL) {
- return mModule->init();
+ ATRACE_BEGIN("camera_module->init");
+ res = mModule->init();
+ ATRACE_END();
}
- return OK;
+ mCameraInfoMap.setCapacity(getNumberOfCameras());
+ return res;
}
int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
+ ATRACE_CALL();
Mutex::Autolock lock(mCameraInfoLock);
if (cameraId < 0) {
ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
@@ -185,14 +193,20 @@ int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
// Only override static_camera_characteristics for API2 devices
int apiVersion = mModule->common.module_api_version;
if (apiVersion < CAMERA_MODULE_API_VERSION_2_0) {
- return mModule->get_camera_info(cameraId, info);
+ int ret;
+ ATRACE_BEGIN("camera_module->get_camera_info");
+ ret = mModule->get_camera_info(cameraId, info);
+ ATRACE_END();
+ return ret;
}
ssize_t index = mCameraInfoMap.indexOfKey(cameraId);
if (index == NAME_NOT_FOUND) {
// Get camera info from raw module and cache it
camera_info rawInfo, cameraInfo;
+ ATRACE_BEGIN("camera_module->get_camera_info");
int ret = mModule->get_camera_info(cameraId, &rawInfo);
+ ATRACE_END();
if (ret != 0) {
return ret;
}
@@ -217,20 +231,36 @@ int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
}
int CameraModule::open(const char* id, struct hw_device_t** device) {
- return filterOpenErrorCode(mModule->common.methods->open(&mModule->common, id, device));
+ int res;
+ ATRACE_BEGIN("camera_module->open");
+ res = filterOpenErrorCode(mModule->common.methods->open(&mModule->common, id, device));
+ ATRACE_END();
+ return res;
}
int CameraModule::openLegacy(
const char* id, uint32_t halVersion, struct hw_device_t** device) {
- return mModule->open_legacy(&mModule->common, id, halVersion, device);
+ int res;
+ ATRACE_BEGIN("camera_module->open_legacy");
+ res = mModule->open_legacy(&mModule->common, id, halVersion, device);
+ ATRACE_END();
+ return res;
}
int CameraModule::getNumberOfCameras() {
- return mModule->get_number_of_cameras();
+ int numCameras;
+ ATRACE_BEGIN("camera_module->get_number_of_cameras");
+ numCameras = mModule->get_number_of_cameras();
+ ATRACE_END();
+ return numCameras;
}
int CameraModule::setCallbacks(const camera_module_callbacks_t *callbacks) {
- return mModule->set_callbacks(callbacks);
+ int res;
+ ATRACE_BEGIN("camera_module->set_callbacks");
+ res = mModule->set_callbacks(callbacks);
+ ATRACE_END();
+ return res;
}
bool CameraModule::isVendorTagDefined() {
@@ -239,12 +269,18 @@ bool CameraModule::isVendorTagDefined() {
void CameraModule::getVendorTagOps(vendor_tag_ops_t* ops) {
if (mModule->get_vendor_tag_ops) {
+ ATRACE_BEGIN("camera_module->get_vendor_tag_ops");
mModule->get_vendor_tag_ops(ops);
+ ATRACE_END();
}
}
int CameraModule::setTorchMode(const char* camera_id, bool enable) {
- return mModule->set_torch_mode(camera_id, enable);
+ int res;
+ ATRACE_BEGIN("camera_module->set_torch_mode");
+ res = mModule->set_torch_mode(camera_id, enable);
+ ATRACE_END();
+ return res;
}
status_t CameraModule::filterOpenErrorCode(status_t err) {
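The CameraModule changes above wrap each HAL entry point in a matching ATRACE_BEGIN()/ATRACE_END() pair around the call whose result is returned. A minimal RAII sketch of the same idea follows; the ScopedHalTrace helper is illustrative only (it is not part of this patch, and utils/Trace.h already provides a similar ATRACE_NAME macro), but it shows how the END marker can be guaranteed on every return path:

    #define ATRACE_TAG ATRACE_TAG_CAMERA
    #include <utils/Trace.h>

    namespace {
    // Hypothetical helper: opens a trace section on construction and closes it
    // on destruction, so early returns cannot leave an unmatched ATRACE_BEGIN.
    class ScopedHalTrace {
      public:
        explicit ScopedHalTrace(const char* name) { ATRACE_BEGIN(name); }
        ~ScopedHalTrace() { ATRACE_END(); }
    };
    }  // namespace

    // Usage sketch, mirroring CameraModule::setCallbacks() above:
    //   int CameraModule::setCallbacks(const camera_module_callbacks_t* callbacks) {
    //       ScopedHalTrace trace("camera_module->set_callbacks");
    //       return mModule->set_callbacks(callbacks);
    //   }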
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 7f14cd4..35947a9 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -25,7 +25,10 @@
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
#include <system/window.h>
-#include <hardware/camera.h>
+#include "hardware/camera.h"
+#ifdef USE_MEMORY_HEAP_ION
+#include <binder/MemoryHeapIon.h>
+#endif
namespace android {
@@ -322,6 +325,10 @@ public:
void releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (mem == NULL) {
+ ALOGE("%s: NULL memory reference", __FUNCTION__);
+ return;
+ }
if (mDevice->ops->release_recording_frame) {
ssize_t offset;
size_t size;
@@ -501,7 +508,11 @@ private:
mBufSize(buf_size),
mNumBufs(num_buffers)
{
+#ifdef USE_MEMORY_HEAP_ION
+ mHeap = new MemoryHeapIon(fd, buf_size * num_buffers);
+#else
mHeap = new MemoryHeapBase(fd, buf_size * num_buffers);
+#endif
commonInitialization();
}
@@ -509,7 +520,11 @@ private:
mBufSize(buf_size),
mNumBufs(num_buffers)
{
+#ifdef USE_MEMORY_HEAP_ION
+ mHeap = new MemoryHeapIon(buf_size * num_buffers);
+#else
mHeap = new MemoryHeapBase(buf_size * num_buffers);
+#endif
commonInitialization();
}
@@ -541,14 +556,24 @@ private:
camera_memory_t handle;
};
+#ifdef USE_MEMORY_HEAP_ION
+ static camera_memory_t* __get_memory(int fd, size_t buf_size, uint_t num_bufs,
+ void *ion_fd)
+ {
+#else
static camera_memory_t* __get_memory(int fd, size_t buf_size, uint_t num_bufs,
void *user __attribute__((unused)))
{
+#endif
CameraHeapMemory *mem;
if (fd < 0)
mem = new CameraHeapMemory(buf_size, num_bufs);
else
mem = new CameraHeapMemory(fd, buf_size, num_bufs);
+#ifdef USE_MEMORY_HEAP_ION
+ if (ion_fd)
+ *((int *) ion_fd) = mem->mHeap->getHeapID();
+#endif
mem->incStrong(mem);
return &mem->handle;
}
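Under USE_MEMORY_HEAP_ION, __get_memory() repurposes the otherwise-unused void* user argument of the request-memory callback to hand the ION heap fd back to the caller (it stores mHeap->getHeapID() through the pointer). A hedged caller-side sketch, assuming a HAL that passes a pointer to an int as that cookie; mRequestMemory stands in for whatever camera_request_memory hook the HAL holds:

    // Illustrative only: ask for 4 anonymous buffers and recover the ION fd.
    size_t bufSize = 4096;                // example buffer size
    int ionFd = -1;                       // filled in only on the ION build
    camera_memory_t* mem = mRequestMemory(/*fd*/ -1, bufSize, /*num_bufs*/ 4, &ionFd);
    if (mem != NULL && ionFd >= 0) {
        // ionFd is the heap ID of the MemoryHeapIon allocated by __get_memory().
    }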
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index c9c990c..d74f976 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -632,6 +632,12 @@ status_t Camera2Device::tearDown(int streamId) {
return NO_INIT;
}
+status_t Camera2Device::prepare(int maxCount, int streamId) {
+ ATRACE_CALL();
+ ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
+ return NO_INIT;
+}
+
uint32_t Camera2Device::getDeviceVersion() {
ATRACE_CALL();
return mDeviceVersion;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 34c1ded..b4d343c 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -88,6 +88,7 @@ class Camera2Device: public CameraDeviceBase {
// Prepare and tearDown are no-ops
virtual status_t prepare(int streamId);
virtual status_t tearDown(int streamId);
+ virtual status_t prepare(int maxCount, int streamId);
virtual uint32_t getDeviceVersion();
virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 0c941fb..50d9d75 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -44,6 +44,7 @@
#include <utils/Timers.h>
#include "utils/CameraTraces.h"
+#include "mediautils/SchedulingPolicyService.h"
#include "device3/Camera3Device.h"
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
@@ -66,6 +67,7 @@ Camera3Device::Camera3Device(int id):
mNextResultFrameNumber(0),
mNextReprocessResultFrameNumber(0),
mNextShutterFrameNumber(0),
+ mNextReprocessShutterFrameNumber(0),
mListener(NULL)
{
ATRACE_CALL();
@@ -285,19 +287,27 @@ status_t Camera3Device::disconnect() {
mStatusTracker->join();
}
+ camera3_device_t *hal3Device;
{
Mutex::Autolock l(mLock);
mRequestThread.clear();
mStatusTracker.clear();
- if (mHal3Device != NULL) {
- ATRACE_BEGIN("camera3->close");
- mHal3Device->common.close(&mHal3Device->common);
- ATRACE_END();
- mHal3Device = NULL;
- }
+ hal3Device = mHal3Device;
+ }
+
+ // Call close without internal mutex held, as the HAL close may need to
+ // wait on assorted callbacks, etc., to complete before it can return.
+ if (hal3Device != NULL) {
+ ATRACE_BEGIN("camera3->close");
+ hal3Device->common.close(&hal3Device->common);
+ ATRACE_END();
+ }
+ {
+ Mutex::Autolock l(mLock);
+ mHal3Device = NULL;
internalUpdateStatusLocked(STATUS_UNINITIALIZED);
}
@@ -557,6 +567,18 @@ status_t Camera3Device::convertMetadataListToRequestListLocked(
ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
}
+
+ // Set up the batch size if this is a high speed video recording request.
+ if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
+ auto firstRequest = requestList->begin();
+ for (auto& outputStream : (*firstRequest)->mOutputStreams) {
+ if (outputStream->isVideoStream()) {
+ (*firstRequest)->mBatchSize = requestList->size();
+ break;
+ }
+ }
+ }
+
return OK;
}
@@ -1398,7 +1420,7 @@ status_t Camera3Device::flush(int64_t *frameNumber) {
status_t res;
if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
- res = mHal3Device->ops->flush(mHal3Device);
+ res = mRequestThread->flush();
} else {
Mutex::Autolock l(mLock);
res = waitUntilDrainedLocked();
@@ -1408,6 +1430,10 @@ status_t Camera3Device::flush(int64_t *frameNumber) {
}
status_t Camera3Device::prepare(int streamId) {
+ return prepare(camera3::Camera3StreamInterface::ALLOCATE_PIPELINE_MAX, streamId);
+}
+
+status_t Camera3Device::prepare(int maxCount, int streamId) {
ATRACE_CALL();
ALOGV("%s: Camera %d: Preparing stream %d", __FUNCTION__, mId, streamId);
Mutex::Autolock il(mInterfaceLock);
@@ -1432,7 +1458,7 @@ status_t Camera3Device::prepare(int streamId) {
return BAD_VALUE;
}
- return mPreparerThread->prepare(stream);
+ return mPreparerThread->prepare(maxCount, stream);
}
status_t Camera3Device::tearDown(int streamId) {
@@ -1583,6 +1609,7 @@ sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
newRequest->mOutputStreams.push(stream);
}
newRequest->mSettings.erase(ANDROID_REQUEST_OUTPUT_STREAMS);
+ newRequest->mBatchSize = 1;
return newRequest;
}
@@ -1741,6 +1768,21 @@ status_t Camera3Device::configureStreamsLocked() {
// across configure_streams() calls
mRequestThread->configurationComplete();
+ // Boost priority of request thread for high speed recording to SCHED_FIFO
+ if (mIsConstrainedHighSpeedConfiguration) {
+ pid_t requestThreadTid = mRequestThread->getTid();
+ res = requestPriority(getpid(), requestThreadTid,
+ kConstrainedHighSpeedThreadPriority, true);
+ if (res != OK) {
+ ALOGW("Can't set realtime priority for request processing thread: %s (%d)",
+ strerror(-res), res);
+ } else {
+ ALOGD("Set real time priority for request queue thread (tid %d)", requestThreadTid);
+ }
+ } else {
+ // TODO: Set/restore normal priority for normal use cases
+ }
+
// Update device state
mNeedConfig = false;
@@ -2493,18 +2535,6 @@ void Camera3Device::notifyError(const camera3_error_msg_t &msg,
void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
NotificationListener *listener) {
ssize_t idx;
- // Verify ordering of shutter notifications
- {
- Mutex::Autolock l(mOutputLock);
- // TODO: need to track errors for tighter bounds on expected frame number.
- if (msg.frame_number < mNextShutterFrameNumber) {
- SET_ERR("Shutter notification out-of-order. Expected "
- "notification for frame %d, got frame %d",
- mNextShutterFrameNumber, msg.frame_number);
- return;
- }
- mNextShutterFrameNumber = msg.frame_number + 1;
- }
// Set timestamp for the request in the in-flight tracking
// and get the request ID to send upstream
@@ -2514,6 +2544,29 @@ void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
if (idx >= 0) {
InFlightRequest &r = mInFlightMap.editValueAt(idx);
+ // Verify ordering of shutter notifications
+ {
+ Mutex::Autolock l(mOutputLock);
+ // TODO: need to track errors for tighter bounds on expected frame number.
+ if (r.hasInputBuffer) {
+ if (msg.frame_number < mNextReprocessShutterFrameNumber) {
+ SET_ERR("Shutter notification out-of-order. Expected "
+ "notification for frame %d, got frame %d",
+ mNextReprocessShutterFrameNumber, msg.frame_number);
+ return;
+ }
+ mNextReprocessShutterFrameNumber = msg.frame_number + 1;
+ } else {
+ if (msg.frame_number < mNextShutterFrameNumber) {
+ SET_ERR("Shutter notification out-of-order. Expected "
+ "notification for frame %d, got frame %d",
+ mNextShutterFrameNumber, msg.frame_number);
+ return;
+ }
+ mNextShutterFrameNumber = msg.frame_number + 1;
+ }
+ }
+
ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
mId, __FUNCTION__,
msg.frame_number, r.resultExtras.requestId, msg.timestamp);
@@ -2754,6 +2807,17 @@ status_t Camera3Device::RequestThread::clear(
return OK;
}
+status_t Camera3Device::RequestThread::flush() {
+ ATRACE_CALL();
+ Mutex::Autolock l(mFlushLock);
+
+ if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
+ return mHal3Device->ops->flush(mHal3Device);
+ }
+
+ return -ENOTSUP;
+}
+
void Camera3Device::RequestThread::setPaused(bool paused) {
Mutex::Autolock l(mPauseLock);
mDoPause = paused;
@@ -2844,7 +2908,7 @@ void Camera3Device::overrideResultForPrecaptureCancel(
}
bool Camera3Device::RequestThread::threadLoop() {
-
+ ATRACE_CALL();
status_t res;
// Handle paused state.
@@ -2852,203 +2916,240 @@ bool Camera3Device::RequestThread::threadLoop() {
return true;
}
- // Get work to do
-
- sp<CaptureRequest> nextRequest = waitForNextRequest();
- if (nextRequest == NULL) {
+ // Wait for the next batch of requests.
+ waitForNextRequestBatch();
+ if (mNextRequests.size() == 0) {
return true;
}
- // Create request to HAL
- camera3_capture_request_t request = camera3_capture_request_t();
- request.frame_number = nextRequest->mResultExtras.frameNumber;
- Vector<camera3_stream_buffer_t> outputBuffers;
-
- // Get the request ID, if any
- int requestId;
- camera_metadata_entry_t requestIdEntry =
- nextRequest->mSettings.find(ANDROID_REQUEST_ID);
+ // Get the latest request ID, if any
+ int latestRequestId;
+ camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
+ captureRequest->mSettings.find(ANDROID_REQUEST_ID);
if (requestIdEntry.count > 0) {
- requestId = requestIdEntry.data.i32[0];
+ latestRequestId = requestIdEntry.data.i32[0];
} else {
- ALOGW("%s: Did not have android.request.id set in the request",
- __FUNCTION__);
- requestId = NAME_NOT_FOUND;
+ ALOGW("%s: Did not have android.request.id set in the request.", __FUNCTION__);
+ latestRequestId = NAME_NOT_FOUND;
}
- // Insert any queued triggers (before metadata is locked)
- int32_t triggerCount;
- res = insertTriggers(nextRequest);
- if (res < 0) {
- SET_ERR("RequestThread: Unable to insert triggers "
- "(capture request %d, HAL device: %s (%d)",
- request.frame_number, strerror(-res), res);
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
+ // Prepare a batch of HAL requests and output buffers.
+ res = prepareHalRequests();
+ if (res == TIMED_OUT) {
+ // Not a fatal error if getting output buffers times out.
+ cleanUpFailedRequests(/*sendRequestError*/ true);
+ return true;
+ } else if (res != OK) {
+ cleanUpFailedRequests(/*sendRequestError*/ false);
return false;
}
- triggerCount = res;
- bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
+ // Inform waitUntilRequestProcessed thread of a new request ID
+ {
+ Mutex::Autolock al(mLatestRequestMutex);
+
+ mLatestRequestId = latestRequestId;
+ mLatestRequestSignal.signal();
+ }
+
+ // Submit a batch of requests to HAL.
+ // Use the flush lock only when submitting multiple requests in a batch.
+ // TODO: The problem with the flush lock is that flush() will be blocked by process_capture_request(),
+ // which may take a long time to finish, so synchronizing flush() and
+ // process_capture_request() defeats the purpose of cancelling requests ASAP with flush().
+ // For now, only synchronize for high speed recording; we should figure out a way to
+ // remove the synchronization.
+ bool useFlushLock = mNextRequests.size() > 1;
+
+ if (useFlushLock) {
+ mFlushLock.lock();
+ }
+
+ ALOGVV("%s: %d: submitting %d requests in a batch.", __FUNCTION__, __LINE__,
+ mNextRequests.size());
+ for (auto& nextRequest : mNextRequests) {
+ // Submit request and block until ready for next one
+ ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
+ ATRACE_BEGIN("camera3->process_capture_request");
+ res = mHal3Device->ops->process_capture_request(mHal3Device, &nextRequest.halRequest);
+ ATRACE_END();
- // If the request is the same as last, or we had triggers last time
- if (mPrevRequest != nextRequest || triggersMixedIn) {
- /**
- * HAL workaround:
- * Insert a dummy trigger ID if a trigger is set but no trigger ID is
- */
- res = addDummyTriggerIds(nextRequest);
if (res != OK) {
- SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
- "(capture request %d, HAL device: %s (%d)",
- request.frame_number, strerror(-res), res);
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
+ // Should only get a failure here for malformed requests or device-level
+ // errors, so consider all errors fatal. Bad metadata failures should
+ // come through notify.
+ SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
+ " device: %s (%d)", nextRequest.halRequest.frame_number, strerror(-res),
+ res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ if (useFlushLock) {
+ mFlushLock.unlock();
+ }
return false;
}
- /**
- * The request should be presorted so accesses in HAL
- * are O(logn). Sidenote, sorting a sorted metadata is nop.
- */
- nextRequest->mSettings.sort();
- request.settings = nextRequest->mSettings.getAndLock();
- mPrevRequest = nextRequest;
- ALOGVV("%s: Request settings are NEW", __FUNCTION__);
-
- IF_ALOGV() {
- camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
- find_camera_metadata_ro_entry(
- request.settings,
- ANDROID_CONTROL_AF_TRIGGER,
- &e
- );
- if (e.count > 0) {
- ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
- __FUNCTION__,
- request.frame_number,
- e.data.u8[0]);
- }
- }
- } else {
- // leave request.settings NULL to indicate 'reuse latest given'
- ALOGVV("%s: Request settings are REUSED",
- __FUNCTION__);
- }
+ // Mark that the request has been submitted successfully.
+ nextRequest.submitted = true;
- uint32_t totalNumBuffers = 0;
+ // Update the latest request sent to HAL
+ if (nextRequest.halRequest.settings != NULL) { // Don't update if they were unchanged
+ Mutex::Autolock al(mLatestRequestMutex);
- // Fill in buffers
- if (nextRequest->mInputStream != NULL) {
- request.input_buffer = &nextRequest->mInputBuffer;
- totalNumBuffers += 1;
- } else {
- request.input_buffer = NULL;
- }
+ camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
+ mLatestRequest.acquire(cloned);
+ }
- outputBuffers.insertAt(camera3_stream_buffer_t(), 0,
- nextRequest->mOutputStreams.size());
- request.output_buffers = outputBuffers.array();
- for (size_t i = 0; i < nextRequest->mOutputStreams.size(); i++) {
- res = nextRequest->mOutputStreams.editItemAt(i)->
- getBuffer(&outputBuffers.editItemAt(i));
+ if (nextRequest.halRequest.settings != NULL) {
+ nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
+ }
+
+ // Remove any previously queued triggers (after unlock)
+ res = removeTriggers(mPrevRequest);
if (res != OK) {
- // Can't get output buffer from gralloc queue - this could be due to
- // abandoned queue or other consumer misbehavior, so not a fatal
- // error
- ALOGE("RequestThread: Can't get output buffer, skipping request:"
- " %s (%d)", strerror(-res), res);
- {
- Mutex::Autolock l(mRequestLock);
- if (mListener != NULL) {
- mListener->notifyError(
- ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
- nextRequest->mResultExtras);
- }
+ SET_ERR("RequestThread: Unable to remove triggers "
+ "(capture request %d, HAL device: %s (%d)",
+ nextRequest.halRequest.frame_number, strerror(-res), res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ if (useFlushLock) {
+ mFlushLock.unlock();
}
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
- return true;
+ return false;
}
- request.num_output_buffers++;
}
- totalNumBuffers += request.num_output_buffers;
- // Log request in the in-flight queue
- sp<Camera3Device> parent = mParent.promote();
- if (parent == NULL) {
- // Should not happen, and nowhere to send errors to, so just log it
- CLOGE("RequestThread: Parent is gone");
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
- return false;
+ if (useFlushLock) {
+ mFlushLock.unlock();
}
- res = parent->registerInFlight(request.frame_number,
- totalNumBuffers, nextRequest->mResultExtras,
- /*hasInput*/request.input_buffer != NULL,
- nextRequest->mAeTriggerCancelOverride);
- ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
- ", burstId = %" PRId32 ".",
- __FUNCTION__,
- nextRequest->mResultExtras.requestId, nextRequest->mResultExtras.frameNumber,
- nextRequest->mResultExtras.burstId);
- if (res != OK) {
- SET_ERR("RequestThread: Unable to register new in-flight request:"
- " %s (%d)", strerror(-res), res);
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
- return false;
+ // Unset as current request
+ {
+ Mutex::Autolock l(mRequestLock);
+ mNextRequests.clear();
}
- // Inform waitUntilRequestProcessed thread of a new request ID
- {
- Mutex::Autolock al(mLatestRequestMutex);
+ return true;
+}
- mLatestRequestId = requestId;
- mLatestRequestSignal.signal();
- }
+status_t Camera3Device::RequestThread::prepareHalRequests() {
+ ATRACE_CALL();
- // Submit request and block until ready for next one
- ATRACE_ASYNC_BEGIN("frame capture", request.frame_number);
- ATRACE_BEGIN("camera3->process_capture_request");
- res = mHal3Device->ops->process_capture_request(mHal3Device, &request);
- ATRACE_END();
+ for (auto& nextRequest : mNextRequests) {
+ sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
+ camera3_capture_request_t* halRequest = &nextRequest.halRequest;
+ Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
- if (res != OK) {
- // Should only get a failure here for malformed requests or device-level
- // errors, so consider all errors fatal. Bad metadata failures should
- // come through notify.
- SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
- " device: %s (%d)", request.frame_number, strerror(-res), res);
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
- return false;
- }
+ // Prepare a request to HAL
+ halRequest->frame_number = captureRequest->mResultExtras.frameNumber;
- // Update the latest request sent to HAL
- if (request.settings != NULL) { // Don't update them if they were unchanged
- Mutex::Autolock al(mLatestRequestMutex);
+ // Insert any queued triggers (before metadata is locked)
+ status_t res = insertTriggers(captureRequest);
- camera_metadata_t* cloned = clone_camera_metadata(request.settings);
- mLatestRequest.acquire(cloned);
- }
+ if (res < 0) {
+ SET_ERR("RequestThread: Unable to insert triggers "
+ "(capture request %d, HAL device: %s (%d)",
+ halRequest->frame_number, strerror(-res), res);
+ return INVALID_OPERATION;
+ }
+ int triggerCount = res;
+ bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
+ mPrevTriggers = triggerCount;
- if (request.settings != NULL) {
- nextRequest->mSettings.unlock(request.settings);
- }
+ // If the request is the same as last, or we had triggers last time
+ if (mPrevRequest != captureRequest || triggersMixedIn) {
+ /**
+ * HAL workaround:
+ * Insert a dummy trigger ID if a trigger is set but no trigger ID is set.
+ */
+ res = addDummyTriggerIds(captureRequest);
+ if (res != OK) {
+ SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
+ "(capture request %d, HAL device: %s (%d)",
+ halRequest->frame_number, strerror(-res), res);
+ return INVALID_OPERATION;
+ }
- // Unset as current request
- {
- Mutex::Autolock l(mRequestLock);
- mNextRequest.clear();
- }
+ /**
+ * The request should be presorted so accesses in HAL
+ * are O(log n). Side note: sorting already-sorted metadata is a no-op.
+ */
+ captureRequest->mSettings.sort();
+ halRequest->settings = captureRequest->mSettings.getAndLock();
+ mPrevRequest = captureRequest;
+ ALOGVV("%s: Request settings are NEW", __FUNCTION__);
+
+ IF_ALOGV() {
+ camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
+ find_camera_metadata_ro_entry(
+ halRequest->settings,
+ ANDROID_CONTROL_AF_TRIGGER,
+ &e
+ );
+ if (e.count > 0) {
+ ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
+ __FUNCTION__,
+ halRequest->frame_number,
+ e.data.u8[0]);
+ }
+ }
+ } else {
+ // leave halRequest->settings NULL to indicate 'reuse latest given'
+ ALOGVV("%s: Request settings are REUSED",
+ __FUNCTION__);
+ }
- // Remove any previously queued triggers (after unlock)
- res = removeTriggers(mPrevRequest);
- if (res != OK) {
- SET_ERR("RequestThread: Unable to remove triggers "
- "(capture request %d, HAL device: %s (%d)",
- request.frame_number, strerror(-res), res);
- return false;
+ uint32_t totalNumBuffers = 0;
+
+ // Fill in buffers
+ if (captureRequest->mInputStream != NULL) {
+ halRequest->input_buffer = &captureRequest->mInputBuffer;
+ totalNumBuffers += 1;
+ } else {
+ halRequest->input_buffer = NULL;
+ }
+
+ outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
+ captureRequest->mOutputStreams.size());
+ halRequest->output_buffers = outputBuffers->array();
+ for (size_t i = 0; i < captureRequest->mOutputStreams.size(); i++) {
+ res = captureRequest->mOutputStreams.editItemAt(i)->
+ getBuffer(&outputBuffers->editItemAt(i));
+ if (res != OK) {
+ // Can't get output buffer from gralloc queue - this could be due to
+ // abandoned queue or other consumer misbehavior, so not a fatal
+ // error
+ ALOGE("RequestThread: Can't get output buffer, skipping request:"
+ " %s (%d)", strerror(-res), res);
+
+ return TIMED_OUT;
+ }
+ halRequest->num_output_buffers++;
+ }
+ totalNumBuffers += halRequest->num_output_buffers;
+
+ // Log request in the in-flight queue
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent == NULL) {
+ // Should not happen, and nowhere to send errors to, so just log it
+ CLOGE("RequestThread: Parent is gone");
+ return INVALID_OPERATION;
+ }
+ res = parent->registerInFlight(halRequest->frame_number,
+ totalNumBuffers, captureRequest->mResultExtras,
+ /*hasInput*/halRequest->input_buffer != NULL,
+ captureRequest->mAeTriggerCancelOverride);
+ ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
+ ", burstId = %" PRId32 ".",
+ __FUNCTION__,
+ captureRequest->mResultExtras.requestId, captureRequest->mResultExtras.frameNumber,
+ captureRequest->mResultExtras.burstId);
+ if (res != OK) {
+ SET_ERR("RequestThread: Unable to register new in-flight request:"
+ " %s (%d)", strerror(-res), res);
+ return INVALID_OPERATION;
+ }
}
- mPrevTriggers = triggerCount;
- return true;
+ return OK;
}
CameraMetadata Camera3Device::RequestThread::getLatestRequest() const {
@@ -3063,11 +3164,13 @@ bool Camera3Device::RequestThread::isStreamPending(
sp<Camera3StreamInterface>& stream) {
Mutex::Autolock l(mRequestLock);
- if (mNextRequest != nullptr) {
- for (const auto& s : mNextRequest->mOutputStreams) {
- if (stream == s) return true;
+ for (const auto& nextRequest : mNextRequests) {
+ if (!nextRequest.submitted) {
+ for (const auto& s : nextRequest.captureRequest->mOutputStreams) {
+ if (stream == s) return true;
+ }
+ if (stream == nextRequest.captureRequest->mInputStream) return true;
}
- if (stream == mNextRequest->mInputStream) return true;
}
for (const auto& request : mRequestQueue) {
@@ -3087,37 +3190,95 @@ bool Camera3Device::RequestThread::isStreamPending(
return false;
}
-void Camera3Device::RequestThread::cleanUpFailedRequest(
- camera3_capture_request_t &request,
- sp<CaptureRequest> &nextRequest,
- Vector<camera3_stream_buffer_t> &outputBuffers) {
+void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
+ if (mNextRequests.empty()) {
+ return;
+ }
+
+ for (auto& nextRequest : mNextRequests) {
+ // Skip the ones that have been submitted successfully.
+ if (nextRequest.submitted) {
+ continue;
+ }
- if (request.settings != NULL) {
- nextRequest->mSettings.unlock(request.settings);
+ sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
+ camera3_capture_request_t* halRequest = &nextRequest.halRequest;
+ Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
+
+ if (halRequest->settings != NULL) {
+ captureRequest->mSettings.unlock(halRequest->settings);
+ }
+
+ if (captureRequest->mInputStream != NULL) {
+ captureRequest->mInputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
+ captureRequest->mInputStream->returnInputBuffer(captureRequest->mInputBuffer);
+ }
+
+ for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
+ outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+ captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0);
+ }
+
+ if (sendRequestError) {
+ Mutex::Autolock l(mRequestLock);
+ if (mListener != NULL) {
+ mListener->notifyError(
+ ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ captureRequest->mResultExtras);
+ }
+ }
}
- if (nextRequest->mInputStream != NULL) {
- nextRequest->mInputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
- nextRequest->mInputStream->returnInputBuffer(nextRequest->mInputBuffer);
+
+ Mutex::Autolock l(mRequestLock);
+ mNextRequests.clear();
+}
+
+void Camera3Device::RequestThread::waitForNextRequestBatch() {
+ // Optimized a bit for the simple steady-state case (single repeating
+ // request), to avoid putting that request in the queue temporarily.
+ Mutex::Autolock l(mRequestLock);
+
+ assert(mNextRequests.empty());
+
+ NextRequest nextRequest;
+ nextRequest.captureRequest = waitForNextRequestLocked();
+ if (nextRequest.captureRequest == nullptr) {
+ return;
+ }
+
+ nextRequest.halRequest = camera3_capture_request_t();
+ nextRequest.submitted = false;
+ mNextRequests.add(nextRequest);
+
+ // Wait for additional requests
+ const size_t batchSize = nextRequest.captureRequest->mBatchSize;
+
+ for (size_t i = 1; i < batchSize; i++) {
+ NextRequest additionalRequest;
+ additionalRequest.captureRequest = waitForNextRequestLocked();
+ if (additionalRequest.captureRequest == nullptr) {
+ break;
+ }
+
+ additionalRequest.halRequest = camera3_capture_request_t();
+ additionalRequest.submitted = false;
+ mNextRequests.add(additionalRequest);
}
- for (size_t i = 0; i < request.num_output_buffers; i++) {
- outputBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
- nextRequest->mOutputStreams.editItemAt(i)->returnBuffer(
- outputBuffers[i], 0);
+
+ if (mNextRequests.size() < batchSize) {
+ ALOGE("RequestThread: only get %d out of %d requests. Skipping requests.",
+ mNextRequests.size(), batchSize);
+ cleanUpFailedRequests(/*sendRequestError*/true);
}
- Mutex::Autolock l(mRequestLock);
- mNextRequest.clear();
+ return;
}
sp<Camera3Device::CaptureRequest>
- Camera3Device::RequestThread::waitForNextRequest() {
+ Camera3Device::RequestThread::waitForNextRequestLocked() {
status_t res;
sp<CaptureRequest> nextRequest;
- // Optimized a bit for the simple steady-state case (single repeating
- // request), to avoid putting that request in the queue temporarily.
- Mutex::Autolock l(mRequestLock);
-
while (mRequestQueue.empty()) {
if (!mRepeatingRequests.empty()) {
// Always atomically enqueue all requests in a repeating request
@@ -3212,8 +3373,6 @@ sp<Camera3Device::CaptureRequest>
handleAePrecaptureCancelRequest(nextRequest);
- mNextRequest = nextRequest;
-
return nextRequest;
}
@@ -3478,12 +3637,12 @@ Camera3Device::PreparerThread::~PreparerThread() {
clear();
}
-status_t Camera3Device::PreparerThread::prepare(sp<Camera3StreamInterface>& stream) {
+status_t Camera3Device::PreparerThread::prepare(int maxCount, sp<Camera3StreamInterface>& stream) {
status_t res;
Mutex::Autolock l(mLock);
- res = stream->startPrepare();
+ res = stream->startPrepare(maxCount);
if (res == OK) {
// No preparation needed, fire listener right off
ALOGV("%s: Stream %d already prepared", __FUNCTION__, stream->getId());
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 5287058..2cd5af3 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -62,6 +62,7 @@ class Camera3Device :
public CameraDeviceBase,
private camera3_callback_ops {
public:
+
Camera3Device(int id);
virtual ~Camera3Device();
@@ -143,6 +144,8 @@ class Camera3Device :
virtual status_t tearDown(int streamId);
+ virtual status_t prepare(int maxCount, int streamId);
+
virtual uint32_t getDeviceVersion();
virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
@@ -158,6 +161,8 @@ class Camera3Device :
static const nsecs_t kActiveTimeout = 500000000; // 500 ms
static const size_t kInFlightWarnLimit = 20;
static const size_t kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
+ // SCHED_FIFO priority for request submission thread in HFR mode
+ static const int kConstrainedHighSpeedThreadPriority = 1;
struct RequestTrigger;
// minimal jpeg buffer size: 256KB + blob header
@@ -261,6 +266,11 @@ class Camera3Device :
// Used to cancel AE precapture trigger for devices doesn't support
// CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
AeTriggerCancelOverride_t mAeTriggerCancelOverride;
+ // The number of requests that should be submitted to the HAL in one batch.
+ // For example, if the batch size is 8, this request and the following 7
+ // requests will be submitted together. The batch size of those following 7
+ // requests will be ignored by the request thread.
+ int mBatchSize;
};
typedef List<sp<CaptureRequest> > RequestList;
@@ -438,6 +448,11 @@ class Camera3Device :
int64_t *lastFrameNumber = NULL);
/**
+ * Flush all pending requests in HAL.
+ */
+ status_t flush();
+
+ /**
* Queue a trigger to be dispatched with the next outgoing
* process_capture_request. The settings for that request only
* will be temporarily rewritten to add the trigger tag/value.
@@ -498,16 +513,30 @@ class Camera3Device :
static const nsecs_t kRequestTimeout = 50e6; // 50 ms
- // Waits for a request, or returns NULL if times out.
- sp<CaptureRequest> waitForNextRequest();
+ // Used to prepare a batch of requests.
+ struct NextRequest {
+ sp<CaptureRequest> captureRequest;
+ camera3_capture_request_t halRequest;
+ Vector<camera3_stream_buffer_t> outputBuffers;
+ bool submitted;
+ };
- // Return buffers, etc, for a request that couldn't be fully
- // constructed. The buffers will be returned in the ERROR state
- // to mark them as not having valid data.
- // All arguments will be modified.
- void cleanUpFailedRequest(camera3_capture_request_t &request,
- sp<CaptureRequest> &nextRequest,
- Vector<camera3_stream_buffer_t> &outputBuffers);
+ // Wait for the next batch of requests and put them in mNextRequests. mNextRequests will
+ // be empty if it times out.
+ void waitForNextRequestBatch();
+
+ // Waits for a request, or returns NULL if it times out. Must be called with mRequestLock held.
+ sp<CaptureRequest> waitForNextRequestLocked();
+
+ // Prepare HAL requests and output buffers in mNextRequests. Return TIMED_OUT if getting any
+ // output buffer timed out. If an error is returned, the caller should clean up the pending
+ // request batch.
+ status_t prepareHalRequests();
+
+ // Return buffers, etc, for requests in mNextRequests that couldn't be fully constructed and
+ // send request errors if sendRequestError is true. The buffers will be returned in the
+ // ERROR state to mark them as not having valid data. mNextRequests will be cleared.
+ void cleanUpFailedRequests(bool sendRequestError);
// Pause handling
bool waitIfPaused();
@@ -536,10 +565,13 @@ class Camera3Device :
Condition mRequestSignal;
RequestList mRequestQueue;
RequestList mRepeatingRequests;
- // The next request being prepped for submission to the HAL, no longer
+ // The next batch of requests being prepped for submission to the HAL, no longer
// on the request queue. Read-only even with mRequestLock held, outside
// of threadLoop
- sp<const CaptureRequest> mNextRequest;
+ Vector<NextRequest> mNextRequests;
+
+ // To protect flush() and sending a request batch to HAL.
+ Mutex mFlushLock;
bool mReconfigured;
@@ -698,10 +730,11 @@ class Camera3Device :
void setNotificationListener(NotificationListener *listener);
/**
- * Queue up a stream to be prepared. Streams are processed by
- * a background thread in FIFO order
+ * Queue up a stream to be prepared. Streams are processed by a background thread in FIFO
+ * order. Pre-allocate up to maxCount buffers for the stream, or the maximum number needed
+ * for the pipeline if maxCount is ALLOCATE_PIPELINE_MAX.
*/
- status_t prepare(sp<camera3::Camera3StreamInterface>& stream);
+ status_t prepare(int maxCount, sp<camera3::Camera3StreamInterface>& stream);
/**
* Cancel all current and pending stream preparation
@@ -738,7 +771,10 @@ class Camera3Device :
uint32_t mNextResultFrameNumber;
// the minimal frame number of the next reprocess result
uint32_t mNextReprocessResultFrameNumber;
+ // the minimal frame number of the next non-reprocess shutter
uint32_t mNextShutterFrameNumber;
+ // the minimal frame number of the next reprocess shutter
+ uint32_t mNextReprocessShutterFrameNumber;
List<CaptureResult> mResultQueue;
Condition mResultSignal;
NotificationListener *mListener;
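The header additions above document the batching handshake: convertMetadataListToRequestListLocked() tags only the first request of a constrained high-speed burst with mBatchSize = requestList->size(), and waitForNextRequestBatch() later drains that many requests from the queue in one go. A small self-contained model of that handshake (the types and names below are stand-ins, not the real classes):

    #include <deque>
    #include <memory>
    #include <vector>

    struct Req { int batchSize = 1; bool targetsVideoStream = false; };
    using ReqPtr = std::shared_ptr<Req>;

    // Producer side: mark the first request of a high-speed burst.
    void tagBurst(std::vector<ReqPtr>& burst, bool constrainedHighSpeed) {
        if (constrainedHighSpeed && !burst.empty() && burst.front()->targetsVideoStream) {
            burst.front()->batchSize = static_cast<int>(burst.size());
        }
    }

    // Request-thread side: pull one request, then (batchSize - 1) more.
    std::vector<ReqPtr> takeBatch(std::deque<ReqPtr>& queue) {
        std::vector<ReqPtr> batch;
        if (queue.empty()) return batch;
        batch.push_back(queue.front());
        queue.pop_front();
        for (int i = 1; i < batch.front()->batchSize && !queue.empty(); i++) {
            batch.push_back(queue.front());
            queue.pop_front();
        }
        return batch;
    }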
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index ecb8ac8..1d9d04f 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -92,6 +92,10 @@ status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) const {
return OK;
}
+bool Camera3DummyStream::isVideoStream() const {
+ return false;
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 3a3dbf4..97c0c96 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -54,6 +54,11 @@ class Camera3DummyStream :
status_t setTransform(int transform);
+ /**
+ * Return if this output stream is for video encoding.
+ */
+ bool isVideoStream() const;
+
protected:
/**
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 8c611d5..3f0a736 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -426,6 +426,17 @@ status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
return res;
}
+bool Camera3OutputStream::isVideoStream() const {
+ uint32_t usage = 0;
+ status_t res = getEndpointUsage(&usage);
+ if (res != OK) {
+ ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+ return false;
+ }
+
+ return (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0;
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 941d693..3c083ec 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -64,6 +64,11 @@ class Camera3OutputStream :
*/
status_t setTransform(int transform);
+ /**
+ * Return if this output stream is for video encoding.
+ */
+ bool isVideoStream() const;
+
protected:
Camera3OutputStream(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, int format,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index aae72cf..df89b34 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -34,6 +34,11 @@ class Camera3OutputStreamInterface : public virtual Camera3StreamInterface {
* HAL_TRANSFORM_* / NATIVE_WINDOW_TRANSFORM_* constants.
*/
virtual status_t setTransform(int transform) = 0;
+
+ /**
+ * Return if this output stream is for video encoding.
+ */
+ virtual bool isVideoStream() const = 0;
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 2527fd6..96299b3 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -53,7 +53,8 @@ Camera3Stream::Camera3Stream(int id,
mName(String8::format("Camera3Stream[%d]", id)),
mMaxSize(maxSize),
mState(STATE_CONSTRUCTED),
- mStatusId(StatusTracker::NO_STATUS_ID) {
+ mStatusId(StatusTracker::NO_STATUS_ID),
+ mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) {
camera3_stream::stream_type = type;
camera3_stream::width = width;
@@ -252,12 +253,18 @@ bool Camera3Stream::isUnpreparable() {
return mStreamUnpreparable;
}
-status_t Camera3Stream::startPrepare() {
+status_t Camera3Stream::startPrepare(int maxCount) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
status_t res = OK;
+ if (maxCount < 0) {
+ ALOGE("%s: Stream %d: Can't prepare stream if max buffer count (%d) is < 0",
+ __FUNCTION__, mId, maxCount);
+ return BAD_VALUE;
+ }
+
// This function should be only called when the stream is configured already.
if (mState != STATE_CONFIGURED) {
ALOGE("%s: Stream %d: Can't prepare stream if stream is not in CONFIGURED "
@@ -279,9 +286,19 @@ status_t Camera3Stream::startPrepare() {
return INVALID_OPERATION;
}
+
+
+ size_t pipelineMax = getBufferCountLocked();
+ size_t clampedCount = (pipelineMax < static_cast<size_t>(maxCount)) ?
+ pipelineMax : static_cast<size_t>(maxCount);
+ size_t bufferCount = (maxCount == Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) ?
+ pipelineMax : clampedCount;
+
+ mPrepared = bufferCount <= mLastMaxCount;
+
if (mPrepared) return OK;
- size_t bufferCount = getBufferCountLocked();
+ mLastMaxCount = bufferCount;
mPreparedBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount);
mPreparedBufferIdx = 0;
@@ -438,8 +455,9 @@ status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
if (res != OK) {
if (res == TIMED_OUT) {
- ALOGE("%s: wait for output buffer return timed out after %lldms", __FUNCTION__,
- kWaitForBufferDuration / 1000000LL);
+ ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
+ __FUNCTION__, kWaitForBufferDuration / 1000000LL,
+ camera3_stream::max_buffers);
}
return res;
}
@@ -469,9 +487,12 @@ status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
status_t res = returnBufferLocked(buffer, timestamp);
if (res == OK) {
fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true);
- mOutputBufferReturnedSignal.signal();
}
+ // Even if returning the buffer failed, we still want to signal whoever is waiting for the
+ // buffer to be returned.
+ mOutputBufferReturnedSignal.signal();
+
return res;
}
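startPrepare(maxCount) above now clamps the requested pre-allocation count to the stream's pipeline maximum, records it in mLastMaxCount, and treats the stream as already prepared when a previous call covered at least as many buffers. A short sketch of the count selection with a worked example, using ALLOCATE_PIPELINE_MAX = 0 as declared in Camera3StreamInterface:

    #include <cstddef>

    enum { ALLOCATE_PIPELINE_MAX = 0 };   // mirrors Camera3StreamInterface

    // How many buffers a startPrepare(maxCount) call would allocate.
    size_t prepareBufferCount(size_t pipelineMax, int maxCount) {
        if (maxCount == ALLOCATE_PIPELINE_MAX) {
            return pipelineMax;
        }
        size_t requested = static_cast<size_t>(maxCount);
        return requested < pipelineMax ? requested : pipelineMax;   // clamp
    }

    // Example with pipelineMax = 8:
    //   prepareBufferCount(8, ALLOCATE_PIPELINE_MAX) == 8   (full prepare)
    //   prepareBufferCount(8, 4)                     == 4   (partial prepare)
    //   prepareBufferCount(8, 32)                    == 8   (clamped)
    // After a full prepare sets mLastMaxCount to 8, a later request for 4
    // satisfies bufferCount <= mLastMaxCount and returns OK immediately.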
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index bab2177..753280b 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -188,7 +188,9 @@ class Camera3Stream :
/**
* Start stream preparation. May only be called in the CONFIGURED state,
- * when no valid buffers have yet been returned to this stream.
+ * when no valid buffers have yet been returned to this stream. Prepares
+ * up to maxCount buffers, or the maximum number of buffers needed by the
+ * pipeline if maxCount is ALLOCATE_PIPELINE_MAX.
*
* If no preparation is necessary, returns OK and does not transition to
* PREPARING state. Otherwise, returns NOT_ENOUGH_DATA and transitions
@@ -204,7 +206,7 @@ class Camera3Stream :
* INVALID_OPERATION if called when not in CONFIGURED state, or a
* valid buffer has already been returned to this stream.
*/
- status_t startPrepare();
+ status_t startPrepare(int maxCount);
/**
* Check if the stream is mid-preparing.
@@ -444,6 +446,9 @@ class Camera3Stream :
Vector<camera3_stream_buffer_t> mPreparedBuffers;
size_t mPreparedBufferIdx;
+ // Number of buffers allocated on last prepare call.
+ int mLastMaxCount;
+
}; // class Camera3Stream
}; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index c086eaf..54009ae 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -34,6 +34,11 @@ class StatusTracker;
*/
class Camera3StreamInterface : public virtual RefBase {
public:
+
+ enum {
+ ALLOCATE_PIPELINE_MAX = 0, // Allocate max buffers used by a given surface
+ };
+
/**
* Get the stream's ID
*/
@@ -98,7 +103,9 @@ class Camera3StreamInterface : public virtual RefBase {
/**
* Start stream preparation. May only be called in the CONFIGURED state,
- * when no valid buffers have yet been returned to this stream.
+ * when no valid buffers have yet been returned to this stream. Prepares
+ * up to maxCount buffers, or the maximum number of buffers needed by the
+ * pipeline if maxCount is ALLOCATE_PIPELINE_MAX.
*
* If no preparation is necessary, returns OK and does not transition to
* PREPARING state. Otherwise, returns NOT_ENOUGH_DATA and transitions
@@ -112,7 +119,7 @@ class Camera3StreamInterface : public virtual RefBase {
* INVALID_OPERATION if called when not in CONFIGURED state, or a
* valid buffer has already been returned to this stream.
*/
- virtual status_t startPrepare() = 0;
+ virtual status_t startPrepare(int maxCount) = 0;
/**
* Check if the stream is mid-preparing.
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index e54cc5a..4790754 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -96,6 +96,15 @@ status_t ResourceManagerService::dump(int fd, const Vector<String16>& /* args */
const size_t SIZE = 256;
char buffer[SIZE];
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ result.appendFormat("Permission Denial: "
+ "can't dump ResourceManagerService from pid=%d, uid=%d\n",
+ IPCThreadState::self()->getCallingPid(),
+ IPCThreadState::self()->getCallingUid());
+ write(fd, result.string(), result.size());
+ return PERMISSION_DENIED;
+ }
+
snprintf(buffer, SIZE, "ResourceManagerService: %p\n", this);
result.append(buffer);
result.append(" Policies:\n");