diff options
436 files changed, 41536 insertions, 19153 deletions
diff --git a/CleanSpec.mk b/CleanSpec.mk new file mode 100644 index 0000000..e6d9ebf --- /dev/null +++ b/CleanSpec.mk @@ -0,0 +1,52 @@ +# Copyright (C) 2012 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# If you don't need to do a full clean build but would like to touch +# a file or delete some intermediate files, add a clean step to the end +# of the list. These steps will only be run once, if they haven't been +# run before. +# +# E.g.: +# $(call add-clean-step, touch -c external/sqlite/sqlite3.h) +# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates) +# +# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with +# files that are missing or have been moved. +# +# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory. +# Use $(OUT_DIR) to refer to the "out" directory. +# +# If you need to re-do something that's already mentioned, just copy +# the command and add it to the bottom of the list. E.g., if a change +# that you made last week required touching a file and a change you +# made today requires touching the same file, just copy the old +# touch step and add it to the end of the list. 
+# +# ************************************************ +# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST +# ************************************************ + +# For example: +#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates) +#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates) +#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f) +#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libmedia_native_intermediates) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/libmedia_native.so) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/system/lib/libmedia_native.so) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libmedia_native.so) +# ************************************************ +# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST +# ************************************************ diff --git a/camera/Android.mk b/camera/Android.mk index 7286f92..fa518ff 100644 --- a/camera/Android.mk +++ b/camera/Android.mk @@ -1,22 +1,36 @@ -LOCAL_PATH:= $(call my-dir) +CAMERA_CLIENT_LOCAL_PATH:= $(call my-dir) +include $(call all-subdir-makefiles) include $(CLEAR_VARS) +LOCAL_PATH := $(CAMERA_CLIENT_LOCAL_PATH) + LOCAL_SRC_FILES:= \ Camera.cpp \ + CameraMetadata.cpp \ CameraParameters.cpp \ ICamera.cpp \ ICameraClient.cpp \ ICameraService.cpp \ + ICameraServiceListener.cpp \ ICameraRecordingProxy.cpp \ - ICameraRecordingProxyListener.cpp + ICameraRecordingProxyListener.cpp \ + IProCameraUser.cpp \ + IProCameraCallbacks.cpp \ + ProCamera.cpp \ + CameraBase.cpp \ LOCAL_SHARED_LIBRARIES := \ libcutils \ libutils \ + liblog \ libbinder \ libhardware \ libui \ - libgui + libgui \ + libcamera_metadata \ + +LOCAL_C_INCLUDES += \ + system/media/camera/include \ LOCAL_MODULE:= libcamera_client diff --git a/camera/Camera.cpp b/camera/Camera.cpp index d43cb0b..1b136de 100644 
--- a/camera/Camera.cpp +++ b/camera/Camera.cpp @@ -19,6 +19,7 @@ #define LOG_TAG "Camera" #include <utils/Log.h> #include <utils/threads.h> +#include <utils/String16.h> #include <binder/IPCThreadState.h> #include <binder/IServiceManager.h> #include <binder/IMemory.h> @@ -26,46 +27,16 @@ #include <camera/Camera.h> #include <camera/ICameraRecordingProxyListener.h> #include <camera/ICameraService.h> +#include <camera/ICamera.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <gui/Surface.h> namespace android { -// client singleton for camera service binder interface -Mutex Camera::mLock; -sp<ICameraService> Camera::mCameraService; -sp<Camera::DeathNotifier> Camera::mDeathNotifier; - -// establish binder interface to camera service -const sp<ICameraService>& Camera::getCameraService() -{ - Mutex::Autolock _l(mLock); - if (mCameraService.get() == 0) { - sp<IServiceManager> sm = defaultServiceManager(); - sp<IBinder> binder; - do { - binder = sm->getService(String16("media.camera")); - if (binder != 0) - break; - ALOGW("CameraService not published, waiting..."); - usleep(500000); // 0.5 s - } while(true); - if (mDeathNotifier == NULL) { - mDeathNotifier = new DeathNotifier(); - } - binder->linkToDeath(mDeathNotifier); - mCameraService = interface_cast<ICameraService>(binder); - } - ALOGE_IF(mCameraService==0, "no CameraService!?"); - return mCameraService; -} - -// --------------------------------------------------------------------------- - -Camera::Camera() +Camera::Camera(int cameraId) + : CameraBase(cameraId) { - init(); } // construct a camera client from an existing camera remote @@ -77,7 +48,7 @@ sp<Camera> Camera::create(const sp<ICamera>& camera) return 0; } - sp<Camera> c = new Camera(); + sp<Camera> c = new Camera(-1); if (camera->connect(c) == NO_ERROR) { c->mStatus = NO_ERROR; c->mCamera = camera; @@ -87,11 +58,6 @@ sp<Camera> Camera::create(const sp<ICamera>& camera) return 0; } -void Camera::init() -{ - mStatus = 
UNKNOWN_ERROR; -} - Camera::~Camera() { // We don't need to call disconnect() here because if the CameraService @@ -102,45 +68,10 @@ Camera::~Camera() // deadlock if we call any method of ICamera here. } -int32_t Camera::getNumberOfCameras() -{ - const sp<ICameraService>& cs = getCameraService(); - if (cs == 0) return 0; - return cs->getNumberOfCameras(); -} - -status_t Camera::getCameraInfo(int cameraId, - struct CameraInfo* cameraInfo) { - const sp<ICameraService>& cs = getCameraService(); - if (cs == 0) return UNKNOWN_ERROR; - return cs->getCameraInfo(cameraId, cameraInfo); -} - -sp<Camera> Camera::connect(int cameraId) -{ - ALOGV("connect"); - sp<Camera> c = new Camera(); - const sp<ICameraService>& cs = getCameraService(); - if (cs != 0) { - c->mCamera = cs->connect(c, cameraId); - } - if (c->mCamera != 0) { - c->mCamera->asBinder()->linkToDeath(c); - c->mStatus = NO_ERROR; - } else { - c.clear(); - } - return c; -} - -void Camera::disconnect() +sp<Camera> Camera::connect(int cameraId, const String16& clientPackageName, + int clientUid) { - ALOGV("disconnect"); - if (mCamera != 0) { - mCamera->disconnect(); - mCamera->asBinder()->unlinkToDeath(this); - mCamera = 0; - } + return CameraBaseT::connect(cameraId, clientPackageName, clientUid); } status_t Camera::reconnect() @@ -151,11 +82,6 @@ status_t Camera::reconnect() return c->connect(this); } -sp<ICamera> Camera::remote() -{ - return mCamera; -} - status_t Camera::lock() { sp <ICamera> c = mCamera; @@ -170,32 +96,14 @@ status_t Camera::unlock() return c->unlock(); } -// pass the buffered Surface to the camera service -status_t Camera::setPreviewDisplay(const sp<Surface>& surface) -{ - ALOGV("setPreviewDisplay(%p)", surface.get()); - sp <ICamera> c = mCamera; - if (c == 0) return NO_INIT; - if (surface != 0) { - return c->setPreviewDisplay(surface); - } else { - ALOGD("app passed NULL surface"); - return c->setPreviewDisplay(0); - } -} - -// pass the buffered ISurfaceTexture to the camera service -status_t 
Camera::setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture) +// pass the buffered IGraphicBufferProducer to the camera service +status_t Camera::setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer) { - ALOGV("setPreviewTexture(%p)", surfaceTexture.get()); + ALOGV("setPreviewTexture(%p)", bufferProducer.get()); sp <ICamera> c = mCamera; if (c == 0) return NO_INIT; - if (surfaceTexture != 0) { - return c->setPreviewTexture(surfaceTexture); - } else { - ALOGD("app passed NULL surface"); - return c->setPreviewTexture(0); - } + ALOGD_IF(bufferProducer == 0, "app passed NULL surface"); + return c->setPreviewTexture(bufferProducer); } // start preview mode @@ -350,14 +258,7 @@ void Camera::setPreviewCallbackFlags(int flag) // callback from camera service void Camera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) { - sp<CameraListener> listener; - { - Mutex::Autolock _l(mLock); - listener = mListener; - } - if (listener != NULL) { - listener->notify(msgType, ext1, ext2); - } + return CameraBaseT::notifyCallback(msgType, ext1, ext2); } // callback from camera service when frame or image is ready @@ -395,6 +296,7 @@ void Camera::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp< Mutex::Autolock _l(mLock); listener = mListener; } + if (listener != NULL) { listener->postDataTimestamp(timestamp, msgType, dataPtr); } else { @@ -403,18 +305,6 @@ void Camera::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp< } } -void Camera::binderDied(const wp<IBinder>& who) { - ALOGW("ICamera died"); - notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_SERVER_DIED, 0); -} - -void Camera::DeathNotifier::binderDied(const wp<IBinder>& who) { - ALOGV("binderDied"); - Mutex::Autolock _l(Camera::mLock); - Camera::mCameraService.clear(); - ALOGW("Camera server died!"); -} - sp<ICameraRecordingProxy> Camera::getRecordingProxy() { ALOGV("getProxy"); return new RecordingProxy(this); diff --git a/camera/CameraBase.cpp 
b/camera/CameraBase.cpp new file mode 100644 index 0000000..c25c5fd --- /dev/null +++ b/camera/CameraBase.cpp @@ -0,0 +1,218 @@ +/* +** +** Copyright (C) 2013, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "CameraBase" +#include <utils/Log.h> +#include <utils/threads.h> +#include <utils/Mutex.h> + +#include <binder/IPCThreadState.h> +#include <binder/IServiceManager.h> +#include <binder/IMemory.h> + +#include <camera/CameraBase.h> +#include <camera/ICameraService.h> + +// needed to instantiate +#include <camera/ProCamera.h> +#include <camera/Camera.h> + +#include <system/camera_metadata.h> + +namespace android { + +namespace { + sp<ICameraService> gCameraService; + const int kCameraServicePollDelay = 500000; // 0.5s + const char* kCameraServiceName = "media.camera"; + + Mutex gLock; + + class DeathNotifier : public IBinder::DeathRecipient + { + public: + DeathNotifier() { + } + + virtual void binderDied(const wp<IBinder>& who) { + ALOGV("binderDied"); + Mutex::Autolock _l(gLock); + gCameraService.clear(); + ALOGW("Camera service died!"); + } + }; + + sp<DeathNotifier> gDeathNotifier; +}; // namespace anonymous + +/////////////////////////////////////////////////////////// +// CameraBase definition +/////////////////////////////////////////////////////////// + +// establish binder interface to camera service +template <typename TCam, typename TCamTraits> +const 
sp<ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService() +{ + Mutex::Autolock _l(gLock); + if (gCameraService.get() == 0) { + sp<IServiceManager> sm = defaultServiceManager(); + sp<IBinder> binder; + do { + binder = sm->getService(String16(kCameraServiceName)); + if (binder != 0) { + break; + } + ALOGW("CameraService not published, waiting..."); + usleep(kCameraServicePollDelay); + } while(true); + if (gDeathNotifier == NULL) { + gDeathNotifier = new DeathNotifier(); + } + binder->linkToDeath(gDeathNotifier); + gCameraService = interface_cast<ICameraService>(binder); + } + ALOGE_IF(gCameraService == 0, "no CameraService!?"); + return gCameraService; +} + +template <typename TCam, typename TCamTraits> +sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId, + const String16& clientPackageName, + int clientUid) +{ + ALOGV("%s: connect", __FUNCTION__); + sp<TCam> c = new TCam(cameraId); + sp<TCamCallbacks> cl = c; + const sp<ICameraService>& cs = getCameraService(); + if (cs != 0) { + c->mCamera = cs->connect(cl, cameraId, clientPackageName, clientUid); + } + if (c->mCamera != 0) { + c->mCamera->asBinder()->linkToDeath(c); + c->mStatus = NO_ERROR; + } else { + c.clear(); + } + return c; +} + +template <typename TCam, typename TCamTraits> +void CameraBase<TCam, TCamTraits>::disconnect() +{ + ALOGV("%s: disconnect", __FUNCTION__); + if (mCamera != 0) { + mCamera->disconnect(); + mCamera->asBinder()->unlinkToDeath(this); + mCamera = 0; + } + ALOGV("%s: disconnect (done)", __FUNCTION__); +} + +template <typename TCam, typename TCamTraits> +CameraBase<TCam, TCamTraits>::CameraBase(int cameraId) : + mStatus(UNKNOWN_ERROR), + mCameraId(cameraId) +{ +} + +template <typename TCam, typename TCamTraits> +CameraBase<TCam, TCamTraits>::~CameraBase() +{ +} + +template <typename TCam, typename TCamTraits> +sp<typename TCamTraits::TCamUser> CameraBase<TCam, TCamTraits>::remote() +{ + return mCamera; +} + +template <typename TCam, typename TCamTraits> +status_t 
CameraBase<TCam, TCamTraits>::getStatus() +{ + return mStatus; +} + +template <typename TCam, typename TCamTraits> +void CameraBase<TCam, TCamTraits>::binderDied(const wp<IBinder>& who) { + ALOGW("mediaserver's remote binder Camera object died"); + notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_SERVER_DIED, /*ext2*/0); +} + +template <typename TCam, typename TCamTraits> +void CameraBase<TCam, TCamTraits>::setListener(const sp<TCamListener>& listener) +{ + Mutex::Autolock _l(mLock); + mListener = listener; +} + +// callback from camera service +template <typename TCam, typename TCamTraits> +void CameraBase<TCam, TCamTraits>::notifyCallback(int32_t msgType, + int32_t ext1, + int32_t ext2) +{ + sp<TCamListener> listener; + { + Mutex::Autolock _l(mLock); + listener = mListener; + } + if (listener != NULL) { + listener->notify(msgType, ext1, ext2); + } +} + +template <typename TCam, typename TCamTraits> +int CameraBase<TCam, TCamTraits>::getNumberOfCameras() { + const sp<ICameraService> cs = getCameraService(); + + if (!cs.get()) { + // as required by the public Java APIs + return 0; + } + return cs->getNumberOfCameras(); +} + +// this can be in BaseCamera but it should be an instance method +template <typename TCam, typename TCamTraits> +status_t CameraBase<TCam, TCamTraits>::getCameraInfo(int cameraId, + struct CameraInfo* cameraInfo) { + const sp<ICameraService>& cs = getCameraService(); + if (cs == 0) return UNKNOWN_ERROR; + return cs->getCameraInfo(cameraId, cameraInfo); +} + +template <typename TCam, typename TCamTraits> +status_t CameraBase<TCam, TCamTraits>::addServiceListener( + const sp<ICameraServiceListener>& listener) { + const sp<ICameraService>& cs = getCameraService(); + if (cs == 0) return UNKNOWN_ERROR; + return cs->addListener(listener); +} + +template <typename TCam, typename TCamTraits> +status_t CameraBase<TCam, TCamTraits>::removeServiceListener( + const sp<ICameraServiceListener>& listener) { + const sp<ICameraService>& cs = getCameraService(); + 
if (cs == 0) return UNKNOWN_ERROR; + return cs->removeListener(listener); +} + +template class CameraBase<ProCamera>; +template class CameraBase<Camera>; + +} // namespace android diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.cpp b/camera/CameraMetadata.cpp index 835587d..a8f9eff 100644 --- a/services/camera/libcameraservice/camera2/CameraMetadata.cpp +++ b/camera/CameraMetadata.cpp @@ -14,33 +14,46 @@ * limitations under the License. */ +// #define LOG_NDEBUG 0 + #define LOG_TAG "Camera2-Metadata" #include <utils/Log.h> #include <utils/Errors.h> -#include "CameraMetadata.h" +#include <camera/CameraMetadata.h> namespace android { -namespace camera2 { CameraMetadata::CameraMetadata() : - mBuffer(NULL) { + mBuffer(NULL), mLocked(false) { } -CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity) +CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity) : + mLocked(false) { mBuffer = allocate_camera_metadata(entryCapacity, dataCapacity); } -CameraMetadata::CameraMetadata(const CameraMetadata &other) { +CameraMetadata::CameraMetadata(const CameraMetadata &other) : + mLocked(false) { mBuffer = clone_camera_metadata(other.mBuffer); } +CameraMetadata::CameraMetadata(camera_metadata_t *buffer) : + mBuffer(NULL), mLocked(false) { + acquire(buffer); +} + CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other) { return operator=(other.mBuffer); } CameraMetadata &CameraMetadata::operator=(const camera_metadata_t *buffer) { + if (mLocked) { + ALOGE("%s: Assignment to a locked CameraMetadata!", __FUNCTION__); + return *this; + } + if (CC_LIKELY(buffer != mBuffer)) { camera_metadata_t *newBuffer = clone_camera_metadata(buffer); clear(); @@ -50,16 +63,44 @@ CameraMetadata &CameraMetadata::operator=(const camera_metadata_t *buffer) { } CameraMetadata::~CameraMetadata() { + mLocked = false; clear(); } +const camera_metadata_t* CameraMetadata::getAndLock() { + mLocked = true; + return mBuffer; +} + +status_t 
CameraMetadata::unlock(const camera_metadata_t *buffer) { + if (!mLocked) { + ALOGE("%s: Can't unlock a non-locked CameraMetadata!", __FUNCTION__); + return INVALID_OPERATION; + } + if (buffer != mBuffer) { + ALOGE("%s: Can't unlock CameraMetadata with wrong pointer!", + __FUNCTION__); + return BAD_VALUE; + } + mLocked = false; + return OK; +} + camera_metadata_t* CameraMetadata::release() { + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return NULL; + } camera_metadata_t *released = mBuffer; mBuffer = NULL; return released; } void CameraMetadata::clear() { + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return; + } if (mBuffer) { free_camera_metadata(mBuffer); mBuffer = NULL; @@ -67,15 +108,31 @@ void CameraMetadata::clear() { } void CameraMetadata::acquire(camera_metadata_t *buffer) { + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return; + } clear(); mBuffer = buffer; + + ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/NULL) != OK, + "%s: Failed to validate metadata structure %p", + __FUNCTION__, buffer); } void CameraMetadata::acquire(CameraMetadata &other) { + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return; + } acquire(other.release()); } status_t CameraMetadata::append(const CameraMetadata &other) { + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } return append_camera_metadata(mBuffer, other.mBuffer); } @@ -89,6 +146,10 @@ bool CameraMetadata::isEmpty() const { } status_t CameraMetadata::sort() { + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } return sort_camera_metadata(mBuffer); } @@ -112,69 +173,101 @@ status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) { status_t CameraMetadata::update(uint32_t tag, const int32_t *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is 
locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = checkType(tag, TYPE_INT32)) != OK) { return res; } - return update(tag, (const void*)data, data_count); + return updateImpl(tag, (const void*)data, data_count); } status_t CameraMetadata::update(uint32_t tag, const uint8_t *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = checkType(tag, TYPE_BYTE)) != OK) { return res; } - return update(tag, (const void*)data, data_count); + return updateImpl(tag, (const void*)data, data_count); } status_t CameraMetadata::update(uint32_t tag, const float *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = checkType(tag, TYPE_FLOAT)) != OK) { return res; } - return update(tag, (const void*)data, data_count); + return updateImpl(tag, (const void*)data, data_count); } status_t CameraMetadata::update(uint32_t tag, const int64_t *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = checkType(tag, TYPE_INT64)) != OK) { return res; } - return update(tag, (const void*)data, data_count); + return updateImpl(tag, (const void*)data, data_count); } status_t CameraMetadata::update(uint32_t tag, const double *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = checkType(tag, TYPE_DOUBLE)) != OK) { return res; } - return update(tag, (const void*)data, data_count); + return updateImpl(tag, (const void*)data, data_count); } status_t CameraMetadata::update(uint32_t tag, const camera_metadata_rational_t *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = 
checkType(tag, TYPE_RATIONAL)) != OK) { return res; } - return update(tag, (const void*)data, data_count); + return updateImpl(tag, (const void*)data, data_count); } status_t CameraMetadata::update(uint32_t tag, const String8 &string) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } if ( (res = checkType(tag, TYPE_BYTE)) != OK) { return res; } - return update(tag, (const void*)string.string(), string.size()); + return updateImpl(tag, (const void*)string.string(), string.size()); } -status_t CameraMetadata::update(uint32_t tag, const void *data, +status_t CameraMetadata::updateImpl(uint32_t tag, const void *data, size_t data_count) { status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } int type = get_camera_metadata_tag_type(tag); if (type == -1) { ALOGE("%s: Tag %d not found", __FUNCTION__, tag); @@ -202,12 +295,31 @@ status_t CameraMetadata::update(uint32_t tag, const void *data, __FUNCTION__, get_camera_metadata_section_name(tag), get_camera_metadata_tag_name(tag), tag, strerror(-res), res); } + + IF_ALOGV() { + ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/NULL) != + OK, + + "%s: Failed to validate metadata structure after update %p", + __FUNCTION__, mBuffer); + } + return res; } +bool CameraMetadata::exists(uint32_t tag) const { + camera_metadata_ro_entry entry; + return find_camera_metadata_ro_entry(mBuffer, tag, &entry) == 0; +} + camera_metadata_entry_t CameraMetadata::find(uint32_t tag) { status_t res; camera_metadata_entry entry; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + entry.count = 0; + return entry; + } res = find_camera_metadata_entry(mBuffer, tag, &entry); if (CC_UNLIKELY( res != OK )) { entry.count = 0; @@ -230,6 +342,10 @@ camera_metadata_ro_entry_t CameraMetadata::find(uint32_t tag) const { status_t CameraMetadata::erase(uint32_t tag) { camera_metadata_entry_t entry; 
status_t res; + if (mLocked) { + ALOGE("%s: CameraMetadata is locked", __FUNCTION__); + return INVALID_OPERATION; + } res = find_camera_metadata_entry(mBuffer, tag, &entry); if (res == NAME_NOT_FOUND) { return OK; @@ -292,5 +408,4 @@ status_t CameraMetadata::resizeIfNeeded(size_t extraEntries, size_t extraData) { return OK; } -}; // namespace camera2 }; // namespace android diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp index fd91bf2..d10f2e5 100644 --- a/camera/CameraParameters.cpp +++ b/camera/CameraParameters.cpp @@ -90,6 +90,7 @@ const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint"; const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported"; const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization"; const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported"; +const char CameraParameters::KEY_LIGHTFX[] = "light-fx"; const char CameraParameters::TRUE[] = "true"; const char CameraParameters::FALSE[] = "false"; @@ -167,6 +168,10 @@ const char CameraParameters::FOCUS_MODE_EDOF[] = "edof"; const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video"; const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture"; +// Values for light fx settings +const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light"; +const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range"; + CameraParameters::CameraParameters() : mMap() { diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp index 8d8408c..8900867 100644 --- a/camera/ICamera.cpp +++ b/camera/ICamera.cpp @@ -22,14 +22,13 @@ #include <sys/types.h> #include <binder/Parcel.h> #include <camera/ICamera.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <gui/Surface.h> namespace android { enum { DISCONNECT = IBinder::FIRST_CALL_TRANSACTION, - SET_PREVIEW_DISPLAY, SET_PREVIEW_TEXTURE, SET_PREVIEW_CALLBACK_FLAG, 
START_PREVIEW, @@ -68,24 +67,13 @@ public: remote()->transact(DISCONNECT, data, &reply); } - // pass the buffered Surface to the camera service - status_t setPreviewDisplay(const sp<Surface>& surface) - { - ALOGV("setPreviewDisplay"); - Parcel data, reply; - data.writeInterfaceToken(ICamera::getInterfaceDescriptor()); - Surface::writeToParcel(surface, &data); - remote()->transact(SET_PREVIEW_DISPLAY, data, &reply); - return reply.readInt32(); - } - - // pass the buffered SurfaceTexture to the camera service - status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture) + // pass the buffered IGraphicBufferProducer to the camera service + status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer) { ALOGV("setPreviewTexture"); Parcel data, reply; data.writeInterfaceToken(ICamera::getInterfaceDescriptor()); - sp<IBinder> b(surfaceTexture->asBinder()); + sp<IBinder> b(bufferProducer->asBinder()); data.writeStrongBinder(b); remote()->transact(SET_PREVIEW_TEXTURE, data, &reply); return reply.readInt32(); @@ -282,17 +270,11 @@ status_t BnCamera::onTransact( disconnect(); return NO_ERROR; } break; - case SET_PREVIEW_DISPLAY: { - ALOGV("SET_PREVIEW_DISPLAY"); - CHECK_INTERFACE(ICamera, data, reply); - sp<Surface> surface = Surface::readFromParcel(data); - reply->writeInt32(setPreviewDisplay(surface)); - return NO_ERROR; - } break; case SET_PREVIEW_TEXTURE: { ALOGV("SET_PREVIEW_TEXTURE"); CHECK_INTERFACE(ICamera, data, reply); - sp<ISurfaceTexture> st = interface_cast<ISurfaceTexture>(data.readStrongBinder()); + sp<IGraphicBufferProducer> st = + interface_cast<IGraphicBufferProducer>(data.readStrongBinder()); reply->writeInt32(setPreviewTexture(st)); return NO_ERROR; } break; diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp index f2d367e..134f7f0 100644 --- a/camera/ICameraService.cpp +++ b/camera/ICameraService.cpp @@ -23,6 +23,11 @@ #include <binder/IServiceManager.h> #include <camera/ICameraService.h> +#include 
<camera/ICameraServiceListener.h> +#include <camera/IProCameraUser.h> +#include <camera/IProCameraCallbacks.h> +#include <camera/ICamera.h> +#include <camera/ICameraClient.h> namespace android { @@ -56,15 +61,50 @@ public: } // connect to camera service - virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId) + virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId, + const String16 &clientPackageName, int clientUid) { Parcel data, reply; data.writeInterfaceToken(ICameraService::getInterfaceDescriptor()); data.writeStrongBinder(cameraClient->asBinder()); data.writeInt32(cameraId); + data.writeString16(clientPackageName); + data.writeInt32(clientUid); remote()->transact(BnCameraService::CONNECT, data, &reply); return interface_cast<ICamera>(reply.readStrongBinder()); } + + // connect to camera service (pro client) + virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb, int cameraId, + const String16 &clientPackageName, int clientUid) + { + Parcel data, reply; + data.writeInterfaceToken(ICameraService::getInterfaceDescriptor()); + data.writeStrongBinder(cameraCb->asBinder()); + data.writeInt32(cameraId); + data.writeString16(clientPackageName); + data.writeInt32(clientUid); + remote()->transact(BnCameraService::CONNECT_PRO, data, &reply); + return interface_cast<IProCameraUser>(reply.readStrongBinder()); + } + + virtual status_t addListener(const sp<ICameraServiceListener>& listener) + { + Parcel data, reply; + data.writeInterfaceToken(ICameraService::getInterfaceDescriptor()); + data.writeStrongBinder(listener->asBinder()); + remote()->transact(BnCameraService::ADD_LISTENER, data, &reply); + return reply.readInt32(); + } + + virtual status_t removeListener(const sp<ICameraServiceListener>& listener) + { + Parcel data, reply; + data.writeInterfaceToken(ICameraService::getInterfaceDescriptor()); + data.writeStrongBinder(listener->asBinder()); + remote()->transact(BnCameraService::REMOVE_LISTENER, 
data, &reply); + return reply.readInt32(); + } }; IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService"); @@ -92,11 +132,41 @@ status_t BnCameraService::onTransact( } break; case CONNECT: { CHECK_INTERFACE(ICameraService, data, reply); - sp<ICameraClient> cameraClient = interface_cast<ICameraClient>(data.readStrongBinder()); - sp<ICamera> camera = connect(cameraClient, data.readInt32()); + sp<ICameraClient> cameraClient = + interface_cast<ICameraClient>(data.readStrongBinder()); + int32_t cameraId = data.readInt32(); + const String16 clientName = data.readString16(); + int32_t clientUid = data.readInt32(); + sp<ICamera> camera = connect(cameraClient, cameraId, + clientName, clientUid); + reply->writeStrongBinder(camera->asBinder()); + return NO_ERROR; + } break; + case CONNECT_PRO: { + CHECK_INTERFACE(ICameraService, data, reply); + sp<IProCameraCallbacks> cameraClient = interface_cast<IProCameraCallbacks>(data.readStrongBinder()); + int32_t cameraId = data.readInt32(); + const String16 clientName = data.readString16(); + int32_t clientUid = data.readInt32(); + sp<IProCameraUser> camera = connect(cameraClient, cameraId, + clientName, clientUid); reply->writeStrongBinder(camera->asBinder()); return NO_ERROR; } break; + case ADD_LISTENER: { + CHECK_INTERFACE(ICameraService, data, reply); + sp<ICameraServiceListener> listener = + interface_cast<ICameraServiceListener>(data.readStrongBinder()); + reply->writeInt32(addListener(listener)); + return NO_ERROR; + } break; + case REMOVE_LISTENER: { + CHECK_INTERFACE(ICameraService, data, reply); + sp<ICameraServiceListener> listener = + interface_cast<ICameraServiceListener>(data.readStrongBinder()); + reply->writeInt32(removeListener(listener)); + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/camera/ICameraServiceListener.cpp b/camera/ICameraServiceListener.cpp new file mode 100644 index 0000000..640ee35 --- /dev/null +++ 
b/camera/ICameraServiceListener.cpp @@ -0,0 +1,86 @@ +/* +** +** Copyright 2013, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include <stdint.h> +#include <sys/types.h> + +#include <binder/Parcel.h> +#include <binder/IPCThreadState.h> +#include <binder/IServiceManager.h> + +#include <camera/ICameraServiceListener.h> + +namespace android { + +namespace { + enum { + STATUS_CHANGED = IBinder::FIRST_CALL_TRANSACTION, + }; +}; // namespace anonymous + +class BpCameraServiceListener: public BpInterface<ICameraServiceListener> +{ + +public: + BpCameraServiceListener(const sp<IBinder>& impl) + : BpInterface<ICameraServiceListener>(impl) + { + } + + virtual void onStatusChanged(Status status, int32_t cameraId) + { + Parcel data, reply; + data.writeInterfaceToken( + ICameraServiceListener::getInterfaceDescriptor()); + + data.writeInt32(static_cast<int32_t>(status)); + data.writeInt32(cameraId); + + remote()->transact(STATUS_CHANGED, + data, + &reply, + IBinder::FLAG_ONEWAY); + } +}; + +IMPLEMENT_META_INTERFACE(CameraServiceListener, + "android.hardware.ICameraServiceListener"); + +// ---------------------------------------------------------------------- + +status_t BnCameraServiceListener::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + switch(code) { + case STATUS_CHANGED: { + CHECK_INTERFACE(ICameraServiceListener, data, reply); + + Status status = static_cast<Status>(data.readInt32()); 
+ int32_t cameraId = data.readInt32(); + + onStatusChanged(status, cameraId); + + return NO_ERROR; + } break; + default: + return BBinder::onTransact(code, data, reply, flags); + } +} + +// ---------------------------------------------------------------------------- + +}; // namespace android diff --git a/camera/IProCameraCallbacks.cpp b/camera/IProCameraCallbacks.cpp new file mode 100644 index 0000000..b9cd14d --- /dev/null +++ b/camera/IProCameraCallbacks.cpp @@ -0,0 +1,128 @@ +/* +** +** Copyright 2013, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "IProCameraCallbacks" +#include <utils/Log.h> +#include <stdint.h> +#include <sys/types.h> + +#include <binder/Parcel.h> +#include <gui/IGraphicBufferProducer.h> +#include <gui/Surface.h> +#include <utils/Mutex.h> + +#include <camera/IProCameraCallbacks.h> + +#include <system/camera_metadata.h> + +namespace android { + +enum { + NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION, + LOCK_STATUS_CHANGED, + RESULT_RECEIVED, +}; + +void readMetadata(const Parcel& data, camera_metadata_t** out); +void writeMetadata(Parcel& data, camera_metadata_t* metadata); + +class BpProCameraCallbacks: public BpInterface<IProCameraCallbacks> +{ +public: + BpProCameraCallbacks(const sp<IBinder>& impl) + : BpInterface<IProCameraCallbacks>(impl) + { + } + + // generic callback from camera service to app + void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) + { + ALOGV("notifyCallback"); + Parcel data, reply; + data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor()); + data.writeInt32(msgType); + data.writeInt32(ext1); + data.writeInt32(ext2); + remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY); + } + + void onLockStatusChanged(LockStatus newLockStatus) { + ALOGV("onLockStatusChanged"); + Parcel data, reply; + data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor()); + data.writeInt32(newLockStatus); + remote()->transact(LOCK_STATUS_CHANGED, data, &reply, + IBinder::FLAG_ONEWAY); + } + + void onResultReceived(int32_t frameId, camera_metadata* result) { + ALOGV("onResultReceived"); + Parcel data, reply; + data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor()); + data.writeInt32(frameId); + writeMetadata(data, result); + remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY); + } +}; + +IMPLEMENT_META_INTERFACE(ProCameraCallbacks, + "android.hardware.IProCameraCallbacks"); + +// 
---------------------------------------------------------------------- + +status_t BnProCameraCallbacks::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + ALOGV("onTransact - code = %d", code); + switch(code) { + case NOTIFY_CALLBACK: { + ALOGV("NOTIFY_CALLBACK"); + CHECK_INTERFACE(IProCameraCallbacks, data, reply); + int32_t msgType = data.readInt32(); + int32_t ext1 = data.readInt32(); + int32_t ext2 = data.readInt32(); + notifyCallback(msgType, ext1, ext2); + return NO_ERROR; + } break; + case LOCK_STATUS_CHANGED: { + ALOGV("LOCK_STATUS_CHANGED"); + CHECK_INTERFACE(IProCameraCallbacks, data, reply); + LockStatus newLockStatus + = static_cast<LockStatus>(data.readInt32()); + onLockStatusChanged(newLockStatus); + return NO_ERROR; + } break; + case RESULT_RECEIVED: { + ALOGV("RESULT_RECEIVED"); + CHECK_INTERFACE(IProCameraCallbacks, data, reply); + int32_t frameId = data.readInt32(); + camera_metadata_t *result = NULL; + readMetadata(data, &result); + onResultReceived(frameId, result); + return NO_ERROR; + break; + } + default: + return BBinder::onTransact(code, data, reply, flags); + } +} + +// ---------------------------------------------------------------------------- + +}; // namespace android + diff --git a/camera/IProCameraUser.cpp b/camera/IProCameraUser.cpp new file mode 100644 index 0000000..4c4dec3 --- /dev/null +++ b/camera/IProCameraUser.cpp @@ -0,0 +1,426 @@ +/* +** +** Copyright 2013, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "IProCameraUser" +#include <utils/Log.h> +#include <stdint.h> +#include <sys/types.h> +#include <binder/Parcel.h> +#include <camera/IProCameraUser.h> +#include <gui/IGraphicBufferProducer.h> +#include <gui/Surface.h> +#include <system/camera_metadata.h> + +namespace android { + +typedef Parcel::WritableBlob WritableBlob; +typedef Parcel::ReadableBlob ReadableBlob; + +enum { + DISCONNECT = IBinder::FIRST_CALL_TRANSACTION, + CONNECT, + EXCLUSIVE_TRY_LOCK, + EXCLUSIVE_LOCK, + EXCLUSIVE_UNLOCK, + HAS_EXCLUSIVE_LOCK, + SUBMIT_REQUEST, + CANCEL_REQUEST, + DELETE_STREAM, + CREATE_STREAM, + CREATE_DEFAULT_REQUEST, + GET_CAMERA_INFO, +}; + +/** + * Caller becomes the owner of the new metadata + * 'const Parcel' doesnt prevent us from calling the read functions. + * which is interesting since it changes the internal state + * + * NULL can be returned when no metadata was sent, OR if there was an issue + * unpacking the serialized data (i.e. bad parcel or invalid structure). + */ +void readMetadata(const Parcel& data, camera_metadata_t** out) { + + status_t err = OK; + + camera_metadata_t* metadata = NULL; + + if (out) { + *out = NULL; + } + + // arg0 = metadataSize (int32) + int32_t metadataSizeTmp = -1; + if ((err = data.readInt32(&metadataSizeTmp)) != OK) { + ALOGE("%s: Failed to read metadata size (error %d %s)", + __FUNCTION__, err, strerror(-err)); + return; + } + const size_t metadataSize = static_cast<size_t>(metadataSizeTmp); + + if (metadataSize == 0) { + return; + } + + // NOTE: this doesn't make sense to me. shouldnt the blob + // know how big it is? why do we have to specify the size + // to Parcel::readBlob ? + + ReadableBlob blob; + // arg1 = metadata (blob) + do { + if ((err = data.readBlob(metadataSize, &blob)) != OK) { + ALOGE("%s: Failed to read metadata blob (sized %d). Possible " + " serialization bug. 
Error %d %s", + __FUNCTION__, metadataSize, err, strerror(-err)); + break; + } + const camera_metadata_t* tmp = + reinterpret_cast<const camera_metadata_t*>(blob.data()); + + metadata = allocate_copy_camera_metadata_checked(tmp, metadataSize); + } while(0); + blob.release(); + + if (out) { + *out = metadata; + } else if (metadata != NULL) { + free_camera_metadata(metadata); + } +} + +/** + * Caller retains ownership of metadata + * - Write 2 (int32 + blob) args in the current position + */ +void writeMetadata(Parcel& data, camera_metadata_t* metadata) { + // arg0 = metadataSize (int32) + + if (metadata == NULL) { + data.writeInt32(0); + return; + } + + const size_t metadataSize = get_camera_metadata_compact_size(metadata); + data.writeInt32(static_cast<int32_t>(metadataSize)); + + // arg1 = metadata (blob) + WritableBlob blob; + { + data.writeBlob(metadataSize, &blob); + copy_camera_metadata(blob.data(), metadataSize, metadata); + + IF_ALOGV() { + if (validate_camera_metadata_structure( + (const camera_metadata_t*)blob.data(), + &metadataSize) != OK) { + ALOGV("%s: Failed to validate metadata %p after writing blob", + __FUNCTION__, blob.data()); + } else { + ALOGV("%s: Metadata written to blob. 
Validation success", + __FUNCTION__); + } + } + + // Not too big of a problem since receiving side does hard validation + if (validate_camera_metadata_structure(metadata, &metadataSize) != OK) { + ALOGW("%s: Failed to validate metadata %p before writing blob", + __FUNCTION__, metadata); + } + + } + blob.release(); +} + +class BpProCameraUser: public BpInterface<IProCameraUser> +{ +public: + BpProCameraUser(const sp<IBinder>& impl) + : BpInterface<IProCameraUser>(impl) + { + } + + // disconnect from camera service + void disconnect() + { + ALOGV("disconnect"); + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + remote()->transact(DISCONNECT, data, &reply); + } + + virtual status_t connect(const sp<IProCameraCallbacks>& cameraClient) + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + data.writeStrongBinder(cameraClient->asBinder()); + remote()->transact(CONNECT, data, &reply); + return reply.readInt32(); + } + + /* Shared ProCameraUser */ + + virtual status_t exclusiveTryLock() + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + remote()->transact(EXCLUSIVE_TRY_LOCK, data, &reply); + return reply.readInt32(); + } + virtual status_t exclusiveLock() + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + remote()->transact(EXCLUSIVE_LOCK, data, &reply); + return reply.readInt32(); + } + + virtual status_t exclusiveUnlock() + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + remote()->transact(EXCLUSIVE_UNLOCK, data, &reply); + return reply.readInt32(); + } + + virtual bool hasExclusiveLock() + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + remote()->transact(HAS_EXCLUSIVE_LOCK, data, &reply); + return !!reply.readInt32(); + } + + virtual int submitRequest(camera_metadata_t* metadata, bool streaming) + 
{ + + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + + // arg0+arg1 + writeMetadata(data, metadata); + + // arg2 = streaming (bool) + data.writeInt32(streaming); + + remote()->transact(SUBMIT_REQUEST, data, &reply); + return reply.readInt32(); + } + + virtual status_t cancelRequest(int requestId) + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + data.writeInt32(requestId); + + remote()->transact(CANCEL_REQUEST, data, &reply); + return reply.readInt32(); + } + + virtual status_t deleteStream(int streamId) + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + data.writeInt32(streamId); + + remote()->transact(DELETE_STREAM, data, &reply); + return reply.readInt32(); + } + + virtual status_t createStream(int width, int height, int format, + const sp<IGraphicBufferProducer>& bufferProducer, + /*out*/ + int* streamId) + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + data.writeInt32(width); + data.writeInt32(height); + data.writeInt32(format); + + sp<IBinder> b(bufferProducer->asBinder()); + data.writeStrongBinder(b); + + remote()->transact(CREATE_STREAM, data, &reply); + + int sId = reply.readInt32(); + if (streamId) { + *streamId = sId; + } + return reply.readInt32(); + } + + // Create a request object from a template. 
+ virtual status_t createDefaultRequest(int templateId, + /*out*/ + camera_metadata** request) + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + data.writeInt32(templateId); + remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply); + readMetadata(reply, /*out*/request); + return reply.readInt32(); + } + + + virtual status_t getCameraInfo(int cameraId, camera_metadata** info) + { + Parcel data, reply; + data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor()); + data.writeInt32(cameraId); + remote()->transact(GET_CAMERA_INFO, data, &reply); + readMetadata(reply, /*out*/info); + return reply.readInt32(); + } + + +private: + + +}; + +IMPLEMENT_META_INTERFACE(ProCameraUser, "android.hardware.IProCameraUser"); + +// ---------------------------------------------------------------------- + +status_t BnProCameraUser::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + switch(code) { + case DISCONNECT: { + ALOGV("DISCONNECT"); + CHECK_INTERFACE(IProCameraUser, data, reply); + disconnect(); + return NO_ERROR; + } break; + case CONNECT: { + CHECK_INTERFACE(IProCameraUser, data, reply); + sp<IProCameraCallbacks> cameraClient = + interface_cast<IProCameraCallbacks>(data.readStrongBinder()); + reply->writeInt32(connect(cameraClient)); + return NO_ERROR; + } break; + + /* Shared ProCameraUser */ + case EXCLUSIVE_TRY_LOCK: { + CHECK_INTERFACE(IProCameraUser, data, reply); + reply->writeInt32(exclusiveTryLock()); + return NO_ERROR; + } break; + case EXCLUSIVE_LOCK: { + CHECK_INTERFACE(IProCameraUser, data, reply); + reply->writeInt32(exclusiveLock()); + return NO_ERROR; + } break; + case EXCLUSIVE_UNLOCK: { + CHECK_INTERFACE(IProCameraUser, data, reply); + reply->writeInt32(exclusiveUnlock()); + return NO_ERROR; + } break; + case HAS_EXCLUSIVE_LOCK: { + CHECK_INTERFACE(IProCameraUser, data, reply); + reply->writeInt32(hasExclusiveLock()); + return NO_ERROR; + } break; + case SUBMIT_REQUEST: 
{ + CHECK_INTERFACE(IProCameraUser, data, reply); + camera_metadata_t* metadata; + readMetadata(data, /*out*/&metadata); + + // arg2 = streaming (bool) + bool streaming = data.readInt32(); + + // return code: requestId (int32) + reply->writeInt32(submitRequest(metadata, streaming)); + + return NO_ERROR; + } break; + case CANCEL_REQUEST: { + CHECK_INTERFACE(IProCameraUser, data, reply); + int requestId = data.readInt32(); + reply->writeInt32(cancelRequest(requestId)); + return NO_ERROR; + } break; + case DELETE_STREAM: { + CHECK_INTERFACE(IProCameraUser, data, reply); + int streamId = data.readInt32(); + reply->writeInt32(deleteStream(streamId)); + return NO_ERROR; + } break; + case CREATE_STREAM: { + CHECK_INTERFACE(IProCameraUser, data, reply); + int width, height, format; + + width = data.readInt32(); + height = data.readInt32(); + format = data.readInt32(); + + sp<IGraphicBufferProducer> bp = + interface_cast<IGraphicBufferProducer>(data.readStrongBinder()); + + int streamId = -1; + status_t ret; + ret = createStream(width, height, format, bp, &streamId); + + reply->writeInt32(streamId); + reply->writeInt32(ret); + + return NO_ERROR; + } break; + + case CREATE_DEFAULT_REQUEST: { + CHECK_INTERFACE(IProCameraUser, data, reply); + + int templateId = data.readInt32(); + + camera_metadata_t* request = NULL; + status_t ret; + ret = createDefaultRequest(templateId, &request); + + writeMetadata(*reply, request); + reply->writeInt32(ret); + + free_camera_metadata(request); + + return NO_ERROR; + } break; + case GET_CAMERA_INFO: { + CHECK_INTERFACE(IProCameraUser, data, reply); + + int cameraId = data.readInt32(); + + camera_metadata_t* info = NULL; + status_t ret; + ret = getCameraInfo(cameraId, &info); + + writeMetadata(*reply, info); + reply->writeInt32(ret); + + free_camera_metadata(info); + + return NO_ERROR; + } break; + default: + return BBinder::onTransact(code, data, reply, flags); + } +} + +// 
---------------------------------------------------------------------------- + +}; // namespace android diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp new file mode 100644 index 0000000..fec5461 --- /dev/null +++ b/camera/ProCamera.cpp @@ -0,0 +1,431 @@ +/* +** +** Copyright (C) 2013, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "ProCamera" +#include <utils/Log.h> +#include <utils/threads.h> +#include <utils/Mutex.h> + +#include <binder/IPCThreadState.h> +#include <binder/IServiceManager.h> +#include <binder/IMemory.h> + +#include <camera/ProCamera.h> +#include <camera/ICameraService.h> +#include <camera/IProCameraUser.h> +#include <camera/IProCameraCallbacks.h> + +#include <gui/IGraphicBufferProducer.h> + +#include <system/camera_metadata.h> + +namespace android { + +sp<ProCamera> ProCamera::connect(int cameraId) +{ + return CameraBaseT::connect(cameraId, String16(), + ICameraService::USE_CALLING_UID); +} + +ProCamera::ProCamera(int cameraId) + : CameraBase(cameraId) +{ +} + +ProCamera::~ProCamera() +{ + +} + +/* IProCameraUser's implementation */ + +// callback from camera service +void ProCamera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) +{ + return CameraBaseT::notifyCallback(msgType, ext1, ext2); +} + +void ProCamera::onLockStatusChanged( + IProCameraCallbacks::LockStatus newLockStatus) +{ + ALOGV("%s: newLockStatus = %d", __FUNCTION__, 
newLockStatus); + + sp<ProCameraListener> listener; + { + Mutex::Autolock _l(mLock); + listener = mListener; + } + if (listener != NULL) { + switch (newLockStatus) { + case IProCameraCallbacks::LOCK_ACQUIRED: + listener->onLockAcquired(); + break; + case IProCameraCallbacks::LOCK_RELEASED: + listener->onLockReleased(); + break; + case IProCameraCallbacks::LOCK_STOLEN: + listener->onLockStolen(); + break; + default: + ALOGE("%s: Unknown lock status: %d", + __FUNCTION__, newLockStatus); + } + } +} + +void ProCamera::onResultReceived(int32_t frameId, camera_metadata* result) { + ALOGV("%s: frameId = %d, result = %p", __FUNCTION__, frameId, result); + + sp<ProCameraListener> listener; + { + Mutex::Autolock _l(mLock); + listener = mListener; + } + + CameraMetadata tmp(result); + + // Unblock waitForFrame(id) callers + { + Mutex::Autolock al(mWaitMutex); + mMetadataReady = true; + mLatestMetadata = tmp; // make copy + mWaitCondition.broadcast(); + } + + result = tmp.release(); + + if (listener != NULL) { + listener->onResultReceived(frameId, result); + } else { + free_camera_metadata(result); + } + +} + +status_t ProCamera::exclusiveTryLock() +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->exclusiveTryLock(); +} +status_t ProCamera::exclusiveLock() +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->exclusiveLock(); +} +status_t ProCamera::exclusiveUnlock() +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->exclusiveUnlock(); +} +bool ProCamera::hasExclusiveLock() +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->hasExclusiveLock(); +} + +// Note that the callee gets a copy of the metadata. 
+int ProCamera::submitRequest(const struct camera_metadata* metadata, + bool streaming) +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->submitRequest(const_cast<struct camera_metadata*>(metadata), + streaming); +} + +status_t ProCamera::cancelRequest(int requestId) +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->cancelRequest(requestId); +} + +status_t ProCamera::deleteStream(int streamId) +{ + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + status_t s = c->deleteStream(streamId); + + mStreams.removeItem(streamId); + + return s; +} + +status_t ProCamera::createStream(int width, int height, int format, + const sp<Surface>& surface, + /*out*/ + int* streamId) +{ + *streamId = -1; + + ALOGV("%s: createStreamW %dx%d (fmt=0x%x)", __FUNCTION__, width, height, + format); + + if (surface == 0) { + return BAD_VALUE; + } + + return createStream(width, height, format, + surface->getIGraphicBufferProducer(), + streamId); +} + +status_t ProCamera::createStream(int width, int height, int format, + const sp<IGraphicBufferProducer>& bufferProducer, + /*out*/ + int* streamId) { + *streamId = -1; + + ALOGV("%s: createStreamT %dx%d (fmt=0x%x)", __FUNCTION__, width, height, + format); + + if (bufferProducer == 0) { + return BAD_VALUE; + } + + sp <IProCameraUser> c = mCamera; + status_t stat = c->createStream(width, height, format, bufferProducer, + streamId); + + if (stat == OK) { + StreamInfo s(*streamId); + + mStreams.add(*streamId, s); + } + + return stat; +} + +status_t ProCamera::createStreamCpu(int width, int height, int format, + int heapCount, + /*out*/ + sp<CpuConsumer>* cpuConsumer, + int* streamId) { + return createStreamCpu(width, height, format, heapCount, + /*synchronousMode*/true, + cpuConsumer, streamId); +} + +status_t ProCamera::createStreamCpu(int width, int height, int format, + int heapCount, + bool synchronousMode, + /*out*/ + sp<CpuConsumer>* cpuConsumer, + int* streamId) +{ + 
ALOGV("%s: createStreamW %dx%d (fmt=0x%x)", __FUNCTION__, width, height, + format); + + *cpuConsumer = NULL; + + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + sp<CpuConsumer> cc = new CpuConsumer(heapCount, synchronousMode); + cc->setName(String8("ProCamera::mCpuConsumer")); + + sp<Surface> stc = new Surface( + cc->getProducerInterface()); + + status_t s = createStream(width, height, format, + stc->getIGraphicBufferProducer(), + streamId); + + if (s != OK) { + ALOGE("%s: Failure to create stream %dx%d (fmt=0x%x)", __FUNCTION__, + width, height, format); + return s; + } + + sp<ProFrameListener> frameAvailableListener = + new ProFrameListener(this, *streamId); + + getStreamInfo(*streamId).cpuStream = true; + getStreamInfo(*streamId).cpuConsumer = cc; + getStreamInfo(*streamId).synchronousMode = synchronousMode; + getStreamInfo(*streamId).stc = stc; + // for lifetime management + getStreamInfo(*streamId).frameAvailableListener = frameAvailableListener; + + cc->setFrameAvailableListener(frameAvailableListener); + + *cpuConsumer = cc; + + return s; +} + +camera_metadata* ProCamera::getCameraInfo(int cameraId) { + ALOGV("%s: cameraId = %d", __FUNCTION__, cameraId); + + sp <IProCameraUser> c = mCamera; + if (c == 0) return NULL; + + camera_metadata* ptr = NULL; + status_t status = c->getCameraInfo(cameraId, &ptr); + + if (status != OK) { + ALOGE("%s: Failed to get camera info, error = %d", __FUNCTION__, status); + } + + return ptr; +} + +status_t ProCamera::createDefaultRequest(int templateId, + camera_metadata** request) const { + ALOGV("%s: templateId = %d", __FUNCTION__, templateId); + + sp <IProCameraUser> c = mCamera; + if (c == 0) return NO_INIT; + + return c->createDefaultRequest(templateId, request); +} + +void ProCamera::onFrameAvailable(int streamId) { + ALOGV("%s: streamId = %d", __FUNCTION__, streamId); + + sp<ProCameraListener> listener = mListener; + StreamInfo& stream = getStreamInfo(streamId); + + if (listener.get() != NULL) { + 
listener->onFrameAvailable(streamId, stream.cpuConsumer); + } + + // Unblock waitForFrame(id) callers + { + Mutex::Autolock al(mWaitMutex); + getStreamInfo(streamId).frameReady++; + mWaitCondition.broadcast(); + } +} + +int ProCamera::waitForFrameBuffer(int streamId) { + status_t stat = BAD_VALUE; + Mutex::Autolock al(mWaitMutex); + + StreamInfo& si = getStreamInfo(streamId); + + if (si.frameReady > 0) { + int numFrames = si.frameReady; + si.frameReady = 0; + return numFrames; + } else { + while (true) { + stat = mWaitCondition.waitRelative(mWaitMutex, + mWaitTimeout); + if (stat != OK) { + ALOGE("%s: Error while waiting for frame buffer: %d", + __FUNCTION__, stat); + return stat; + } + + if (si.frameReady > 0) { + int numFrames = si.frameReady; + si.frameReady = 0; + return numFrames; + } + // else it was some other stream that got unblocked + } + } + + return stat; +} + +int ProCamera::dropFrameBuffer(int streamId, int count) { + StreamInfo& si = getStreamInfo(streamId); + + if (!si.cpuStream) { + return BAD_VALUE; + } else if (count < 0) { + return BAD_VALUE; + } + + if (!si.synchronousMode) { + ALOGW("%s: No need to drop frames on asynchronous streams," + " as asynchronous mode only keeps 1 latest frame around.", + __FUNCTION__); + return BAD_VALUE; + } + + int numDropped = 0; + for (int i = 0; i < count; ++i) { + CpuConsumer::LockedBuffer buffer; + if (si.cpuConsumer->lockNextBuffer(&buffer) != OK) { + break; + } + + si.cpuConsumer->unlockBuffer(buffer); + numDropped++; + } + + return numDropped; +} + +status_t ProCamera::waitForFrameMetadata() { + status_t stat = BAD_VALUE; + Mutex::Autolock al(mWaitMutex); + + if (mMetadataReady) { + return OK; + } else { + while (true) { + stat = mWaitCondition.waitRelative(mWaitMutex, + mWaitTimeout); + + if (stat != OK) { + ALOGE("%s: Error while waiting for metadata: %d", + __FUNCTION__, stat); + return stat; + } + + if (mMetadataReady) { + mMetadataReady = false; + return OK; + } + // else it was some other stream or 
metadata + } + } + + return stat; +} + +CameraMetadata ProCamera::consumeFrameMetadata() { + Mutex::Autolock al(mWaitMutex); + + // Destructive: Subsequent calls return empty metadatas + CameraMetadata tmp = mLatestMetadata; + mLatestMetadata.clear(); + + return tmp; +} + +ProCamera::StreamInfo& ProCamera::getStreamInfo(int streamId) { + return mStreams.editValueFor(streamId); +} + +}; // namespace android diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk new file mode 100644 index 0000000..ec13911 --- /dev/null +++ b/camera/tests/Android.mk @@ -0,0 +1,38 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + main.cpp \ + ProCameraTests.cpp \ + +LOCAL_SHARED_LIBRARIES := \ + libutils \ + libcutils \ + libstlport \ + libcamera_metadata \ + libcamera_client \ + libgui \ + libsync \ + libui \ + libdl \ + libbinder + +LOCAL_STATIC_LIBRARIES := \ + libgtest + +LOCAL_C_INCLUDES += \ + bionic \ + bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ + system/media/camera/include \ + frameworks/av/services/camera/libcameraservice \ + frameworks/av/include/camera \ + frameworks/native/include \ + +LOCAL_CFLAGS += -Wall -Wextra + +LOCAL_MODULE:= camera_client_test +LOCAL_MODULE_TAGS := tests + +include $(BUILD_NATIVE_TEST) diff --git a/camera/tests/ProCameraTests.cpp b/camera/tests/ProCameraTests.cpp new file mode 100644 index 0000000..f203949 --- /dev/null +++ b/camera/tests/ProCameraTests.cpp @@ -0,0 +1,1281 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include <iostream> + +#include <binder/IPCThreadState.h> +#include <utils/Thread.h> + +#include "Camera.h" +#include "ProCamera.h" +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> + +#include <gui/SurfaceComposerClient.h> +#include <gui/Surface.h> + +#include <system/camera_metadata.h> +#include <hardware/camera2.h> // for CAMERA2_TEMPLATE_PREVIEW only +#include <camera/CameraMetadata.h> + +#include <camera/ICameraServiceListener.h> + +namespace android { +namespace camera2 { +namespace tests { +namespace client { + +#define CAMERA_ID 0 +#define TEST_DEBUGGING 0 + +#define TEST_LISTENER_TIMEOUT 1000000000 // 1 second listener timeout +#define TEST_FORMAT HAL_PIXEL_FORMAT_Y16 //TODO: YUY2 instead + +#define TEST_FORMAT_MAIN HAL_PIXEL_FORMAT_Y8 +#define TEST_FORMAT_DEPTH HAL_PIXEL_FORMAT_Y16 + +// defaults for display "test" +#define TEST_DISPLAY_FORMAT HAL_PIXEL_FORMAT_Y8 +#define TEST_DISPLAY_WIDTH 320 +#define TEST_DISPLAY_HEIGHT 240 + +#define TEST_CPU_FRAME_COUNT 2 +#define TEST_CPU_HEAP_COUNT 5 + +#define TEST_FRAME_PROCESSING_DELAY_US 200000 // 200 ms + +#if TEST_DEBUGGING +#define dout std::cerr +#else +#define dout if (0) std::cerr +#endif + +#define EXPECT_OK(x) EXPECT_EQ(OK, (x)) +#define ASSERT_OK(x) ASSERT_EQ(OK, (x)) + +class ProCameraTest; + +struct ServiceListener : public BnCameraServiceListener { + + ServiceListener() : + mLatestStatus(STATUS_UNKNOWN), + mPrevStatus(STATUS_UNKNOWN) + { + } + + void onStatusChanged(Status status, int32_t cameraId) { + dout << "On 
status changed: 0x" << std::hex + << (unsigned int) status << " cameraId " << cameraId + << std::endl; + + Mutex::Autolock al(mMutex); + + mLatestStatus = status; + mCondition.broadcast(); + } + + status_t waitForStatusChange(Status& newStatus) { + Mutex::Autolock al(mMutex); + + if (mLatestStatus != mPrevStatus) { + newStatus = mLatestStatus; + mPrevStatus = mLatestStatus; + return OK; + } + + status_t stat = mCondition.waitRelative(mMutex, + TEST_LISTENER_TIMEOUT); + + if (stat == OK) { + newStatus = mLatestStatus; + mPrevStatus = mLatestStatus; + } + + return stat; + } + + Condition mCondition; + Mutex mMutex; + + Status mLatestStatus; + Status mPrevStatus; +}; + +enum ProEvent { + UNKNOWN, + ACQUIRED, + RELEASED, + STOLEN, + FRAME_RECEIVED, + RESULT_RECEIVED, +}; + +inline int ProEvent_Mask(ProEvent e) { + return (1 << static_cast<int>(e)); +} + +typedef Vector<ProEvent> EventList; + +class ProCameraTestThread : public Thread +{ +public: + ProCameraTestThread() { + } + + virtual bool threadLoop() { + mProc = ProcessState::self(); + mProc->startThreadPool(); + + IPCThreadState *ptr = IPCThreadState::self(); + + ptr->joinThreadPool(); + + return false; + } + + sp<ProcessState> mProc; +}; + +class ProCameraTestListener : public ProCameraListener { + +public: + static const int EVENT_MASK_ALL = 0xFFFFFFFF; + + ProCameraTestListener() { + mEventMask = EVENT_MASK_ALL; + mDropFrames = false; + } + + status_t WaitForEvent() { + Mutex::Autolock cal(mConditionMutex); + + { + Mutex::Autolock al(mListenerMutex); + + if (mProEventList.size() > 0) { + return OK; + } + } + + return mListenerCondition.waitRelative(mConditionMutex, + TEST_LISTENER_TIMEOUT); + } + + /* Read events into out. Existing queue is flushed */ + void ReadEvents(EventList& out) { + Mutex::Autolock al(mListenerMutex); + + for (size_t i = 0; i < mProEventList.size(); ++i) { + out.push(mProEventList[i]); + } + + mProEventList.clear(); + } + + /** + * Dequeue 1 event from the event queue. 
+ * Returns UNKNOWN if queue is empty + */ + ProEvent ReadEvent() { + Mutex::Autolock al(mListenerMutex); + + if (mProEventList.size() == 0) { + return UNKNOWN; + } + + ProEvent ev = mProEventList[0]; + mProEventList.removeAt(0); + + return ev; + } + + void SetEventMask(int eventMask) { + Mutex::Autolock al(mListenerMutex); + mEventMask = eventMask; + } + + // Automatically acquire/release frames as they are available + void SetDropFrames(bool dropFrames) { + Mutex::Autolock al(mListenerMutex); + mDropFrames = dropFrames; + } + +private: + void QueueEvent(ProEvent ev) { + bool eventAdded = false; + { + Mutex::Autolock al(mListenerMutex); + + // Drop events not part of mask + if (ProEvent_Mask(ev) & mEventMask) { + mProEventList.push(ev); + eventAdded = true; + } + } + + if (eventAdded) { + mListenerCondition.broadcast(); + } + } + +protected: + + ////////////////////////////////////////////////// + ///////// ProCameraListener ////////////////////// + ////////////////////////////////////////////////// + + + // Lock has been acquired. Write operations now available. + virtual void onLockAcquired() { + QueueEvent(ACQUIRED); + } + // Lock has been released with exclusiveUnlock + virtual void onLockReleased() { + QueueEvent(RELEASED); + } + + // Lock has been stolen by another client. + virtual void onLockStolen() { + QueueEvent(STOLEN); + } + + // Lock free. 
+ virtual void onTriggerNotify(int32_t ext1, int32_t ext2, int32_t ext3) { + + dout << "Trigger notify: " << ext1 << " " << ext2 + << " " << ext3 << std::endl; + } + + virtual void onFrameAvailable(int streamId, + const sp<CpuConsumer>& consumer) { + + QueueEvent(FRAME_RECEIVED); + + Mutex::Autolock al(mListenerMutex); + if (mDropFrames) { + CpuConsumer::LockedBuffer buf; + status_t ret; + + EXPECT_OK(ret); + if (OK == (ret = consumer->lockNextBuffer(&buf))) { + + dout << "Frame received on streamId = " << streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << std::endl; + + EXPECT_OK(consumer->unlockBuffer(buf)); + } + } else { + dout << "Frame received on streamId = " << streamId << std::endl; + } + } + + virtual void onResultReceived(int32_t frameId, + camera_metadata* request) { + dout << "Result received frameId = " << frameId + << ", requestPtr = " << (void*)request << std::endl; + QueueEvent(RESULT_RECEIVED); + free_camera_metadata(request); + } + + virtual void notify(int32_t msg, int32_t ext1, int32_t ext2) { + dout << "Notify received: msg " << std::hex << msg + << ", ext1: " << std::hex << ext1 << ", ext2: " << std::hex << ext2 + << std::endl; + } + + Vector<ProEvent> mProEventList; + Mutex mListenerMutex; + Mutex mConditionMutex; + Condition mListenerCondition; + int mEventMask; + bool mDropFrames; +}; + +class ProCameraTest : public ::testing::Test { + +public: + ProCameraTest() { + char* displaySecsEnv = getenv("TEST_DISPLAY_SECS"); + if (displaySecsEnv != NULL) { + mDisplaySecs = atoi(displaySecsEnv); + if (mDisplaySecs < 0) { + mDisplaySecs = 0; + } + } else { + mDisplaySecs = 0; + } + + char* displayFmtEnv = getenv("TEST_DISPLAY_FORMAT"); + if (displayFmtEnv != NULL) { + mDisplayFmt = FormatFromString(displayFmtEnv); + } else { + mDisplayFmt = TEST_DISPLAY_FORMAT; + } + + char* displayWidthEnv = getenv("TEST_DISPLAY_WIDTH"); + if (displayWidthEnv != NULL) { + mDisplayW = atoi(displayWidthEnv); + if (mDisplayW < 0) 
{ + mDisplayW = 0; + } + } else { + mDisplayW = TEST_DISPLAY_WIDTH; + } + + char* displayHeightEnv = getenv("TEST_DISPLAY_HEIGHT"); + if (displayHeightEnv != NULL) { + mDisplayH = atoi(displayHeightEnv); + if (mDisplayH < 0) { + mDisplayH = 0; + } + } else { + mDisplayH = TEST_DISPLAY_HEIGHT; + } + } + + static void SetUpTestCase() { + // Binder Thread Pool Initialization + mTestThread = new ProCameraTestThread(); + mTestThread->run("ProCameraTestThread"); + } + + virtual void SetUp() { + mCamera = ProCamera::connect(CAMERA_ID); + ASSERT_NE((void*)NULL, mCamera.get()); + + mListener = new ProCameraTestListener(); + mCamera->setListener(mListener); + } + + virtual void TearDown() { + ASSERT_NE((void*)NULL, mCamera.get()); + mCamera->disconnect(); + } + +protected: + sp<ProCamera> mCamera; + sp<ProCameraTestListener> mListener; + + static sp<Thread> mTestThread; + + int mDisplaySecs; + int mDisplayFmt; + int mDisplayW; + int mDisplayH; + + sp<SurfaceComposerClient> mComposerClient; + sp<SurfaceControl> mSurfaceControl; + + sp<SurfaceComposerClient> mDepthComposerClient; + sp<SurfaceControl> mDepthSurfaceControl; + + int getSurfaceWidth() { + return 512; + } + int getSurfaceHeight() { + return 512; + } + + void createOnScreenSurface(sp<Surface>& surface) { + mComposerClient = new SurfaceComposerClient; + ASSERT_EQ(NO_ERROR, mComposerClient->initCheck()); + + mSurfaceControl = mComposerClient->createSurface( + String8("ProCameraTest StreamingImage Surface"), + getSurfaceWidth(), getSurfaceHeight(), + PIXEL_FORMAT_RGB_888, 0); + + mSurfaceControl->setPosition(0, 0); + + ASSERT_TRUE(mSurfaceControl != NULL); + ASSERT_TRUE(mSurfaceControl->isValid()); + + SurfaceComposerClient::openGlobalTransaction(); + ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF)); + ASSERT_EQ(NO_ERROR, mSurfaceControl->show()); + SurfaceComposerClient::closeGlobalTransaction(); + + sp<ANativeWindow> window = mSurfaceControl->getSurface(); + surface = mSurfaceControl->getSurface(); + + 
ASSERT_NE((void*)NULL, surface.get()); + } + + void createDepthOnScreenSurface(sp<Surface>& surface) { + mDepthComposerClient = new SurfaceComposerClient; + ASSERT_EQ(NO_ERROR, mDepthComposerClient->initCheck()); + + mDepthSurfaceControl = mDepthComposerClient->createSurface( + String8("ProCameraTest StreamingImage Surface"), + getSurfaceWidth(), getSurfaceHeight(), + PIXEL_FORMAT_RGB_888, 0); + + mDepthSurfaceControl->setPosition(640, 0); + + ASSERT_TRUE(mDepthSurfaceControl != NULL); + ASSERT_TRUE(mDepthSurfaceControl->isValid()); + + SurfaceComposerClient::openGlobalTransaction(); + ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->setLayer(0x7FFFFFFF)); + ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->show()); + SurfaceComposerClient::closeGlobalTransaction(); + + sp<ANativeWindow> window = mDepthSurfaceControl->getSurface(); + surface = mDepthSurfaceControl->getSurface(); + + ASSERT_NE((void*)NULL, surface.get()); + } + + template <typename T> + static bool ExistsItem(T needle, T* array, size_t count) { + if (!array) { + return false; + } + + for (size_t i = 0; i < count; ++i) { + if (array[i] == needle) { + return true; + } + } + return false; + } + + + static int FormatFromString(const char* str) { + std::string s(str); + +#define CMP_STR(x, y) \ + if (s == #x) return HAL_PIXEL_FORMAT_ ## y; +#define CMP_STR_SAME(x) CMP_STR(x, x) + + CMP_STR_SAME( Y16); + CMP_STR_SAME( Y8); + CMP_STR_SAME( YV12); + CMP_STR(NV16, YCbCr_422_SP); + CMP_STR(NV21, YCrCb_420_SP); + CMP_STR(YUY2, YCbCr_422_I); + CMP_STR(RAW, RAW_SENSOR); + CMP_STR(RGBA, RGBA_8888); + + std::cerr << "Unknown format string " << str << std::endl; + return -1; + + } + + /** + * Creating a streaming request for these output streams from a template, + * and submit it + */ + void createSubmitRequestForStreams(uint8_t* streamIds, size_t count, int requestCount=-1) { + + ASSERT_NE((void*)NULL, streamIds); + ASSERT_LT(0u, count); + + camera_metadata_t *requestTmp = NULL; + 
EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&requestTmp)); + ASSERT_NE((void*)NULL, requestTmp); + CameraMetadata request(requestTmp); + + // set the output streams. default is empty + + uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS); + request.update(tag, streamIds, count); + + requestTmp = request.release(); + + if (requestCount < 0) { + EXPECT_OK(mCamera->submitRequest(requestTmp, /*streaming*/true)); + } else { + for (int i = 0; i < requestCount; ++i) { + EXPECT_OK(mCamera->submitRequest(requestTmp, + /*streaming*/false)); + } + } + request.acquire(requestTmp); + } +}; + +sp<Thread> ProCameraTest::mTestThread; + +TEST_F(ProCameraTest, AvailableFormats) { + if (HasFatalFailure()) { + return; + } + + CameraMetadata staticInfo = mCamera->getCameraInfo(CAMERA_ID); + ASSERT_FALSE(staticInfo.isEmpty()); + + uint32_t tag = static_cast<uint32_t>(ANDROID_SCALER_AVAILABLE_FORMATS); + EXPECT_TRUE(staticInfo.exists(tag)); + camera_metadata_entry_t entry = staticInfo.find(tag); + + EXPECT_TRUE(ExistsItem<int32_t>(HAL_PIXEL_FORMAT_YV12, + entry.data.i32, entry.count)); + EXPECT_TRUE(ExistsItem<int32_t>(HAL_PIXEL_FORMAT_YCrCb_420_SP, + entry.data.i32, entry.count)); +} + +// test around exclusiveTryLock (immediate locking) +TEST_F(ProCameraTest, LockingImmediate) { + + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(ACQUIRED) | + ProEvent_Mask(STOLEN) | + ProEvent_Mask(RELEASED)); + + EXPECT_FALSE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveTryLock()); + // at this point we definitely have the lock + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(ACQUIRED, mListener->ReadEvent()); + + EXPECT_TRUE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveUnlock()); + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(RELEASED, mListener->ReadEvent()); + + EXPECT_FALSE(mCamera->hasExclusiveLock()); +} + +// test around exclusiveLock (locking at some 
future point in time) +TEST_F(ProCameraTest, LockingAsynchronous) { + + if (HasFatalFailure()) { + return; + } + + + mListener->SetEventMask(ProEvent_Mask(ACQUIRED) | + ProEvent_Mask(STOLEN) | + ProEvent_Mask(RELEASED)); + + // TODO: Add another procamera that has a lock here. + // then we can be test that the lock wont immediately be acquired + + EXPECT_FALSE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveTryLock()); + // at this point we definitely have the lock + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(ACQUIRED, mListener->ReadEvent()); + + EXPECT_TRUE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveUnlock()); + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(RELEASED, mListener->ReadEvent()); + + EXPECT_FALSE(mCamera->hasExclusiveLock()); +} + +// Stream directly to the screen. +TEST_F(ProCameraTest, DISABLED_StreamingImageSingle) { + if (HasFatalFailure()) { + return; + } + + sp<Surface> surface; + if (mDisplaySecs > 0) { + createOnScreenSurface(/*out*/surface); + } + else { + dout << "Skipping, will not render to screen" << std::endl; + return; + } + + int depthStreamId = -1; + + sp<ServiceListener> listener = new ServiceListener(); + EXPECT_OK(ProCamera::addServiceListener(listener)); + + ServiceListener::Status currentStatus; + + // when subscribing a new listener, + // we immediately get a callback to the current status + while (listener->waitForStatusChange(/*out*/currentStatus) != OK); + EXPECT_EQ(ServiceListener::STATUS_PRESENT, currentStatus); + + dout << "Will now stream and resume infinitely..." 
<< std::endl; + while (true) { + + if (currentStatus == ServiceListener::STATUS_PRESENT) { + + ASSERT_OK(mCamera->createStream(mDisplayW, mDisplayH, mDisplayFmt, + surface, + &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + uint8_t streams[] = { depthStreamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams( + streams, + /*count*/1)); + } + + ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN; + + // TODO: maybe check for getch every once in a while? + while (listener->waitForStatusChange(/*out*/stat) != OK); + + if (currentStatus != stat) { + if (stat == ServiceListener::STATUS_PRESENT) { + dout << "Reconnecting to camera" << std::endl; + mCamera = ProCamera::connect(CAMERA_ID); + } else if (stat == ServiceListener::STATUS_NOT_AVAILABLE) { + dout << "Disconnecting from camera" << std::endl; + mCamera->disconnect(); + } else if (stat == ServiceListener::STATUS_NOT_PRESENT) { + dout << "Camera unplugged" << std::endl; + mCamera = NULL; + } else { + dout << "Unknown status change " + << std::hex << stat << std::endl; + } + + currentStatus = stat; + } + } + + EXPECT_OK(ProCamera::removeServiceListener(listener)); + EXPECT_OK(mCamera->deleteStream(depthStreamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +// Stream directly to the screen. 
+TEST_F(ProCameraTest, DISABLED_StreamingImageDual) { + if (HasFatalFailure()) { + return; + } + sp<Surface> surface; + sp<Surface> depthSurface; + if (mDisplaySecs > 0) { + createOnScreenSurface(/*out*/surface); + createDepthOnScreenSurface(/*out*/depthSurface); + } + + int streamId = -1; + EXPECT_OK(mCamera->createStream(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, surface, &streamId)); + EXPECT_NE(-1, streamId); + + int depthStreamId = -1; + EXPECT_OK(mCamera->createStream(/*width*/320, /*height*/240, + TEST_FORMAT_DEPTH, depthSurface, &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + /* + */ + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + // it would probably be better to use CameraMetadata from camera service. + camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + /*FIXME: dont need this later, at which point the above should become an + ASSERT_NE*/ + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + // wow what a verbose API. + uint8_t allStreams[] = { streamId, depthStreamId }; + // IMPORTANT. bad things will happen if its not a uint8. 
+ size_t streamCount = sizeof(allStreams) / sizeof(allStreams[0]); + camera_metadata_entry_t entry; + uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/streamCount, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + dout << "will sleep now for " << mDisplaySecs << std::endl; + sleep(mDisplaySecs); + + free_camera_metadata(request); + + for (int i = 0; i < streamCount; ++i) { + EXPECT_OK(mCamera->deleteStream(allStreams[i])); + } + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +TEST_F(ProCameraTest, CpuConsumerSingle) { + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(ACQUIRED) | + ProEvent_Mask(STOLEN) | + ProEvent_Mask(RELEASED) | + ProEvent_Mask(FRAME_RECEIVED)); + mListener->SetDropFrames(true); + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240, + TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(ACQUIRED, mListener->ReadEvent()); + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + // it would probably be better to use CameraMetadata from camera service. 
+ camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + /*FIXME: dont need this later, at which point the above should become an + ASSERT_NE*/ + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + uint8_t allStreams[] = { streamId }; + camera_metadata_entry_t entry; + uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/1) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/1)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/1, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + // Consume a couple of frames + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent()); + } + + // Done: clean up + free_camera_metadata(request); + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +TEST_F(ProCameraTest, CpuConsumerDual) { + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(FRAME_RECEIVED)); + mListener->SetDropFrames(true); + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + int depthStreamId = -1; + EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240, + TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, 
&consumer, &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + /* + */ + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + // it would probably be better to use CameraMetadata from camera service. + camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + // wow what a verbose API. + uint8_t allStreams[] = { streamId, depthStreamId }; + size_t streamCount = 2; + camera_metadata_entry_t entry; + uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/streamCount, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + // Consume a couple of frames + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + // stream id 1 + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent()); + + // stream id 2 + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent()); + + //TODO: events should be a struct with some data like the stream id + } + + // Done: clean up + free_camera_metadata(request); + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); 
+} + +TEST_F(ProCameraTest, ResultReceiver) { + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(RESULT_RECEIVED)); + mListener->SetDropFrames(true); + //FIXME: if this is run right after the previous test we get FRAME_RECEIVED + // need to filter out events at read time + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + /* + */ + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + /*FIXME*/ + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + uint8_t allStreams[] = { streamId }; + size_t streamCount = 1; + camera_metadata_entry_t entry; + uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/streamCount, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + // Consume a couple of results + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(RESULT_RECEIVED, mListener->ReadEvent()); + } 
+ + // Done: clean up + free_camera_metadata(request); + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +// FIXME: This is racy and sometimes fails on waitForFrameMetadata +TEST_F(ProCameraTest, DISABLED_WaitForResult) { + if (HasFatalFailure()) { + return; + } + + mListener->SetDropFrames(true); + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + uint8_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1)); + + // Consume a couple of results + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_OK(mCamera->waitForFrameMetadata()); + CameraMetadata meta = mCamera->consumeFrameMetadata(); + EXPECT_FALSE(meta.isEmpty()); + } + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +TEST_F(ProCameraTest, WaitForSingleStreamBuffer) { + if (HasFatalFailure()) { + return; + } + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + uint8_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1, + /*requests*/TEST_CPU_FRAME_COUNT)); + + // Consume a couple of results + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_EQ(1, mCamera->waitForFrameBuffer(streamId)); + + CpuConsumer::LockedBuffer buf; + EXPECT_OK(consumer->lockNextBuffer(&buf)); + + dout << "Buffer synchronously received on streamId = " << streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << std::endl; + + EXPECT_OK(consumer->unlockBuffer(buf)); + } + + // Done: 
clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +// FIXME: This is racy and sometimes fails on waitForFrameMetadata +TEST_F(ProCameraTest, DISABLED_WaitForDualStreamBuffer) { + if (HasFatalFailure()) { + return; + } + + const int REQUEST_COUNT = TEST_CPU_FRAME_COUNT * 10; + + // 15 fps + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + // 30 fps + int depthStreamId = -1; + sp<CpuConsumer> depthConsumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240, + TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &depthConsumer, &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + uint8_t streams[] = { streamId, depthStreamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/2, + /*requests*/REQUEST_COUNT)); + + int depthFrames = 0; + int greyFrames = 0; + + // Consume two frames simultaneously. Unsynchronized by timestamps. + for (int i = 0; i < REQUEST_COUNT; ++i) { + + // Exhaust event queue so it doesn't keep growing + while (mListener->ReadEvent() != UNKNOWN); + + // Get the metadata + EXPECT_OK(mCamera->waitForFrameMetadata()); + CameraMetadata meta = mCamera->consumeFrameMetadata(); + EXPECT_FALSE(meta.isEmpty()); + + // Get the buffers + + EXPECT_EQ(1, mCamera->waitForFrameBuffer(depthStreamId)); + + /** + * Guaranteed to be able to consume the depth frame, + * since we waited on it. 
+ */ + CpuConsumer::LockedBuffer depthBuffer; + EXPECT_OK(depthConsumer->lockNextBuffer(&depthBuffer)); + + dout << "Depth Buffer synchronously received on streamId = " << + streamId << + ", dataPtr = " << (void*)depthBuffer.data << + ", timestamp = " << depthBuffer.timestamp << std::endl; + + EXPECT_OK(depthConsumer->unlockBuffer(depthBuffer)); + + depthFrames++; + + + /** Consume Greyscale frames if there are any. + * There may not be since it runs at half FPS */ + CpuConsumer::LockedBuffer greyBuffer; + while (consumer->lockNextBuffer(&greyBuffer) == OK) { + + dout << "GRAY Buffer synchronously received on streamId = " << + streamId << + ", dataPtr = " << (void*)greyBuffer.data << + ", timestamp = " << greyBuffer.timestamp << std::endl; + + EXPECT_OK(consumer->unlockBuffer(greyBuffer)); + + greyFrames++; + } + } + + dout << "Done, summary: depth frames " << std::dec << depthFrames + << ", grey frames " << std::dec << greyFrames << std::endl; + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesSync) { + if (HasFatalFailure()) { + return; + } + + const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT; + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, + /*synchronousMode*/true, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + uint8_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1, + /*requests*/NUM_REQUESTS)); + + // Consume a couple of results + for (int i = 0; i < NUM_REQUESTS; ++i) { + int numFrames; + EXPECT_TRUE((numFrames = mCamera->waitForFrameBuffer(streamId)) > 0); + + // Drop all but the newest framebuffer + EXPECT_EQ(numFrames-1, mCamera->dropFrameBuffer(streamId, numFrames-1)); + + dout << "Dropped " << (numFrames - 1) << " 
frames" << std::endl; + + // Skip the counter ahead, don't try to consume these frames again + i += numFrames-1; + + // "Consume" the buffer + CpuConsumer::LockedBuffer buf; + EXPECT_OK(consumer->lockNextBuffer(&buf)); + + dout << "Buffer synchronously received on streamId = " << streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << std::endl; + + // Process at 10fps, stream is at 15fps. + // This means we will definitely fill up the buffer queue with + // extra buffers and need to drop them. + usleep(TEST_FRAME_PROCESSING_DELAY_US); + + EXPECT_OK(consumer->unlockBuffer(buf)); + } + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesAsync) { + if (HasFatalFailure()) { + return; + } + + const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT; + const int CONSECUTIVE_FAILS_ASSUME_TIME_OUT = 5; + + int streamId = -1; + sp<CpuConsumer> consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, + /*synchronousMode*/false, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + uint8_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1, + /*requests*/NUM_REQUESTS)); + + uint64_t lastFrameNumber = 0; + int numFrames; + + // Consume a couple of results + int i; + for (i = 0; i < NUM_REQUESTS && lastFrameNumber < NUM_REQUESTS; ++i) { + EXPECT_LT(0, (numFrames = mCamera->waitForFrameBuffer(streamId))); + + dout << "Dropped " << (numFrames - 1) << " frames" << std::endl; + + // Skip the counter ahead, don't try to consume these frames again + i += numFrames-1; + + // "Consume" the buffer + CpuConsumer::LockedBuffer buf; + + EXPECT_EQ(OK, consumer->lockNextBuffer(&buf)); + + lastFrameNumber = buf.frameNumber; + + dout << "Buffer asynchronously received on streamId = " << 
streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << + ", framenumber = " << buf.frameNumber << std::endl; + + // Process at 10fps, stream is at 15fps. + // This means we will definitely fill up the buffer queue with + // extra buffers and need to drop them. + usleep(TEST_FRAME_PROCESSING_DELAY_US); + + EXPECT_OK(consumer->unlockBuffer(buf)); + } + + dout << "Done after " << i << " iterations " << std::endl; + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + + + +//TODO: refactor into separate file +TEST_F(ProCameraTest, ServiceListenersSubscribe) { + + ASSERT_EQ(4u, sizeof(ServiceListener::Status)); + + sp<ServiceListener> listener = new ServiceListener(); + + EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener)); + EXPECT_OK(ProCamera::addServiceListener(listener)); + + EXPECT_EQ(ALREADY_EXISTS, ProCamera::addServiceListener(listener)); + EXPECT_OK(ProCamera::removeServiceListener(listener)); + + EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener)); +} + +//TODO: refactor into separate file +TEST_F(ProCameraTest, ServiceListenersFunctional) { + + sp<ServiceListener> listener = new ServiceListener(); + + EXPECT_OK(ProCamera::addServiceListener(listener)); + + sp<Camera> cam = Camera::connect(CAMERA_ID, + /*clientPackageName*/String16(), + -1); + EXPECT_NE((void*)NULL, cam.get()); + + ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN; + EXPECT_OK(listener->waitForStatusChange(/*out*/stat)); + + EXPECT_EQ(ServiceListener::STATUS_NOT_AVAILABLE, stat); + + if (cam.get()) { + cam->disconnect(); + } + + EXPECT_OK(listener->waitForStatusChange(/*out*/stat)); + EXPECT_EQ(ServiceListener::STATUS_PRESENT, stat); + + EXPECT_OK(ProCamera::removeServiceListener(listener)); +} + + + +} +} +} +} + diff --git a/camera/tests/main.cpp b/camera/tests/main.cpp new file mode 100644 index 0000000..8c8c515 --- /dev/null +++ b/camera/tests/main.cpp @@ 
-0,0 +1,27 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> + + +int main(int argc, char **argv) { + + ::testing::InitGoogleTest(&argc, argv); + + int ret = RUN_ALL_TESTS(); + + return ret; +} diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk index 1247588..3844487 100644 --- a/cmds/stagefright/Android.mk +++ b/cmds/stagefright/Android.mk @@ -8,8 +8,8 @@ LOCAL_SRC_FILES:= \ SineSource.cpp LOCAL_SHARED_LIBRARIES := \ - libstagefright libmedia libmedia_native libutils libbinder libstagefright_foundation \ - libjpeg libgui + libstagefright libmedia libutils libbinder libstagefright_foundation \ + libjpeg libgui libcutils liblog LOCAL_C_INCLUDES:= \ frameworks/av/media/libstagefright \ @@ -104,7 +104,7 @@ LOCAL_SRC_FILES:= \ LOCAL_SHARED_LIBRARIES := \ libstagefright liblog libutils libbinder libgui \ - libstagefright_foundation libmedia libmedia_native libcutils + libstagefright_foundation libmedia libcutils LOCAL_C_INCLUDES:= \ frameworks/av/media/libstagefright \ @@ -127,7 +127,7 @@ LOCAL_SRC_FILES:= \ LOCAL_SHARED_LIBRARIES := \ libstagefright liblog libutils libbinder libstagefright_foundation \ - libmedia libmedia_native libgui libcutils libui + libmedia libgui libcutils libui LOCAL_C_INCLUDES:= \ frameworks/av/media/libstagefright \ @@ -151,7 +151,7 @@ LOCAL_SRC_FILES:= \ LOCAL_SHARED_LIBRARIES := \ libstagefright liblog libutils libbinder 
libstagefright_foundation \ - libmedia libmedia_native libgui libcutils libui + libmedia libgui libcutils libui LOCAL_C_INCLUDES:= \ frameworks/av/media/libstagefright \ @@ -165,3 +165,25 @@ LOCAL_MODULE:= codec include $(BUILD_EXECUTABLE) +################################################################################ + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + muxer.cpp \ + +LOCAL_SHARED_LIBRARIES := \ + libstagefright liblog libutils libbinder libstagefright_foundation \ + libmedia libgui libcutils libui libc + +LOCAL_C_INCLUDES:= \ + frameworks/av/media/libstagefright \ + $(TOP)/frameworks/native/include/media/openmax + +LOCAL_CFLAGS += -Wno-multichar + +LOCAL_MODULE_TAGS := debug + +LOCAL_MODULE:= muxer + +include $(BUILD_EXECUTABLE) diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp index 7636906..5d2d721 100644 --- a/cmds/stagefright/SimplePlayer.cpp +++ b/cmds/stagefright/SimplePlayer.cpp @@ -20,7 +20,7 @@ #include "SimplePlayer.h" -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <media/AudioTrack.h> #include <media/ICrypto.h> #include <media/stagefright/foundation/ABuffer.h> @@ -64,16 +64,16 @@ status_t SimplePlayer::setDataSource(const char *path) { return PostAndAwaitResponse(msg, &response); } -status_t SimplePlayer::setSurface(const sp<ISurfaceTexture> &surfaceTexture) { +status_t SimplePlayer::setSurface(const sp<IGraphicBufferProducer> &bufferProducer) { sp<AMessage> msg = new AMessage(kWhatSetSurface, id()); - sp<SurfaceTextureClient> surfaceTextureClient; - if (surfaceTexture != NULL) { - surfaceTextureClient = new SurfaceTextureClient(surfaceTexture); + sp<Surface> surface; + if (bufferProducer != NULL) { + surface = new Surface(bufferProducer); } msg->setObject( - "native-window", new NativeWindowWrapper(surfaceTextureClient)); + "native-window", new NativeWindowWrapper(surface)); sp<AMessage> response; return PostAndAwaitResponse(msg, &response); @@ -297,9 +297,11 @@ status_t 
SimplePlayer::onPrepare() { AString mime; CHECK(format->findString("mime", &mime)); + bool isVideo = !strncasecmp(mime.c_str(), "video/", 6); + if (!haveAudio && !strncasecmp(mime.c_str(), "audio/", 6)) { haveAudio = true; - } else if (!haveVideo && !strncasecmp(mime.c_str(), "video/", 6)) { + } else if (!haveVideo && isVideo) { haveVideo = true; } else { continue; @@ -320,7 +322,7 @@ status_t SimplePlayer::onPrepare() { err = state->mCodec->configure( format, - mNativeWindow->getSurfaceTextureClient(), + isVideo ? mNativeWindow->getSurfaceTextureClient() : NULL, NULL /* crypto */, 0 /* flags */); diff --git a/cmds/stagefright/SimplePlayer.h b/cmds/stagefright/SimplePlayer.h index 2548252..0a06059 100644 --- a/cmds/stagefright/SimplePlayer.h +++ b/cmds/stagefright/SimplePlayer.h @@ -23,7 +23,7 @@ namespace android { struct ABuffer; struct ALooper; struct AudioTrack; -struct ISurfaceTexture; +struct IGraphicBufferProducer; struct MediaCodec; struct NativeWindowWrapper; struct NuMediaExtractor; @@ -32,7 +32,7 @@ struct SimplePlayer : public AHandler { SimplePlayer(); status_t setDataSource(const char *path); - status_t setSurface(const sp<ISurfaceTexture> &surfaceTexture); + status_t setSurface(const sp<IGraphicBufferProducer> &bufferProducer); status_t prepare(); status_t start(); status_t stop(); diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp index 723a6e5..fdfefdf 100644 --- a/cmds/stagefright/codec.cpp +++ b/cmds/stagefright/codec.cpp @@ -36,6 +36,7 @@ #include <media/stagefright/NuMediaExtractor.h> #include <gui/ISurfaceComposer.h> #include <gui/SurfaceComposerClient.h> +#include <gui/Surface.h> #include <ui/DisplayInfo.h> static void usage(const char *me) { @@ -413,7 +414,7 @@ int main(int argc, char **argv) { looper->registerHandler(player); player->setDataSource(argv[0]); - player->setSurface(surface->getSurfaceTexture()); + player->setSurface(surface->getIGraphicBufferProducer()); player->start(); sleep(60); player->stop(); diff --git 
a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp new file mode 100644 index 0000000..cca33e0 --- /dev/null +++ b/cmds/stagefright/muxer.cpp @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "muxer" +#include <utils/Log.h> + +#include <binder/ProcessState.h> +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/ALooper.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/foundation/AString.h> +#include <media/stagefright/DataSource.h> +#include <media/stagefright/MediaCodec.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MediaMuxer.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/NuMediaExtractor.h> + +static void usage(const char *me) { + fprintf(stderr, "usage: %s [-a] [-v] [-s <trim start time>]" + " [-e <trim end time>] [-o <output file>]" + " <input video file>\n", me); + fprintf(stderr, " -h help\n"); + fprintf(stderr, " -a use audio\n"); + fprintf(stderr, " -v use video\n"); + fprintf(stderr, " -s Time in milli-seconds when the trim should start\n"); + fprintf(stderr, " -e Time in milli-seconds when the trim should end\n"); + fprintf(stderr, " -o output file name. 
Default is /sdcard/muxeroutput.mp4\n"); + + exit(1); +} + +using namespace android; + +static int muxing( + const android::sp<android::ALooper> &looper, + const char *path, + bool useAudio, + bool useVideo, + const char *outputFileName, + bool enableTrim, + int trimStartTimeMs, + int trimEndTimeMs, + int rotationDegrees) { + sp<NuMediaExtractor> extractor = new NuMediaExtractor; + if (extractor->setDataSource(path) != OK) { + fprintf(stderr, "unable to instantiate extractor. %s\n", path); + return 1; + } + + if (outputFileName == NULL) { + outputFileName = "/sdcard/muxeroutput.mp4"; + } + + ALOGV("input file %s, output file %s", path, outputFileName); + ALOGV("useAudio %d, useVideo %d", useAudio, useVideo); + + sp<MediaMuxer> muxer = new MediaMuxer(outputFileName, + MediaMuxer::OUTPUT_FORMAT_MPEG_4); + + size_t trackCount = extractor->countTracks(); + // Map the extractor's track index to the muxer's track index. + KeyedVector<size_t, ssize_t> trackIndexMap; + size_t bufferSize = 1 * 1024 * 1024; // default buffer size is 1MB. 
+ + bool haveAudio = false; + bool haveVideo = false; + + int64_t trimStartTimeUs = trimStartTimeMs * 1000; + int64_t trimEndTimeUs = trimEndTimeMs * 1000; + bool trimStarted = false; + int64_t trimOffsetTimeUs = 0; + + for (size_t i = 0; i < trackCount; ++i) { + sp<AMessage> format; + status_t err = extractor->getTrackFormat(i, &format); + CHECK_EQ(err, (status_t)OK); + ALOGV("extractor getTrackFormat: %s", format->debugString().c_str()); + + AString mime; + CHECK(format->findString("mime", &mime)); + + bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6); + bool isVideo = !strncasecmp(mime.c_str(), "video/", 6); + + if (useAudio && !haveAudio && isAudio) { + haveAudio = true; + } else if (useVideo && !haveVideo && isVideo) { + haveVideo = true; + } else { + continue; + } + + if (isVideo) { + int width , height; + CHECK(format->findInt32("width", &width)); + CHECK(format->findInt32("height", &height)); + bufferSize = width * height * 4; // Assuming it is maximally 4BPP + } + + int64_t duration; + CHECK(format->findInt64("durationUs", &duration)); + + // Since we got the duration now, correct the start time. 
+ if (enableTrim) { + if (trimStartTimeUs > duration) { + fprintf(stderr, "Warning: trimStartTimeUs > duration," + " reset to 0\n"); + trimStartTimeUs = 0; + } + } + + ALOGV("selecting track %d", i); + + err = extractor->selectTrack(i); + CHECK_EQ(err, (status_t)OK); + + ssize_t newTrackIndex = muxer->addTrack(format); + CHECK_GE(newTrackIndex, 0); + trackIndexMap.add(i, newTrackIndex); + } + + int64_t muxerStartTimeUs = ALooper::GetNowUs(); + + bool sawInputEOS = false; + + size_t trackIndex = -1; + sp<ABuffer> newBuffer = new ABuffer(bufferSize); + + muxer->setOrientationHint(rotationDegrees); + muxer->start(); + + while (!sawInputEOS) { + status_t err = extractor->getSampleTrackIndex(&trackIndex); + if (err != OK) { + ALOGV("saw input eos, err %d", err); + sawInputEOS = true; + break; + } else { + err = extractor->readSampleData(newBuffer); + CHECK_EQ(err, (status_t)OK); + + int64_t timeUs; + err = extractor->getSampleTime(&timeUs); + CHECK_EQ(err, (status_t)OK); + + sp<MetaData> meta; + err = extractor->getSampleMeta(&meta); + CHECK_EQ(err, (status_t)OK); + + uint32_t sampleFlags = 0; + int32_t val; + if (meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) { + // We only support BUFFER_FLAG_SYNCFRAME in the flag for now. + sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME; + + // We turn on trimming at the sync frame. + if (enableTrim && timeUs > trimStartTimeUs && + timeUs <= trimEndTimeUs) { + if (trimStarted == false) { + trimOffsetTimeUs = timeUs; + } + trimStarted = true; + } + } + // Trim can end at any non-sync frame. 
+ if (enableTrim && timeUs > trimEndTimeUs) { + trimStarted = false; + } + + if (!enableTrim || (enableTrim && trimStarted)) { + err = muxer->writeSampleData(newBuffer, + trackIndexMap.valueFor(trackIndex), + timeUs - trimOffsetTimeUs, sampleFlags); + } + + extractor->advance(); + } + } + + muxer->stop(); + newBuffer.clear(); + trackIndexMap.clear(); + + int64_t elapsedTimeUs = ALooper::GetNowUs() - muxerStartTimeUs; + fprintf(stderr, "SUCCESS: muxer generate the video in %lld ms\n", + elapsedTimeUs / 1000); + + return 0; +} + +int main(int argc, char **argv) { + const char *me = argv[0]; + + bool useAudio = false; + bool useVideo = false; + char *outputFileName = NULL; + int trimStartTimeMs = -1; + int trimEndTimeMs = -1; + int rotationDegrees = 0; + // When trimStartTimeMs and trimEndTimeMs seems valid, we turn this switch + // to true. + bool enableTrim = false; + + int res; + while ((res = getopt(argc, argv, "h?avo:s:e:r:")) >= 0) { + switch (res) { + case 'a': + { + useAudio = true; + break; + } + + case 'v': + { + useVideo = true; + break; + } + + case 'o': + { + outputFileName = optarg; + break; + } + + case 's': + { + trimStartTimeMs = atoi(optarg); + break; + } + + case 'e': + { + trimEndTimeMs = atoi(optarg); + break; + } + + case 'r': + { + rotationDegrees = atoi(optarg); + break; + } + + case '?': + case 'h': + default: + { + usage(me); + } + } + } + + argc -= optind; + argv += optind; + + if (argc != 1) { + usage(me); + } + + if (trimStartTimeMs < 0 || trimEndTimeMs < 0) { + // If no input on either 's' or 'e', or they are obviously wrong input, + // then turn off trimming. 
+ ALOGV("Trimming is disabled, copying the whole length video."); + enableTrim = false; + } else if (trimStartTimeMs > trimEndTimeMs) { + fprintf(stderr, "ERROR: start time is bigger\n"); + return 1; + } else { + enableTrim = true; + } + + if (!useAudio && !useVideo) { + fprintf(stderr, "ERROR: Missing both -a and -v, no track to mux then.\n"); + return 1; + } + ProcessState::self()->startThreadPool(); + + // Make sure setDataSource() works. + DataSource::RegisterDefaultSniffers(); + + sp<ALooper> looper = new ALooper; + looper->start(); + + int result = muxing(looper, argv[0], useAudio, useVideo, outputFileName, + enableTrim, trimStartTimeMs, trimEndTimeMs, rotationDegrees); + + looper->stop(); + + return result; +} diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp index 45c3f7b..b7a40c2 100644 --- a/cmds/stagefright/record.cpp +++ b/cmds/stagefright/record.cpp @@ -264,7 +264,8 @@ int main(int argc, char **argv) { #endif #if 0 - CameraSource *source = CameraSource::Create(); + CameraSource *source = CameraSource::Create( + String16(argv[0], strlen(argv[0]))); source->start(); printf("source = %p\n", source); diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp index e02f111..c30c122 100644 --- a/cmds/stagefright/recordvideo.cpp +++ b/cmds/stagefright/recordvideo.cpp @@ -44,7 +44,7 @@ static void usage(const char *me) { fprintf(stderr, " -p encoder profile. 
see omx il header (default: encoder specific)\n"); fprintf(stderr, " -v video codec: [0] AVC [1] M4V [2] H263 (default: 0)\n"); fprintf(stderr, " -s(oftware) prefer software codec\n"); - fprintf(stderr, "The output file is /sdcard/output.mp4\n"); + fprintf(stderr, " -o filename: output file (default: /sdcard/output.mp4)\n"); exit(1); } @@ -162,12 +162,12 @@ int main(int argc, char **argv) { int level = -1; // Encoder specific default int profile = -1; // Encoder specific default int codec = 0; - const char *fileName = "/sdcard/output.mp4"; + char *fileName = "/sdcard/output.mp4"; bool preferSoftwareCodec = false; android::ProcessState::self()->startThreadPool(); int res; - while ((res = getopt(argc, argv, "b:c:f:i:n:w:t:l:p:v:hs")) >= 0) { + while ((res = getopt(argc, argv, "b:c:f:i:n:w:t:l:p:v:o:hs")) >= 0) { switch (res) { case 'b': { @@ -235,6 +235,12 @@ int main(int argc, char **argv) { break; } + case 'o': + { + fileName = optarg; + break; + } + case 's': { preferSoftwareCodec = true; diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp index b92a8a0..115b07c 100644 --- a/cmds/stagefright/stagefright.cpp +++ b/cmds/stagefright/stagefright.cpp @@ -22,7 +22,6 @@ #include <stdlib.h> #include <string.h> -#include <unistd.h> #include "jpeg.h" #include "SineSource.h" @@ -31,6 +30,7 @@ #include <binder/ProcessState.h> #include <media/IMediaPlayerService.h> #include <media/stagefright/foundation/ALooper.h> +#include <media/stagefright/foundation/AMessage.h> #include "include/LiveSession.h" #include "include/NuCachedSource2.h" #include <media/stagefright/AudioPlayer.h> @@ -53,7 +53,7 @@ #include <fcntl.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <gui/SurfaceComposerClient.h> using namespace android; @@ -523,7 +523,7 @@ static void writeSourcesToMP4( } sp<MetaData> params = new MetaData; - params->setInt32(kKeyNotRealTime, true); + params->setInt32(kKeyRealTimeRecording, false); 
CHECK_EQ(writer->start(params.get()), (status_t)OK); while (!writer->reachedEOS()) { @@ -589,7 +589,7 @@ static void performSeekTest(const sp<MediaSource> &source) { } static void usage(const char *me) { - fprintf(stderr, "usage: %s\n", me); + fprintf(stderr, "usage: %s [options] [input_filename]\n", me); fprintf(stderr, " -h(elp)\n"); fprintf(stderr, " -a(udio)\n"); fprintf(stderr, " -n repetitions\n"); @@ -607,8 +607,8 @@ static void usage(const char *me) { "(video only)\n"); fprintf(stderr, " -S allocate buffers from a surface\n"); fprintf(stderr, " -T allocate buffers from a surface texture\n"); - fprintf(stderr, " -d(ump) filename (raw stream data to a file)\n"); - fprintf(stderr, " -D(ump) filename (decoded PCM data to a file)\n"); + fprintf(stderr, " -d(ump) output_filename (raw stream data to a file)\n"); + fprintf(stderr, " -D(ump) output_filename (decoded PCM data to a file)\n"); } static void dumpCodecProfiles(const sp<IOMX>& omx, bool queryDecoders) { @@ -821,7 +821,7 @@ int main(int argc, char **argv) { CHECK(service.get() != NULL); sp<IMediaMetadataRetriever> retriever = - service->createMetadataRetriever(getpid()); + service->createMetadataRetriever(); CHECK(retriever != NULL); @@ -940,8 +940,8 @@ int main(int argc, char **argv) { } else { CHECK(useSurfaceTexAlloc); - sp<SurfaceTexture> texture = new SurfaceTexture(0 /* tex */); - gSurface = new SurfaceTextureClient(texture); + sp<GLConsumer> texture = new GLConsumer(0 /* tex */); + gSurface = new Surface(texture->getBufferQueue()); } CHECK_EQ((status_t)OK, @@ -1005,7 +1005,7 @@ int main(int argc, char **argv) { looper = new ALooper; looper->start(); } - liveSession = new LiveSession; + liveSession = new LiveSession(NULL /* notify */); looper->registerHandler(liveSession); liveSession->connect(uri.string()); diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp index 7329dcc..dba67a9 100644 --- a/cmds/stagefright/stream.cpp +++ b/cmds/stagefright/stream.cpp @@ -35,6 +35,7 @@ #include 
<media/IMediaPlayerService.h> #include <gui/ISurfaceComposer.h> #include <gui/SurfaceComposerClient.h> +#include <gui/Surface.h> #include <fcntl.h> #include <ui/DisplayInfo.h> @@ -370,10 +371,10 @@ int main(int argc, char **argv) { } sp<IMediaPlayer> player = - service->create(getpid(), client, 0); + service->create(client, 0); if (player != NULL && player->setDataSource(source) == NO_ERROR) { - player->setVideoSurfaceTexture(surface->getSurfaceTexture()); + player->setVideoSurfaceTexture(surface->getIGraphicBufferProducer()); player->start(); client->waitForEOS(); diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp index 0282036..91fd91e 100644 --- a/drm/common/IDrmManagerService.cpp +++ b/drm/common/IDrmManagerService.cpp @@ -190,8 +190,9 @@ DrmConstraints* BpDrmManagerService::getConstraints( if (0 < bufferSize) { data = new char[bufferSize]; reply.read(data, bufferSize); + drmConstraints->put(&key, data); + delete[] data; } - drmConstraints->put(&key, data); } } return drmConstraints; @@ -219,8 +220,9 @@ DrmMetadata* BpDrmManagerService::getMetadata(int uniqueId, const String8* path) if (0 < bufferSize) { data = new char[bufferSize]; reply.read(data, bufferSize); + drmMetadata->put(&key, data); + delete[] data; } - drmMetadata->put(&key, data); } } return drmMetadata; @@ -889,9 +891,11 @@ status_t BnDrmManagerService::onTransact( int bufferSize = 0; if (NULL != value) { bufferSize = strlen(value); + reply->writeInt32(bufferSize + 1); + reply->write(value, bufferSize + 1); + } else { + reply->writeInt32(0); } - reply->writeInt32(bufferSize + 1); - reply->write(value, bufferSize + 1); } } delete drmConstraints; drmConstraints = NULL; diff --git a/drm/common/ReadWriteUtils.cpp b/drm/common/ReadWriteUtils.cpp index fd17e98..d696f16 100644 --- a/drm/common/ReadWriteUtils.cpp +++ b/drm/common/ReadWriteUtils.cpp @@ -47,7 +47,7 @@ String8 ReadWriteUtils::readBytes(const String8& filePath) { if (length == read(fd, (void*) bytes, length)) { 
string.append(bytes, length); } - delete bytes; + delete[] bytes; } fclose(file); } diff --git a/drm/drmserver/Android.mk b/drm/drmserver/Android.mk index 96205a1..dc973da 100644 --- a/drm/drmserver/Android.mk +++ b/drm/drmserver/Android.mk @@ -24,6 +24,7 @@ LOCAL_SRC_FILES:= \ LOCAL_SHARED_LIBRARIES := \ libmedia \ libutils \ + liblog \ libbinder \ libdl diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp index e7b0e90..bfaf4bc 100644 --- a/drm/drmserver/DrmManager.cpp +++ b/drm/drmserver/DrmManager.cpp @@ -42,7 +42,8 @@ const String8 DrmManager::EMPTY_STRING(""); DrmManager::DrmManager() : mDecryptSessionId(0), mConvertId(0) { - + srand(time(NULL)); + memset(mUniqueIdArray, 0, sizeof(bool) * kMaxNumUniqueIds); } DrmManager::~DrmManager() { @@ -52,48 +53,37 @@ DrmManager::~DrmManager() { int DrmManager::addUniqueId(bool isNative) { Mutex::Autolock _l(mLock); - int temp = 0; - bool foundUniqueId = false; - const int size = mUniqueIdVector.size(); - const int uniqueIdRange = 0xfff; - int maxLoopTimes = (uniqueIdRange - 1) / 2; - srand(time(NULL)); + int uniqueId = -1; + int random = rand(); - while (!foundUniqueId) { - temp = rand() & uniqueIdRange; + for (size_t index = 0; index < kMaxNumUniqueIds; ++index) { + int temp = (random + index) % kMaxNumUniqueIds; + if (!mUniqueIdArray[temp]) { + uniqueId = temp; + mUniqueIdArray[uniqueId] = true; - if (isNative) { - // set a flag to differentiate DrmManagerClient - // created from native side and java side - temp |= 0x1000; - } - - int index = 0; - for (; index < size; ++index) { - if (mUniqueIdVector.itemAt(index) == temp) { - foundUniqueId = false; - break; + if (isNative) { + // set a flag to differentiate DrmManagerClient + // created from native side and java side + uniqueId |= 0x1000; } + break; } - if (index == size) { - foundUniqueId = true; - } - - maxLoopTimes --; - LOG_FATAL_IF(maxLoopTimes <= 0, "cannot find an unique ID for this session"); } - mUniqueIdVector.push(temp); - return temp; 
+ // -1 indicates that no unique id can be allocated. + return uniqueId; } void DrmManager::removeUniqueId(int uniqueId) { Mutex::Autolock _l(mLock); - for (unsigned int i = 0; i < mUniqueIdVector.size(); i++) { - if (uniqueId == mUniqueIdVector.itemAt(i)) { - mUniqueIdVector.removeAt(i); - break; - } + if (uniqueId & 0x1000) { + // clear the flag for the native side. + uniqueId &= ~(0x1000); + } + + if (uniqueId >= 0 && uniqueId < kMaxNumUniqueIds) { + mUniqueIdArray[uniqueId] = false; } } diff --git a/drm/libdrmframework/Android.mk b/drm/libdrmframework/Android.mk index 9e07fe3..49c4f9b 100644 --- a/drm/libdrmframework/Android.mk +++ b/drm/libdrmframework/Android.mk @@ -25,6 +25,7 @@ LOCAL_MODULE:= libdrmframework LOCAL_SHARED_LIBRARIES := \ libutils \ + liblog \ libbinder \ libdl diff --git a/drm/libdrmframework/include/DrmManager.h b/drm/libdrmframework/include/DrmManager.h index 491e8f7..8ab693f 100644 --- a/drm/libdrmframework/include/DrmManager.h +++ b/drm/libdrmframework/include/DrmManager.h @@ -144,7 +144,11 @@ private: bool canHandle(int uniqueId, const String8& path); private: - Vector<int> mUniqueIdVector; + enum { + kMaxNumUniqueIds = 0x1000, + }; + + bool mUniqueIdArray[kMaxNumUniqueIds]; static const String8 EMPTY_STRING; int mDecryptSessionId; diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.mk b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.mk index 205b9a5..e251f82 100644 --- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.mk +++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.mk @@ -36,6 +36,7 @@ LOCAL_SHARED_LIBRARIES := \ libicui18n \ libicuuc \ libutils \ + liblog \ libdl \ libcrypto \ libssl \ diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/converter/Android.mk b/drm/libdrmframework/plugins/forward-lock/internal-format/converter/Android.mk index 37a3851..8f08c88 100644 --- 
a/drm/libdrmframework/plugins/forward-lock/internal-format/converter/Android.mk +++ b/drm/libdrmframework/plugins/forward-lock/internal-format/converter/Android.mk @@ -26,10 +26,6 @@ LOCAL_C_INCLUDES := \ LOCAL_SHARED_LIBRARIES := libcrypto -LOCAL_WHOLE_STATIC_LIBRARIES := libfwdlock-common - -LOCAL_STATIC_LIBRARIES := libfwdlock-common - LOCAL_MODULE := libfwdlock-converter LOCAL_MODULE_TAGS := optional diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c b/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c index bb97abc..9d15835 100644 --- a/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c +++ b/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c @@ -245,7 +245,9 @@ static int FwdLockConv_DeriveKeys(FwdLockConv_Session_t *pSession) { AES_KEY sessionRoundKeys; unsigned char value[KEY_SIZE]; unsigned char key[KEY_SIZE]; - } *pData = malloc(sizeof *pData); + }; + const size_t kSize = sizeof(struct FwdLockConv_DeriveKeys_Data); + struct FwdLockConv_DeriveKeys_Data *pData = malloc(kSize); if (pData == NULL) { status = FwdLockConv_Status_OutOfMemory; } else { @@ -268,7 +270,7 @@ static int FwdLockConv_DeriveKeys(FwdLockConv_Session_t *pSession) { status = FwdLockConv_Status_OK; } } - memset(pData, 0, sizeof pData); // Zero out key data. + memset(pData, 0, kSize); // Zero out key data. 
free(pData); } return status; diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/Android.mk b/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/Android.mk index d9b5cfd..7b493c3 100644 --- a/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/Android.mk +++ b/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/Android.mk @@ -26,10 +26,6 @@ LOCAL_C_INCLUDES := \ LOCAL_SHARED_LIBRARIES := libcrypto -LOCAL_WHOLE_STATIC_LIBRARIES := libfwdlock-common - -LOCAL_STATIC_LIBRARIES := libfwdlock-common - LOCAL_MODULE := libfwdlock-decoder LOCAL_MODULE_TAGS := optional diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c b/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c index 7ff3c00..43b9e98 100644 --- a/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c +++ b/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c @@ -174,7 +174,10 @@ static int FwdLockFile_DeriveKeys(FwdLockFile_Session_t * pSession) { AES_KEY sessionRoundKeys; unsigned char value[KEY_SIZE]; unsigned char key[KEY_SIZE]; - } *pData = malloc(sizeof *pData); + }; + + const size_t kSize = sizeof(struct FwdLockFile_DeriveKeys_Data); + struct FwdLockFile_DeriveKeys_Data *pData = malloc(kSize); if (pData == NULL) { result = FALSE; } else { @@ -202,7 +205,7 @@ static int FwdLockFile_DeriveKeys(FwdLockFile_Session_t * pSession) { if (!result) { errno = ENOSYS; } - memset(pData, 0, sizeof pData); // Zero out key data. + memset(pData, 0, kSize); // Zero out key data. 
free(pData); } return result; diff --git a/drm/libdrmframework/plugins/passthru/Android.mk b/drm/libdrmframework/plugins/passthru/Android.mk index d170d49..cb3a2e2 100644 --- a/drm/libdrmframework/plugins/passthru/Android.mk +++ b/drm/libdrmframework/plugins/passthru/Android.mk @@ -25,6 +25,7 @@ LOCAL_STATIC_LIBRARIES := libdrmframeworkcommon LOCAL_SHARED_LIBRARIES := \ libutils \ + liblog \ libdl diff --git a/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp b/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp index fa659fd..084e323 100644 --- a/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp +++ b/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp @@ -65,10 +65,11 @@ DrmConstraints* DrmPassthruPlugIn::onGetConstraints( char* charValue = NULL; charValue = new char[value.length() + 1]; strncpy(charValue, value.string(), value.length()); + charValue[value.length()] = '\0'; //Just add dummy available time for verification drmConstraints->put(&(DrmConstraints::LICENSE_AVAILABLE_TIME), charValue); - + delete[] charValue; return drmConstraints; } diff --git a/drm/mediadrm/plugins/mock/Android.mk b/drm/mediadrm/plugins/mock/Android.mk new file mode 100644 index 0000000..ada23a2 --- /dev/null +++ b/drm/mediadrm/plugins/mock/Android.mk @@ -0,0 +1,38 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + MockDrmCryptoPlugin.cpp + +LOCAL_MODULE := libmockdrmcryptoplugin + +LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_SHARED_LIBRARIES)/mediadrm + +LOCAL_SHARED_LIBRARIES := \ + libutils liblog + +LOCAL_C_INCLUDES += \ + $(TOP)/frameworks/av/include \ + $(TOP)/frameworks/native/include/media + +# Set the following flag to enable the decryption passthru flow +#LOCAL_CFLAGS += -DENABLE_PASSTHRU_DECRYPTION + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp new file mode 100644 index 0000000..06fc29d --- /dev/null +++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp @@ -0,0 +1,697 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "MockDrmCryptoPlugin" +#include <utils/Log.h> + + +#include "drm/DrmAPI.h" +#include "MockDrmCryptoPlugin.h" +#include "media/stagefright/MediaErrors.h" + +using namespace android; + +// Shared library entry point +DrmFactory *createDrmFactory() +{ + return new MockDrmFactory(); +} + +// Shared library entry point +CryptoFactory *createCryptoFactory() +{ + return new MockCryptoFactory(); +} + +const uint8_t mock_uuid[16] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10}; + +namespace android { + + // MockDrmFactory + bool MockDrmFactory::isCryptoSchemeSupported(const uint8_t uuid[16]) + { + return (!memcmp(uuid, mock_uuid, sizeof(uuid))); + } + + status_t MockDrmFactory::createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin) + { + *plugin = new MockDrmPlugin(); + return OK; + } + + // MockCryptoFactory + bool MockCryptoFactory::isCryptoSchemeSupported(const uint8_t uuid[16]) const + { + return (!memcmp(uuid, mock_uuid, sizeof(uuid))); + } + + status_t MockCryptoFactory::createPlugin(const uint8_t uuid[16], const void *data, + size_t size, CryptoPlugin **plugin) + { + *plugin = new MockCryptoPlugin(); + return OK; + } + + + // MockDrmPlugin methods + + status_t MockDrmPlugin::openSession(Vector<uint8_t> &sessionId) + { + const size_t kSessionIdSize = 8; + + Mutex::Autolock lock(mLock); + for (size_t i = 0; i < kSessionIdSize / sizeof(long); i++) { + long r = random(); + sessionId.appendArray((uint8_t *)&r, sizeof(long)); + } + mSessions.add(sessionId); + + ALOGD("MockDrmPlugin::openSession() -> %s", vectorToString(sessionId).string()); + return OK; + } + + status_t MockDrmPlugin::closeSession(Vector<uint8_t> const &sessionId) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::closeSession(%s)", vectorToString(sessionId).string()); + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + 
mSessions.removeAt(index); + return OK; + } + + + status_t MockDrmPlugin::getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::getKeyRequest(sessionId=%s, initData=%s, mimeType=%s" + ", keyType=%d, optionalParameters=%s))", + vectorToString(sessionId).string(), vectorToString(initData).string(), mimeType.string(), + keyType, stringMapToString(optionalParameters).string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] initData -> mock-initdata + // string mimeType -> mock-mimetype + // string keyType -> mock-keytype + // string optionalParameters -> mock-optparams formatted as {key1,value1},{key2,value2} + + mByteArrayProperties.add(String8("mock-initdata"), initData); + mStringProperties.add(String8("mock-mimetype"), mimeType); + + String8 keyTypeStr; + keyTypeStr.appendFormat("%d", (int)keyType); + mStringProperties.add(String8("mock-keytype"), keyTypeStr); + + String8 params; + for (size_t i = 0; i < optionalParameters.size(); i++) { + params.appendFormat("%s{%s,%s}", i ? 
"," : "", + optionalParameters.keyAt(i).string(), + optionalParameters.valueAt(i).string()); + } + mStringProperties.add(String8("mock-optparams"), params); + + // Properties used in mock test, set by cts test app returned from mock plugin + // byte[] mock-request -> request + // string mock-default-url -> defaultUrl + + index = mByteArrayProperties.indexOfKey(String8("mock-request")); + if (index < 0) { + ALOGD("Missing 'mock-request' parameter for mock"); + return BAD_VALUE; + } else { + request = mByteArrayProperties.valueAt(index); + } + + index = mStringProperties.indexOfKey(String8("mock-defaultUrl")); + if (index < 0) { + ALOGD("Missing 'mock-defaultUrl' parameter for mock"); + return BAD_VALUE; + } else { + defaultUrl = mStringProperties.valueAt(index); + } + return OK; + } + + status_t MockDrmPlugin::provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::provideKeyResponse(sessionId=%s, response=%s)", + vectorToString(sessionId).string(), vectorToString(response).string()); + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + if (response.size() == 0) { + return BAD_VALUE; + } + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] response -> mock-response + mByteArrayProperties.add(String8("mock-response"), response); + + const size_t kKeySetIdSize = 8; + + for (size_t i = 0; i < kKeySetIdSize / sizeof(long); i++) { + long r = random(); + keySetId.appendArray((uint8_t *)&r, sizeof(long)); + } + mKeySets.add(keySetId); + + return OK; + } + + status_t MockDrmPlugin::removeKeys(Vector<uint8_t> const &keySetId) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::removeKeys(keySetId=%s)", + vectorToString(keySetId).string()); + + ssize_t index = findKeySet(keySetId); + if (index == kNotFound) { + ALOGD("Invalid keySetId"); + 
return BAD_VALUE; + } + mKeySets.removeAt(index); + + return OK; + } + + status_t MockDrmPlugin::restoreKeys(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keySetId) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::restoreKeys(sessionId=%s, keySetId=%s)", + vectorToString(sessionId).string(), + vectorToString(keySetId).string()); + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + index = findKeySet(keySetId); + if (index == kNotFound) { + ALOGD("Invalid keySetId"); + return BAD_VALUE; + } + + return OK; + } + + status_t MockDrmPlugin::queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const + { + ALOGD("MockDrmPlugin::queryKeyStatus(sessionId=%s)", + vectorToString(sessionId).string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + infoMap.add(String8("purchaseDuration"), String8("1000")); + infoMap.add(String8("licenseDuration"), String8("100")); + return OK; + } + + status_t MockDrmPlugin::getProvisionRequest(Vector<uint8_t> &request, + String8 &defaultUrl) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::getProvisionRequest()"); + + // Properties used in mock test, set by cts test app returned from mock plugin + // byte[] mock-request -> request + // string mock-default-url -> defaultUrl + + ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-request")); + if (index < 0) { + ALOGD("Missing 'mock-request' parameter for mock"); + return BAD_VALUE; + } else { + request = mByteArrayProperties.valueAt(index); + } + + index = mStringProperties.indexOfKey(String8("mock-defaultUrl")); + if (index < 0) { + ALOGD("Missing 'mock-defaultUrl' parameter for mock"); + return BAD_VALUE; + } else { + defaultUrl = mStringProperties.valueAt(index); + } + return OK; + } + + status_t MockDrmPlugin::provideProvisionResponse(Vector<uint8_t> const 
&response) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::provideProvisionResponse(%s)", + vectorToString(response).string()); + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] response -> mock-response + + mByteArrayProperties.add(String8("mock-response"), response); + return OK; + } + + status_t MockDrmPlugin::getSecureStops(List<Vector<uint8_t> > &secureStops) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::getSecureStops()"); + + // Properties used in mock test, set by cts test app returned from mock plugin + // byte[] mock-secure-stop1 -> first secure stop in list + // byte[] mock-secure-stop2 -> second secure stop in list + + Vector<uint8_t> ss1, ss2; + ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-secure-stop1")); + if (index < 0) { + ALOGD("Missing 'mock-secure-stop1' parameter for mock"); + return BAD_VALUE; + } else { + ss1 = mByteArrayProperties.valueAt(index); + } + + index = mByteArrayProperties.indexOfKey(String8("mock-secure-stop2")); + if (index < 0) { + ALOGD("Missing 'mock-secure-stop2' parameter for mock"); + return BAD_VALUE; + } else { + ss2 = mByteArrayProperties.valueAt(index); + } + + secureStops.push_back(ss1); + secureStops.push_back(ss2); + return OK; + } + + status_t MockDrmPlugin::releaseSecureStops(Vector<uint8_t> const &ssRelease) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::releaseSecureStops(%s)", + vectorToString(ssRelease).string()); + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] secure-stop-release -> mock-ssrelease + mByteArrayProperties.add(String8("mock-ssrelease"), ssRelease); + + return OK; + } + + status_t MockDrmPlugin::getPropertyString(String8 const &name, String8 &value) const + { + ALOGD("MockDrmPlugin::getPropertyString(name=%s)", name.string()); + ssize_t index = mStringProperties.indexOfKey(name); + if (index < 0) { + ALOGD("no property for '%s'", name.string()); + return 
BAD_VALUE; + } + value = mStringProperties.valueAt(index); + return OK; + } + + status_t MockDrmPlugin::getPropertyByteArray(String8 const &name, + Vector<uint8_t> &value) const + { + ALOGD("MockDrmPlugin::getPropertyByteArray(name=%s)", name.string()); + ssize_t index = mByteArrayProperties.indexOfKey(name); + if (index < 0) { + ALOGD("no property for '%s'", name.string()); + return BAD_VALUE; + } + value = mByteArrayProperties.valueAt(index); + return OK; + } + + status_t MockDrmPlugin::setPropertyString(String8 const &name, + String8 const &value) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::setPropertyString(name=%s, value=%s)", + name.string(), value.string()); + + if (name == "mock-send-event") { + unsigned code, extra; + sscanf(value.string(), "%d %d", &code, &extra); + DrmPlugin::EventType eventType = (DrmPlugin::EventType)code; + + Vector<uint8_t> const *pSessionId = NULL; + ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-event-session-id")); + if (index >= 0) { + pSessionId = &mByteArrayProperties[index]; + } + + Vector<uint8_t> const *pData = NULL; + index = mByteArrayProperties.indexOfKey(String8("mock-event-data")); + if (index >= 0) { + pData = &mByteArrayProperties[index]; + } + ALOGD("sending event from mock drm plugin: %d %d %s %s", + (int)code, extra, pSessionId ? vectorToString(*pSessionId) : "{}", + pData ? 
vectorToString(*pData) : "{}"); + + sendEvent(eventType, extra, pSessionId, pData); + } else { + mStringProperties.add(name, value); + } + return OK; + } + + status_t MockDrmPlugin::setPropertyByteArray(String8 const &name, + Vector<uint8_t> const &value) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::setPropertyByteArray(name=%s, value=%s)", + name.string(), vectorToString(value).string()); + mByteArrayProperties.add(name, value); + return OK; + } + + status_t MockDrmPlugin::setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) + { + Mutex::Autolock lock(mLock); + + ALOGD("MockDrmPlugin::setCipherAlgorithm(sessionId=%s, algorithm=%s)", + vectorToString(sessionId).string(), algorithm.string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + if (algorithm == "AES/CBC/NoPadding") { + return OK; + } + return BAD_VALUE; + } + + status_t MockDrmPlugin::setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) + { + Mutex::Autolock lock(mLock); + + ALOGD("MockDrmPlugin::setMacAlgorithm(sessionId=%s, algorithm=%s)", + vectorToString(sessionId).string(), algorithm.string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + if (algorithm == "HmacSHA256") { + return OK; + } + return BAD_VALUE; + } + + status_t MockDrmPlugin::encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::encrypt(sessionId=%s, keyId=%s, input=%s, iv=%s)", + vectorToString(sessionId).string(), + vectorToString(keyId).string(), + vectorToString(input).string(), + vectorToString(iv).string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + 
// Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] keyId -> mock-keyid + // byte[] input -> mock-input + // byte[] iv -> mock-iv + mByteArrayProperties.add(String8("mock-keyid"), keyId); + mByteArrayProperties.add(String8("mock-input"), input); + mByteArrayProperties.add(String8("mock-iv"), iv); + + // Properties used in mock test, set by cts test app returned from mock plugin + // byte[] mock-output -> output + index = mByteArrayProperties.indexOfKey(String8("mock-output")); + if (index < 0) { + ALOGD("Missing 'mock-request' parameter for mock"); + return BAD_VALUE; + } else { + output = mByteArrayProperties.valueAt(index); + } + return OK; + } + + status_t MockDrmPlugin::decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::decrypt(sessionId=%s, keyId=%s, input=%s, iv=%s)", + vectorToString(sessionId).string(), + vectorToString(keyId).string(), + vectorToString(input).string(), + vectorToString(iv).string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] keyId -> mock-keyid + // byte[] input -> mock-input + // byte[] iv -> mock-iv + mByteArrayProperties.add(String8("mock-keyid"), keyId); + mByteArrayProperties.add(String8("mock-input"), input); + mByteArrayProperties.add(String8("mock-iv"), iv); + + // Properties used in mock test, set by cts test app returned from mock plugin + // byte[] mock-output -> output + index = mByteArrayProperties.indexOfKey(String8("mock-output")); + if (index < 0) { + ALOGD("Missing 'mock-request' parameter for mock"); + return BAD_VALUE; + } else { + output = mByteArrayProperties.valueAt(index); + } + return OK; + } + + status_t 
MockDrmPlugin::sign(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::sign(sessionId=%s, keyId=%s, message=%s)", + vectorToString(sessionId).string(), + vectorToString(keyId).string(), + vectorToString(message).string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] keyId -> mock-keyid + // byte[] message -> mock-message + mByteArrayProperties.add(String8("mock-keyid"), keyId); + mByteArrayProperties.add(String8("mock-message"), message); + + // Properties used in mock test, set by cts test app returned from mock plugin + // byte[] mock-signature -> signature + index = mByteArrayProperties.indexOfKey(String8("mock-signature")); + if (index < 0) { + ALOGD("Missing 'mock-request' parameter for mock"); + return BAD_VALUE; + } else { + signature = mByteArrayProperties.valueAt(index); + } + return OK; + } + + status_t MockDrmPlugin::verify(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match) + { + Mutex::Autolock lock(mLock); + ALOGD("MockDrmPlugin::verify(sessionId=%s, keyId=%s, message=%s, signature=%s)", + vectorToString(sessionId).string(), + vectorToString(keyId).string(), + vectorToString(message).string(), + vectorToString(signature).string()); + + ssize_t index = findSession(sessionId); + if (index == kNotFound) { + ALOGD("Invalid sessionId"); + return BAD_VALUE; + } + + // Properties used in mock test, set by mock plugin and verifed cts test app + // byte[] keyId -> mock-keyid + // byte[] message -> mock-message + // byte[] signature -> mock-signature + mByteArrayProperties.add(String8("mock-keyid"), keyId); + 
mByteArrayProperties.add(String8("mock-message"), message); + mByteArrayProperties.add(String8("mock-signature"), signature); + + // Properties used in mock test, set by cts test app returned from mock plugin + // String mock-match "1" or "0" -> match + index = mStringProperties.indexOfKey(String8("mock-match")); + if (index < 0) { + ALOGD("Missing 'mock-request' parameter for mock"); + return BAD_VALUE; + } else { + match = atol(mStringProperties.valueAt(index).string()); + } + return OK; + } + + ssize_t MockDrmPlugin::findSession(Vector<uint8_t> const &sessionId) const + { + ALOGD("findSession: nsessions=%d, size=%d", mSessions.size(), sessionId.size()); + for (size_t i = 0; i < mSessions.size(); ++i) { + if (memcmp(mSessions[i].array(), sessionId.array(), sessionId.size()) == 0) { + return i; + } + } + return kNotFound; + } + + ssize_t MockDrmPlugin::findKeySet(Vector<uint8_t> const &keySetId) const + { + ALOGD("findKeySet: nkeySets=%d, size=%d", mKeySets.size(), keySetId.size()); + for (size_t i = 0; i < mKeySets.size(); ++i) { + if (memcmp(mKeySets[i].array(), keySetId.array(), keySetId.size()) == 0) { + return i; + } + } + return kNotFound; + } + + + // Conversion utilities + String8 MockDrmPlugin::vectorToString(Vector<uint8_t> const &vector) const + { + return arrayToString(vector.array(), vector.size()); + } + + String8 MockDrmPlugin::arrayToString(uint8_t const *array, size_t len) const + { + String8 result("{ "); + for (size_t i = 0; i < len; i++) { + result.appendFormat("0x%02x ", array[i]); + } + result += "}"; + return result; + } + + String8 MockDrmPlugin::stringMapToString(KeyedVector<String8, String8> map) const + { + String8 result("{ "); + for (size_t i = 0; i < map.size(); i++) { + result.appendFormat("%s{name=%s, value=%s}", i > 0 ? 
", " : "", + map.keyAt(i).string(), map.valueAt(i).string()); + } + return result + " }"; + } + + bool operator<(Vector<uint8_t> const &lhs, Vector<uint8_t> const &rhs) { + return lhs.size() < rhs.size() || (memcmp(lhs.array(), rhs.array(), lhs.size()) < 0); + } + + // + // Crypto Plugin + // + + bool MockCryptoPlugin::requiresSecureDecoderComponent(const char *mime) const + { + ALOGD("MockCryptoPlugin::requiresSecureDecoderComponent(mime=%s)", mime); + return false; + } + + ssize_t + MockCryptoPlugin::decrypt(bool secure, const uint8_t key[16], const uint8_t iv[16], + Mode mode, const void *srcPtr, const SubSample *subSamples, + size_t numSubSamples, void *dstPtr, AString *errorDetailMsg) + { + ALOGD("MockCryptoPlugin::decrypt(secure=%d, key=%s, iv=%s, mode=%d, src=%p, " + "subSamples=%s, dst=%p)", + (int)secure, + arrayToString(key, sizeof(key)).string(), + arrayToString(iv, sizeof(iv)).string(), + (int)mode, srcPtr, + subSamplesToString(subSamples, numSubSamples).string(), + dstPtr); + return OK; + } + + // Conversion utilities + String8 MockCryptoPlugin::arrayToString(uint8_t const *array, size_t len) const + { + String8 result("{ "); + for (size_t i = 0; i < len; i++) { + result.appendFormat("0x%02x ", array[i]); + } + result += "}"; + return result; + } + + String8 MockCryptoPlugin::subSamplesToString(SubSample const *subSamples, + size_t numSubSamples) const + { + String8 result; + for (size_t i = 0; i < numSubSamples; i++) { + result.appendFormat("[%d] {clear:%d, encrypted:%d} ", i, + subSamples[i].mNumBytesOfClearData, + subSamples[i].mNumBytesOfEncryptedData); + } + return result; + } + +}; diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h new file mode 100644 index 0000000..ca9eac7 --- /dev/null +++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); 
+ * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <utils/Mutex.h> + +#include "drm/DrmAPI.h" +#include "hardware/CryptoAPI.h" + +extern "C" { + android::DrmFactory *createDrmFactory(); + android::CryptoFactory *createCryptoFactory(); +} + +namespace android { + + class MockDrmFactory : public DrmFactory { + public: + MockDrmFactory() {} + virtual ~MockDrmFactory() {} + + bool isCryptoSchemeSupported(const uint8_t uuid[16]); + status_t createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin); + }; + + class MockCryptoFactory : public CryptoFactory { + public: + MockCryptoFactory() {} + virtual ~MockCryptoFactory() {} + + bool isCryptoSchemeSupported(const uint8_t uuid[16]) const; + status_t createPlugin( + const uint8_t uuid[16], const void *data, size_t size, + CryptoPlugin **plugin); + }; + + + + class MockDrmPlugin : public DrmPlugin { + public: + MockDrmPlugin() {} + virtual ~MockDrmPlugin() {} + + // from DrmPlugin + status_t openSession(Vector<uint8_t> &sessionId); + status_t closeSession(Vector<uint8_t> const &sessionId); + + status_t getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl); + + status_t provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId); + + status_t removeKeys(Vector<uint8_t> const &keySetId); + + status_t restoreKeys(Vector<uint8_t> const 
&sessionId, + Vector<uint8_t> const &keySetId); + + status_t queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const; + + status_t getProvisionRequest(Vector<uint8_t> &request, + String8 &defaultUrl); + + status_t provideProvisionResponse(Vector<uint8_t> const &response); + + status_t getSecureStops(List<Vector<uint8_t> > &secureStops); + status_t releaseSecureStops(Vector<uint8_t> const &ssRelease); + + status_t getPropertyString(String8 const &name, String8 &value ) const; + status_t getPropertyByteArray(String8 const &name, + Vector<uint8_t> &value ) const; + + status_t setPropertyString(String8 const &name, + String8 const &value ); + status_t setPropertyByteArray(String8 const &name, + Vector<uint8_t> const &value ); + + status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm); + + status_t setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm); + + status_t encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output); + + status_t decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output); + + status_t sign(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature); + + status_t verify(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match); + + private: + String8 vectorToString(Vector<uint8_t> const &vector) const; + String8 arrayToString(uint8_t const *array, size_t len) const; + String8 stringMapToString(KeyedVector<String8, String8> map) const; + + SortedVector<Vector<uint8_t> > mSessions; + SortedVector<Vector<uint8_t> > mKeySets; + + static const ssize_t kNotFound = -1; + ssize_t 
findSession(Vector<uint8_t> const &sessionId) const; + ssize_t findKeySet(Vector<uint8_t> const &keySetId) const; + + Mutex mLock; + KeyedVector<String8, String8> mStringProperties; + KeyedVector<String8, Vector<uint8_t> > mByteArrayProperties; + }; + + + class MockCryptoPlugin : public CryptoPlugin { + + bool requiresSecureDecoderComponent(const char *mime) const; + + ssize_t decrypt(bool secure, + const uint8_t key[16], const uint8_t iv[16], + Mode mode, const void *srcPtr, + const SubSample *subSamples, size_t numSubSamples, + void *dstPtr, AString *errorDetailMsg); + private: + String8 subSamplesToString(CryptoPlugin::SubSample const *subSamples, size_t numSubSamples) const; + String8 arrayToString(uint8_t const *array, size_t len) const; + }; +}; diff --git a/include/camera/Camera.h b/include/camera/Camera.h index 234e165..37626a4 100644 --- a/include/camera/Camera.h +++ b/include/camera/Camera.h @@ -18,41 +18,20 @@ #define ANDROID_HARDWARE_CAMERA_H #include <utils/Timers.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <system/camera.h> #include <camera/ICameraClient.h> #include <camera/ICameraRecordingProxy.h> #include <camera/ICameraRecordingProxyListener.h> +#include <camera/ICameraService.h> +#include <camera/ICamera.h> +#include <camera/CameraBase.h> namespace android { -struct CameraInfo { - /** - * The direction that the camera faces to. It should be CAMERA_FACING_BACK - * or CAMERA_FACING_FRONT. - */ - int facing; - - /** - * The orientation of the camera image. The value is the angle that the - * camera image needs to be rotated clockwise so it shows correctly on the - * display in its natural orientation. It should be 0, 90, 180, or 270. - * - * For example, suppose a device has a naturally tall screen. The - * back-facing camera sensor is mounted in landscape. You are looking at - * the screen. 
If the top side of the camera sensor is aligned with the - * right edge of the screen in natural orientation, the value should be - * 90. If the top side of a front-facing camera sensor is aligned with the - * right of the screen, the value should be 270. - */ - int orientation; -}; - -class ICameraService; -class ICamera; class Surface; -class Mutex; class String8; +class String16; // ref-counted object for callbacks class CameraListener: virtual public RefBase @@ -64,30 +43,39 @@ public: virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0; }; -class Camera : public BnCameraClient, public IBinder::DeathRecipient +class Camera; + +template <> +struct CameraTraits<Camera> +{ + typedef CameraListener TCamListener; + typedef ICamera TCamUser; + typedef ICameraClient TCamCallbacks; +}; + +class Camera : + public CameraBase<Camera>, + public BnCameraClient { public: + enum { + USE_CALLING_UID = ICameraService::USE_CALLING_UID + }; + // construct a camera client from an existing remote static sp<Camera> create(const sp<ICamera>& camera); - static int32_t getNumberOfCameras(); - static status_t getCameraInfo(int cameraId, - struct CameraInfo* cameraInfo); - static sp<Camera> connect(int cameraId); + static sp<Camera> connect(int cameraId, + const String16& clientPackageName, + int clientUid); + virtual ~Camera(); - void init(); status_t reconnect(); - void disconnect(); status_t lock(); status_t unlock(); - status_t getStatus() { return mStatus; } - - // pass the buffered Surface to the camera service - status_t setPreviewDisplay(const sp<Surface>& surface); - - // pass the buffered ISurfaceTexture to the camera service - status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture); + // pass the buffered IGraphicBufferProducer to the camera service + status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer); // start preview mode, must call setPreviewDisplay first status_t startPreview(); @@ 
-143,8 +131,6 @@ public: camera_frame_metadata_t *metadata); virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr); - sp<ICamera> remote(); - class RecordingProxy : public BnCameraRecordingProxy { public: @@ -159,36 +145,14 @@ public: sp<Camera> mCamera; }; -private: - Camera(); +protected: + Camera(int cameraId); Camera(const Camera&); Camera& operator=(const Camera); - virtual void binderDied(const wp<IBinder>& who); - - class DeathNotifier: public IBinder::DeathRecipient - { - public: - DeathNotifier() { - } - - virtual void binderDied(const wp<IBinder>& who); - }; - - static sp<DeathNotifier> mDeathNotifier; - - // helper function to obtain camera service handle - static const sp<ICameraService>& getCameraService(); - - sp<ICamera> mCamera; - status_t mStatus; - - sp<CameraListener> mListener; - sp<ICameraRecordingProxyListener> mRecordingProxyListener; - friend class DeathNotifier; + sp<ICameraRecordingProxyListener> mRecordingProxyListener; - static Mutex mLock; - static sp<ICameraService> mCameraService; + friend class CameraBase; }; }; // namespace android diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h new file mode 100644 index 0000000..9b08c0f --- /dev/null +++ b/include/camera/CameraBase.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_CAMERA_BASE_H +#define ANDROID_HARDWARE_CAMERA_BASE_H + +#include <utils/Mutex.h> +#include <camera/ICameraService.h> + +struct camera_frame_metadata; + +namespace android { + +struct CameraInfo { + /** + * The direction that the camera faces to. It should be CAMERA_FACING_BACK + * or CAMERA_FACING_FRONT. + */ + int facing; + + /** + * The orientation of the camera image. The value is the angle that the + * camera image needs to be rotated clockwise so it shows correctly on the + * display in its natural orientation. It should be 0, 90, 180, or 270. + * + * For example, suppose a device has a naturally tall screen. The + * back-facing camera sensor is mounted in landscape. You are looking at + * the screen. If the top side of the camera sensor is aligned with the + * right edge of the screen in natural orientation, the value should be + * 90. If the top side of a front-facing camera sensor is aligned with the + * right of the screen, the value should be 270. 
+ */ + int orientation; +}; + +template <typename TCam> +struct CameraTraits { +}; + +template <typename TCam, typename TCamTraits = CameraTraits<TCam> > +class CameraBase : public IBinder::DeathRecipient +{ +public: + typedef typename TCamTraits::TCamListener TCamListener; + typedef typename TCamTraits::TCamUser TCamUser; + typedef typename TCamTraits::TCamCallbacks TCamCallbacks; + + static sp<TCam> connect(int cameraId, + const String16& clientPackageName, + int clientUid); + virtual void disconnect(); + + void setListener(const sp<TCamListener>& listener); + + static int getNumberOfCameras(); + + static status_t getCameraInfo(int cameraId, + /*out*/ + struct CameraInfo* cameraInfo); + + static status_t addServiceListener( + const sp<ICameraServiceListener>& listener); + + static status_t removeServiceListener( + const sp<ICameraServiceListener>& listener); + + sp<TCamUser> remote(); + + // Status is set to 'UNKNOWN_ERROR' after successful (re)connection + status_t getStatus(); + +protected: + CameraBase(int cameraId); + virtual ~CameraBase(); + + //////////////////////////////////////////////////////// + // TCamCallbacks implementation + //////////////////////////////////////////////////////// + virtual void notifyCallback(int32_t msgType, int32_t ext, + int32_t ext2); + + //////////////////////////////////////////////////////// + // Common instance variables + //////////////////////////////////////////////////////// + Mutex mLock; + + virtual void binderDied(const wp<IBinder>& who); + + // helper function to obtain camera service handle + static const sp<ICameraService>& getCameraService(); + + sp<TCamUser> mCamera; + status_t mStatus; + + sp<TCamListener> mListener; + + const int mCameraId; + + typedef CameraBase<TCam> CameraBaseT; +}; + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.h b/include/camera/CameraMetadata.h index aee6cd7..8eeb2e7 100644 --- 
a/services/camera/libcameraservice/camera2/CameraMetadata.h +++ b/include/camera/CameraMetadata.h @@ -14,15 +14,14 @@ * limitations under the License. */ -#ifndef ANDROID_SERVERS_CAMERA_CAMERA2METADATA_CPP -#define ANDROID_SERVERS_CAMERA_CAMERA2METADATA_CPP +#ifndef ANDROID_CLIENT_CAMERA2_CAMERAMETADATA_CPP +#define ANDROID_CLIENT_CAMERA2_CAMERAMETADATA_CPP #include "system/camera_metadata.h" #include <utils/String8.h> #include <utils/Vector.h> namespace android { -namespace camera2 { /** * A convenience wrapper around the C-based camera_metadata_t library. @@ -50,6 +49,23 @@ class CameraMetadata { CameraMetadata &operator=(const camera_metadata_t *buffer); /** + * Get reference to the underlying metadata buffer. Ownership remains with + * the CameraMetadata object, but non-const CameraMetadata methods will not + * work until unlock() is called. Note that the lock has nothing to do with + * thread-safety, it simply prevents the camera_metadata_t pointer returned + * here from being accidentally invalidated by CameraMetadata operations. + */ + const camera_metadata_t* getAndLock(); + + /** + * Unlock the CameraMetadata for use again. After this unlock, the pointer + * given from getAndLock() may no longer be used. The pointer passed out + * from getAndLock must be provided to guarantee that the right object is + * being unlocked. + */ + status_t unlock(const camera_metadata_t *buffer); + + /** * Release a raw metadata buffer to the caller. 
After this call, * CameraMetadata no longer references the buffer, and the caller takes * responsibility for freeing the raw metadata buffer (using @@ -122,6 +138,12 @@ class CameraMetadata { } /** + * Check if a metadata entry exists for a given tag id + * + */ + bool exists(uint32_t tag) const; + + /** * Get metadata entry by tag id */ camera_metadata_entry find(uint32_t tag); @@ -149,6 +171,7 @@ class CameraMetadata { private: camera_metadata_t *mBuffer; + bool mLocked; /** * Check if tag has a given type @@ -158,7 +181,7 @@ class CameraMetadata { /** * Base update entry method */ - status_t update(uint32_t tag, const void *data, size_t data_count); + status_t updateImpl(uint32_t tag, const void *data, size_t data_count); /** * Resize metadata buffer if needed by reallocating it and copying it over. @@ -167,7 +190,6 @@ class CameraMetadata { }; -}; // namespace camera2 }; // namespace android #endif diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h index 5540d32..d521543 100644 --- a/include/camera/CameraParameters.h +++ b/include/camera/CameraParameters.h @@ -525,6 +525,10 @@ public: // stream and record stabilized videos. static const char KEY_VIDEO_STABILIZATION_SUPPORTED[]; + // Supported modes for special effects with light. + // Example values: "lowlight,hdr". + static const char KEY_LIGHTFX[]; + // Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED. static const char TRUE[]; static const char FALSE[]; @@ -664,6 +668,12 @@ public: // other modes. 
static const char FOCUS_MODE_CONTINUOUS_PICTURE[]; + // Values for light special effects + // Low-light enhancement mode + static const char LIGHTFX_LOWLIGHT[]; + // High-dynamic range mode + static const char LIGHTFX_HDR[]; + private: DefaultKeyedVector<String8,String8> mMap; }; diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h index 3d18837..2236c1f 100644 --- a/include/camera/ICamera.h +++ b/include/camera/ICamera.h @@ -27,7 +27,7 @@ namespace android { class ICameraClient; -class ISurfaceTexture; +class IGraphicBufferProducer; class Surface; class ICamera: public IInterface @@ -46,12 +46,9 @@ public: // allow other processes to use this ICamera interface virtual status_t unlock() = 0; - // pass the buffered Surface to the camera service - virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0; - - // pass the buffered ISurfaceTexture to the camera service + // pass the buffered IGraphicBufferProducer to the camera service virtual status_t setPreviewTexture( - const sp<ISurfaceTexture>& surfaceTexture) = 0; + const sp<IGraphicBufferProducer>& bufferProducer) = 0; // set the preview callback flag to affect how the received frames from // preview are handled. 
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h index 7d70c1e..aaf6eb3 100644 --- a/include/camera/ICameraService.h +++ b/include/camera/ICameraService.h @@ -21,28 +21,59 @@ #include <binder/IInterface.h> #include <binder/Parcel.h> -#include <camera/ICameraClient.h> -#include <camera/ICamera.h> - namespace android { +class ICamera; +class ICameraClient; +class IProCameraUser; +class IProCameraCallbacks; +class ICameraServiceListener; + class ICameraService : public IInterface { public: enum { GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION, GET_CAMERA_INFO, - CONNECT + CONNECT, + CONNECT_PRO, + ADD_LISTENER, + REMOVE_LISTENER, + }; + + enum { + USE_CALLING_UID = -1 }; public: DECLARE_META_INTERFACE(CameraService); - virtual int32_t getNumberOfCameras() = 0; - virtual status_t getCameraInfo(int cameraId, + virtual int32_t getNumberOfCameras() = 0; + virtual status_t getCameraInfo(int cameraId, struct CameraInfo* cameraInfo) = 0; - virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, - int cameraId) = 0; + + // Returns 'OK' if operation succeeded + // - Errors: ALREADY_EXISTS if the listener was already added + virtual status_t addListener(const sp<ICameraServiceListener>& listener) + = 0; + // Returns 'OK' if operation succeeded + // - Errors: BAD_VALUE if specified listener was not in the listener list + virtual status_t removeListener(const sp<ICameraServiceListener>& listener) + = 0; + /** + * clientPackageName and clientUid are used for permissions checking. if + * clientUid == USE_CALLING_UID, then the calling UID is used instead. Only + * trusted callers can set a clientUid other than USE_CALLING_UID. 
+ */ + virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, + int cameraId, + const String16& clientPackageName, + int clientUid) = 0; + + virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb, + int cameraId, + const String16& clientPackageName, + int clientUid) = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h new file mode 100644 index 0000000..f2a11c2 --- /dev/null +++ b/include/camera/ICameraServiceListener.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H +#define ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H + +#include <utils/RefBase.h> +#include <binder/IInterface.h> +#include <binder/Parcel.h> +#include <hardware/camera_common.h> + +namespace android { + +class ICameraServiceListener : public IInterface +{ +public: + + /** + * Initial status will be transmitted with onStatusChange immediately + * after this listener is added to the service listener list. + * + * Allowed transitions: + * + * (Any) -> NOT_PRESENT + * NOT_PRESENT -> PRESENT + * NOT_PRESENT -> ENUMERATING + * ENUMERATING -> PRESENT + * PRESENT -> NOT_AVAILABLE + * NOT_AVAILABLE -> PRESENT + * + * A state will never immediately transition back to itself. 
+ */
+ enum Status {
+ // Device physically unplugged
+ STATUS_NOT_PRESENT = CAMERA_DEVICE_STATUS_NOT_PRESENT,
+ // Device physically has been plugged in
+ // and the camera can be used exclusively
+ STATUS_PRESENT = CAMERA_DEVICE_STATUS_PRESENT,
+ // Device physically has been plugged in
+ // but it will not be connect-able until enumeration is complete
+ STATUS_ENUMERATING = CAMERA_DEVICE_STATUS_ENUMERATING,
+
+ // Camera can be used exclusively
+ STATUS_AVAILABLE = STATUS_PRESENT, // deprecated, will be removed
+
+ // Camera is in use by another app and cannot be used exclusively
+ STATUS_NOT_AVAILABLE = 0x80000000,
+
+ // Used to initialize variables only
+ STATUS_UNKNOWN = 0xFFFFFFFF,
+ };
+
+ DECLARE_META_INTERFACE(CameraServiceListener);
+
+ virtual void onStatusChanged(Status status, int32_t cameraId) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnCameraServiceListener : public BnInterface<ICameraServiceListener>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif diff --git a/include/camera/IProCameraCallbacks.h b/include/camera/IProCameraCallbacks.h new file mode 100644 index 0000000..563ec17 --- /dev/null +++ b/include/camera/IProCameraCallbacks.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef ANDROID_HARDWARE_IPROCAMERA_CALLBACKS_H
+#define ANDROID_HARDWARE_IPROCAMERA_CALLBACKS_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <binder/IMemory.h>
+#include <utils/Timers.h>
+#include <system/camera.h>
+
+struct camera_metadata;
+
+namespace android {
+
+class IProCameraCallbacks : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(ProCameraCallbacks);
+
+ virtual void notifyCallback(int32_t msgType,
+ int32_t ext1,
+ int32_t ext2) = 0;
+
+ enum LockStatus {
+ LOCK_ACQUIRED,
+ LOCK_RELEASED,
+ LOCK_STOLEN,
+ };
+
+ virtual void onLockStatusChanged(LockStatus newLockStatus) = 0;
+
+ /** Missing by design: implementation is client-side in ProCamera.cpp **/
+ // virtual void onBufferReceived(int streamId,
+ // const CpuConsumer::LockedBuffer& buf);
+ virtual void onResultReceived(int32_t frameId,
+ camera_metadata* result) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnProCameraCallbacks : public BnInterface<IProCameraCallbacks>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif diff --git a/include/camera/IProCameraUser.h b/include/camera/IProCameraUser.h new file mode 100644 index 0000000..45b818c --- /dev/null +++ b/include/camera/IProCameraUser.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_IPROCAMERAUSER_H +#define ANDROID_HARDWARE_IPROCAMERAUSER_H + +#include <utils/RefBase.h> +#include <binder/IInterface.h> +#include <binder/Parcel.h> +#include <binder/IMemory.h> +#include <utils/String8.h> +#include <camera/IProCameraCallbacks.h> + +struct camera_metadata; + +namespace android { + +class IProCameraUserClient; +class IGraphicBufferProducer; +class Surface; + +class IProCameraUser: public IInterface +{ +public: + DECLARE_META_INTERFACE(ProCameraUser); + + virtual void disconnect() = 0; + + // connect to the service, given a callbacks listener + virtual status_t connect(const sp<IProCameraCallbacks>& callbacks) + = 0; + + /** + * Locking + **/ + virtual status_t exclusiveTryLock() = 0; + virtual status_t exclusiveLock() = 0; + virtual status_t exclusiveUnlock() = 0; + + virtual bool hasExclusiveLock() = 0; + + /** + * Request Handling + **/ + + // Note that the callee gets a copy of the metadata. + virtual int submitRequest(struct camera_metadata* metadata, + bool streaming = false) = 0; + virtual status_t cancelRequest(int requestId) = 0; + + virtual status_t deleteStream(int streamId) = 0; + virtual status_t createStream( + int width, int height, int format, + const sp<IGraphicBufferProducer>& bufferProducer, + /*out*/ + int* streamId) = 0; + + // Create a request object from a template. 
+ virtual status_t createDefaultRequest(int templateId, + /*out*/ + camera_metadata** request) + = 0; + + // Get static camera metadata + virtual status_t getCameraInfo(int cameraId, + /*out*/ + camera_metadata** info) = 0; + +}; + +// ---------------------------------------------------------------------------- + +class BnProCameraUser: public BnInterface<IProCameraUser> +{ +public: + virtual status_t onTransact( uint32_t code, + const Parcel& data, + Parcel* reply, + uint32_t flags = 0); +}; + +}; // namespace android + +#endif diff --git a/include/camera/ProCamera.h b/include/camera/ProCamera.h new file mode 100644 index 0000000..3d1652f --- /dev/null +++ b/include/camera/ProCamera.h @@ -0,0 +1,312 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_PRO_CAMERA_H +#define ANDROID_HARDWARE_PRO_CAMERA_H + +#include <utils/Timers.h> +#include <utils/KeyedVector.h> +#include <gui/IGraphicBufferProducer.h> +#include <system/camera.h> +#include <camera/IProCameraCallbacks.h> +#include <camera/IProCameraUser.h> +#include <camera/Camera.h> +#include <camera/CameraMetadata.h> +#include <gui/CpuConsumer.h> + +#include <gui/Surface.h> + +#include <utils/Condition.h> +#include <utils/Mutex.h> + +#include <camera/CameraBase.h> + +struct camera_metadata; + +namespace android { + +// All callbacks on this class are concurrent +// (they come from separate threads) +class ProCameraListener : virtual public RefBase +{ +public: + virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2) = 0; + + // Lock has been acquired. Write operations now available. + virtual void onLockAcquired() = 0; + // Lock has been released with exclusiveUnlock. + virtual void onLockReleased() = 0; + // Lock has been stolen by another client. + virtual void onLockStolen() = 0; + + // Lock free. + virtual void onTriggerNotify(int32_t msgType, int32_t ext1, int32_t ext2) + = 0; + // onFrameAvailable and OnResultReceived can come in with any order, + // use android.sensor.timestamp and LockedBuffer.timestamp to correlate them + + /** + * A new metadata buffer has been received. + * -- Ownership of request passes on to the callee, free with + * free_camera_metadata. + */ + virtual void onResultReceived(int32_t frameId, camera_metadata* result) = 0; + + // TODO: make onFrameAvailable pure virtual + + // A new frame buffer has been received for this stream. 
+ // -- This callback only fires for createStreamCpu streams + // -- A buffer may be obtained by calling cpuConsumer->lockNextBuffer + // -- Use buf.timestamp to correlate with result's android.sensor.timestamp + // -- The buffer should be accessed with CpuConsumer::lockNextBuffer + // and CpuConsumer::unlockBuffer + virtual void onFrameAvailable(int /*streamId*/, + const sp<CpuConsumer>& /*cpuConsumer*/) { + } + +}; + +class ProCamera; + +template <> +struct CameraTraits<ProCamera> +{ + typedef ProCameraListener TCamListener; + typedef IProCameraUser TCamUser; + typedef IProCameraCallbacks TCamCallbacks; +}; + +class ProCamera : + public CameraBase<ProCamera>, + public BnProCameraCallbacks +{ +public: + /** + * Connect a shared camera. By default access is restricted to read only + * (Lock free) operations. To be able to submit custom requests a lock needs + * to be acquired with exclusive[Try]Lock. + */ + static sp<ProCamera> connect(int cameraId); + virtual ~ProCamera(); + + /** + * Exclusive Locks: + * - We may request exclusive access to a camera if no other + * clients are using the camera. This works as a traditional + * client, writing/reading any camera state. + * - An application opening the camera (a regular 'Camera') will + * always steal away the exclusive lock from a ProCamera, + * this will call onLockReleased. + * - onLockAcquired will be called again once it is possible + * to again exclusively lock the camera. + * + */ + + /** + * All exclusiveLock/unlock functions are asynchronous. The remote endpoint + * shall not block while waiting to acquire the lock. Instead the lock + * notifications will come in asynchronously on the listener. + */ + + /** + * Attempt to acquire the lock instantly (non-blocking) + * - If this succeeds, you do not need to wait for onLockAcquired + * but the event will still be fired + * + * Returns -EBUSY if already locked. 0 on success. + */ + status_t exclusiveTryLock(); + // always returns 0. 
wait for onLockAcquired before lock is acquired.
+ status_t exclusiveLock();
+ // release a lock if we have one, or cancel the lock request.
+ status_t exclusiveUnlock();
+
+ // exclusive lock = do whatever we want. no lock = read only.
+ bool hasExclusiveLock();
+
+ /**
+ * < 0 error, >= 0 the request ID. streaming to have the request repeat
+ * until cancelled.
+ * The request queue is flushed when a lock is released or stolen
+ * if not locked will return PERMISSION_DENIED
+ */
+ int submitRequest(const struct camera_metadata* metadata,
+ bool streaming = false);
+ // if not locked will return PERMISSION_DENIED, BAD_VALUE if requestId bad
+ status_t cancelRequest(int requestId);
+
+ /**
+ * Ask for a stream to be enabled.
+ * Lock free. Service maintains counter of streams.
+ */
+ status_t requestStream(int streamId);
+// TODO: remove requestStream, it's useless.
+
+ /**
+ * Delete a stream.
+ * Lock free.
+ *
+ * NOTE: As a side effect this cancels ALL streaming requests.
+ *
+ * Errors: BAD_VALUE if unknown stream ID.
+ * PERMISSION_DENIED if the stream wasn't yours
+ */
+ status_t deleteStream(int streamId);
+
+ /**
+ * Create a new HW stream, whose sink will be the window.
+ * Lock free. Service maintains counter of streams.
+ * Errors: -EBUSY if too many streams created
+ */
+ status_t createStream(int width, int height, int format,
+ const sp<Surface>& surface,
+ /*out*/
+ int* streamId);
+
+ /**
+ * Create a new HW stream, whose sink will be the SurfaceTexture.
+ * Lock free. Service maintains counter of streams. 
+ * Errors: -EBUSY if too many streams created + */ + status_t createStream(int width, int height, int format, + const sp<IGraphicBufferProducer>& bufferProducer, + /*out*/ + int* streamId); + status_t createStreamCpu(int width, int height, int format, + int heapCount, + /*out*/ + sp<CpuConsumer>* cpuConsumer, + int* streamId); + status_t createStreamCpu(int width, int height, int format, + int heapCount, + bool synchronousMode, + /*out*/ + sp<CpuConsumer>* cpuConsumer, + int* streamId); + + // Create a request object from a template. + status_t createDefaultRequest(int templateId, + /*out*/ + camera_metadata** request) const; + + // Get static camera metadata + camera_metadata* getCameraInfo(int cameraId); + + // Blocks until a frame is available (CPU streams only) + // - Obtain the frame data by calling CpuConsumer::lockNextBuffer + // - Release the frame data after use with CpuConsumer::unlockBuffer + // Return value: + // - >0 - number of frames available to be locked + // - <0 - error (refer to error codes) + // Error codes: + // -ETIMEDOUT if it took too long to get a frame + int waitForFrameBuffer(int streamId); + + // Blocks until a metadata result is available + // - Obtain the metadata by calling consumeFrameMetadata() + // Error codes: + // -ETIMEDOUT if it took too long to get a frame + status_t waitForFrameMetadata(); + + // Get the latest metadata. This is destructive. + // - Calling this repeatedly will produce empty metadata objects. + // - Use waitForFrameMetadata to sync until new data is available. 
+ CameraMetadata consumeFrameMetadata(); + + // Convenience method to drop frame buffers (CPU streams only) + // Return values: + // >=0 - number of frames dropped (up to count) + // <0 - error code + // Error codes: + // BAD_VALUE - invalid streamId or count passed + int dropFrameBuffer(int streamId, int count); + +protected: + //////////////////////////////////////////////////////// + // IProCameraCallbacks implementation + //////////////////////////////////////////////////////// + virtual void notifyCallback(int32_t msgType, + int32_t ext, + int32_t ext2); + + virtual void onLockStatusChanged( + IProCameraCallbacks::LockStatus newLockStatus); + + virtual void onResultReceived(int32_t frameId, + camera_metadata* result); +private: + ProCamera(int cameraId); + + class ProFrameListener : public CpuConsumer::FrameAvailableListener { + public: + ProFrameListener(wp<ProCamera> camera, int streamID) { + mCamera = camera; + mStreamId = streamID; + } + + protected: + virtual void onFrameAvailable() { + sp<ProCamera> c = mCamera.promote(); + if (c.get() != NULL) { + c->onFrameAvailable(mStreamId); + } + } + + private: + wp<ProCamera> mCamera; + int mStreamId; + }; + friend class ProFrameListener; + + struct StreamInfo + { + StreamInfo(int streamId) { + this->streamID = streamId; + cpuStream = false; + frameReady = 0; + } + + StreamInfo() { + streamID = -1; + cpuStream = false; + } + + int streamID; + bool cpuStream; + sp<CpuConsumer> cpuConsumer; + bool synchronousMode; + sp<ProFrameListener> frameAvailableListener; + sp<Surface> stc; + int frameReady; + }; + + Condition mWaitCondition; + Mutex mWaitMutex; + static const nsecs_t mWaitTimeout = 1000000000; // 1sec + KeyedVector<int, StreamInfo> mStreams; + bool mMetadataReady; + CameraMetadata mLatestMetadata; + + void onFrameAvailable(int streamId); + + StreamInfo& getStreamInfo(int streamId); + + friend class CameraBase; +}; + +}; // namespace android + +#endif diff --git a/include/media/AudioBufferProvider.h 
b/include/media/AudioBufferProvider.h index 865ed7e..43e4de7 100644 --- a/include/media/AudioBufferProvider.h +++ b/include/media/AudioBufferProvider.h @@ -36,11 +36,8 @@ public: size_t frameCount; }; -protected: - AudioBufferProvider() : mValid(kValid) { } - virtual ~AudioBufferProvider() { mValid = kDead; } + virtual ~AudioBufferProvider() {} -public: // value representing an invalid presentation timestamp static const int64_t kInvalidPTS = 0x7FFFFFFFFFFFFFFFLL; // <stdint.h> is too painful @@ -50,13 +47,6 @@ public: virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0; virtual void releaseBuffer(Buffer* buffer) = 0; - - int getValid() const { return mValid; } - static const int kValid = 'GOOD'; - static const int kDead = 'DEAD'; - -private: - int mValid; }; // ---------------------------------------------------------------------------- diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h index 156c592..38c6548 100644 --- a/include/media/AudioRecord.h +++ b/include/media/AudioRecord.h @@ -29,6 +29,7 @@ namespace android { class audio_track_cblk_t; +class AudioRecordClientProxy; // ---------------------------------------------------------------------------- @@ -43,46 +44,43 @@ public: */ enum event_type { EVENT_MORE_DATA = 0, // Request to read more data from PCM buffer. - EVENT_OVERRUN = 1, // PCM buffer overrun occured. + EVENT_OVERRUN = 1, // PCM buffer overrun occurred. EVENT_MARKER = 2, // Record head is at the specified marker position // (See setMarkerPosition()). EVENT_NEW_POS = 3, // Record head is at a new position // (See setPositionUpdatePeriod()). }; - /* Create Buffer on the stack and pass it to obtainBuffer() - * and releaseBuffer(). + /* Client should declare Buffer on the stack and pass address to obtainBuffer() + * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA. 
*/ class Buffer { public: - enum { - MUTE = 0x00000001 - }; - uint32_t flags; - int channelCount; - audio_format_t format; - size_t frameCount; + size_t frameCount; // number of sample frames corresponding to size; + // on input it is the number of frames available, + // on output is the number of frames actually drained + size_t size; // total size in bytes == frameCount * frameSize union { void* raw; - short* i16; - int8_t* i8; + short* i16; // signed 16-bit + int8_t* i8; // unsigned 8-bit, offset by 0x80 }; }; /* As a convenience, if a callback is supplied, a handler thread * is automatically created with the appropriate priority. This thread - * invokes the callback when a new buffer becomes ready or an overrun condition occurs. + * invokes the callback when a new buffer becomes ready or various conditions occur. * Parameters: * * event: type of event notified (see enum AudioRecord::event_type). * user: Pointer to context for use by the callback receiver. * info: Pointer to optional parameter according to event type: * - EVENT_MORE_DATA: pointer to AudioRecord::Buffer struct. The callback must not read - * more bytes than indicated by 'size' field and update 'size' if less bytes are - * read. + * more bytes than indicated by 'size' field and update 'size' if fewer bytes are + * consumed. * - EVENT_OVERRUN: unused. * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames. * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames. @@ -98,7 +96,7 @@ public: * - BAD_VALUE: unsupported configuration */ - static status_t getMinFrameCount(int* frameCount, + static status_t getMinFrameCount(size_t* frameCount, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask); @@ -108,7 +106,7 @@ public: */ AudioRecord(); - /* Creates an AudioRecord track and registers it with AudioFlinger. + /* Creates an AudioRecord object and registers it with AudioFlinger. 
* Once created, the track needs to be started before it can be used. * Unspecified values are set to the audio hardware's current * values. @@ -120,10 +118,13 @@ public: * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed * 16 bits per sample). * channelMask: Channel mask. - * frameCount: Total size of track PCM buffer in frames. This defines the - * latency of the track. + * frameCount: Minimum size of track PCM buffer in frames. This defines the + * application's contribution to the + * latency of the track. The actual size selected by the AudioRecord could + * be larger if the requested size is not compatible with current audio HAL + * latency. Zero means to use a default value. * cbf: Callback function. If not null, this function is called periodically - * to provide new PCM data. + * to consume new PCM data. * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames are ready in record track output buffer. @@ -154,7 +155,7 @@ public: * - BAD_VALUE: invalid parameter (channels, format, sampleRate...) * - NO_INIT: audio server or audio hardware not initialized * - PERMISSION_DENIED: recording is not allowed for the requesting process - * */ + */ status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT, uint32_t sampleRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, @@ -168,14 +169,14 @@ public: /* Result of constructing the AudioRecord. This must be checked - * before using any AudioRecord API (except for set()), using + * before using any AudioRecord API (except for set()), because using * an uninitialized AudioRecord produces undefined results. * See set() method above for possible return codes. */ status_t initCheck() const; - /* Returns this track's latency in milliseconds. - * This includes the latency due to AudioRecord buffer size + /* Returns this track's estimated latency in milliseconds. 
+ * This includes the latency due to AudioRecord buffer size, * and audio hardware driver. */ uint32_t latency() const; @@ -183,15 +184,15 @@ public: /* getters, see constructor and set() */ audio_format_t format() const; - int channelCount() const; - uint32_t frameCount() const; - size_t frameSize() const; + uint32_t channelCount() const; + size_t frameCount() const; + size_t frameSize() const { return mFrameSize; } audio_source_t inputSource() const; /* After it's created the track is not active. Call start() to * make it active. If set, the callback will start being called. - * if event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until + * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until * the specified event occurs on the specified trigger session. */ status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, @@ -199,12 +200,12 @@ public: /* Stop a track. If set, the callback will cease being called and * obtainBuffer returns STOPPED. Note that obtainBuffer() still works - * and will fill up buffers until the pool is exhausted. + * and will drain buffers until the pool is exhausted. */ void stop(); bool stopped() const; - /* get sample rate for this record track + /* Get sample rate for this record track in Hz. */ uint32_t getSampleRate() const; @@ -258,7 +259,7 @@ public: */ status_t getPosition(uint32_t *position) const; - /* returns a handle on the audio input used by this AudioRecord. + /* Returns a handle on the audio input used by this AudioRecord. * * Parameters: * none. @@ -268,7 +269,7 @@ public: */ audio_io_handle_t getInput() const; - /* returns the audio session ID associated with this AudioRecord. + /* Returns the audio session ID associated with this AudioRecord. * * Parameters: * none. @@ -278,22 +279,30 @@ public: */ int getSessionId() const; - /* obtains a buffer of "frameCount" frames. The buffer must be - * filled entirely. 
If the track is stopped, obtainBuffer() returns + /* Obtains a buffer of "frameCount" frames. The buffer must be + * drained entirely, and then released with releaseBuffer(). + * If the track is stopped, obtainBuffer() returns * STOPPED instead of NO_ERROR as long as there are buffers available, * at which point NO_MORE_BUFFERS is returned. - * Buffers will be returned until the pool (buffercount()) + * Buffers will be returned until the pool * is exhausted, at which point obtainBuffer() will either block * or return WOULD_BLOCK depending on the value of the "blocking" * parameter. + * + * Interpretation of waitCount: + * +n limits wait time to n * WAIT_PERIOD_MS, + * -1 causes an (almost) infinite wait time, + * 0 non-blocking. */ enum { - NO_MORE_BUFFERS = 0x80000001, + NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value STOPPED = 1 }; status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount); + + /* Release an emptied buffer of "frameCount" frames for AudioFlinger to re-fill. */ void releaseBuffer(Buffer* audioBuffer); @@ -302,16 +311,16 @@ public: */ ssize_t read(void* buffer, size_t size); - /* Return the amount of input frames lost in the audio driver since the last call of this + /* Return the number of input frames lost in the audio driver since the last call of this * function. Audio driver is expected to reset the value to 0 and restart counting upon * returning the current value by this function call. Such loss typically occurs when the * user space process is blocked longer than the capacity of audio driver buffers. - * Unit: the number of input audio frames + * Units: the number of input audio frames. 
*/ unsigned int getInputFramesLost() const; private: - /* copying audio tracks is not allowed */ + /* copying audio record objects is not allowed */ AudioRecord(const AudioRecord& other); AudioRecord& operator = (const AudioRecord& other); @@ -343,8 +352,7 @@ private: status_t openRecord_l(uint32_t sampleRate, audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, audio_io_handle_t input); audio_io_handle_t getInput_l(); status_t restoreRecord_l(audio_track_cblk_t*& cblk); @@ -355,7 +363,7 @@ private: bool mActive; // protected by mLock // for client callback handler - callback_t mCbf; + callback_t mCbf; // callback handler for events, or NULL void* mUserData; // for notification APIs @@ -367,9 +375,11 @@ private: uint32_t mUpdatePeriod; // in ms // constant after constructor or set() - uint32_t mFrameCount; + uint32_t mSampleRate; + size_t mFrameCount; audio_format_t mFormat; uint8_t mChannelCount; + size_t mFrameSize; // app-level frame size == AudioFlinger frame size audio_source_t mInputSource; status_t mStatus; uint32_t mLatency; @@ -381,9 +391,11 @@ private: sp<IAudioRecord> mAudioRecord; sp<IMemory> mCblkMemory; audio_track_cblk_t* mCblk; + void* mBuffers; // starting address of buffers in shared memory int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; + AudioRecordClientProxy* mProxy; }; }; // namespace android diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index 49e1afc..b11c812 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -67,9 +67,14 @@ public: // set audio mode in audio hardware static status_t setMode(audio_mode_t mode); - // returns true in *state if tracks are active on the specified stream or has been active + // returns true in *state if tracks are active on the specified stream or have been active // in the past inPastMs milliseconds static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t 
inPastMs = 0); + // returns true in *state if tracks are active for what qualifies as remote playback + // on the specified stream or have been active in the past inPastMs milliseconds. Remote + // playback isn't mutually exclusive with local playback. + static status_t isStreamActiveRemotely(audio_stream_type_t stream, bool *state, + uint32_t inPastMs = 0); // returns true in *state if a recorder is currently recording with the specified source static status_t isSourceActive(audio_source_t source, bool *state); @@ -87,29 +92,26 @@ public: static float linearToLog(int volume); static int logToLinear(float volume); - static status_t getOutputSamplingRate(int* samplingRate, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); - static status_t getOutputFrameCount(int* frameCount, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); - static status_t getOutputLatency(uint32_t* latency, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getOutputSamplingRate(uint32_t* samplingRate, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getOutputFrameCount(size_t* frameCount, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getOutputLatency(uint32_t* latency, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); static status_t getSamplingRate(audio_io_handle_t output, audio_stream_type_t streamType, - int* samplingRate); + uint32_t* samplingRate); // returns the number of frames per audio HAL write buffer. Corresponds to // audio_stream->get_buffer_size()/audio_stream_frame_size() static status_t getFrameCount(audio_io_handle_t output, audio_stream_type_t stream, - int* frameCount); + size_t* frameCount); // returns the audio output stream latency in ms. 
Corresponds to // audio_stream_out->get_latency() static status_t getLatency(audio_io_handle_t output, audio_stream_type_t stream, uint32_t* latency); - // DEPRECATED - static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT); - - // DEPRECATED - static status_t getOutputFrameCount(int* frameCount, int stream = AUDIO_STREAM_DEFAULT); - static bool routedToA2dpOutput(audio_stream_type_t streamType); static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, @@ -126,10 +128,11 @@ public: // - BAD_VALUE: invalid parameter // NOTE: this feature is not supported on all hardware platforms and it is // necessary to check returned status before using the returned values. - static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + static status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, + audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid - static unsigned int getInputFramesLost(audio_io_handle_t ioHandle); + static size_t getInputFramesLost(audio_io_handle_t ioHandle); static int newAudioSessionId(); static void acquireAudioSessionId(int audioSession); @@ -147,8 +150,8 @@ public: NUM_CONFIG_EVENTS }; - // audio output descriptor used to cache output configurations in client process to avoid frequent calls - // through IAudioFlinger + // audio output descriptor used to cache output configurations in client process to avoid + // frequent calls through IAudioFlinger class OutputDescriptor { public: OutputDescriptor() @@ -162,8 +165,8 @@ public: }; // Events used to synchronize actions between audio sessions. - // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until playback - // is complete on another audio session. 
+ // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until + // playback is complete on another audio session. // See definitions in MediaSyncEvent.java enum sync_event_t { SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event @@ -183,8 +186,10 @@ public: // // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions) // - static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address); - static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address); + static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, + const char *device_address); + static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, + const char *device_address); static status_t setPhoneState(audio_mode_t state); static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config); static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage); @@ -237,8 +242,8 @@ public: static const sp<IAudioPolicyService>& get_audio_policy_service(); // helpers for android.media.AudioManager.getProperty(), see description there for meaning - static int32_t getPrimaryOutputSamplingRate(); - static int32_t getPrimaryOutputFrameCount(); + static uint32_t getPrimaryOutputSamplingRate(); + static size_t getPrimaryOutputFrameCount(); // ---------------------------------------------------------------------------- diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h index 34108b3..64f82bb 100644 --- a/include/media/AudioTrack.h +++ b/include/media/AudioTrack.h @@ -36,6 +36,7 @@ namespace android { // ---------------------------------------------------------------------------- class audio_track_cblk_t; +class AudioTrackClientProxy; // ---------------------------------------------------------------------------- @@ -49,13 
+50,20 @@ public: }; /* Events used by AudioTrack callback function (audio_track_cblk_t). + * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*. */ enum event_type { - EVENT_MORE_DATA = 0, // Request to write more data to PCM buffer. - EVENT_UNDERRUN = 1, // PCM buffer underrun occured. - EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from loop start if loop count was not 0. - EVENT_MARKER = 3, // Playback head is at the specified marker position (See setMarkerPosition()). - EVENT_NEW_POS = 4, // Playback head is at a new position (See setPositionUpdatePeriod()). + EVENT_MORE_DATA = 0, // Request to write more data to buffer. + // If this event is delivered but the callback handler + // does not want to write more data, the handler must explicitly + // ignore the event by setting frameCount to zero. + EVENT_UNDERRUN = 1, // Buffer underrun occurred. + EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from + // loop start if loop count was not 0. + EVENT_MARKER = 3, // Playback head is at the specified marker position + // (See setMarkerPosition()). + EVENT_NEW_POS = 4, // Playback head is at a new position + // (See setPositionUpdatePeriod()). EVENT_BUFFER_END = 5 // Playback head is at the end of the buffer. 
}; @@ -66,14 +74,6 @@ public: class Buffer { public: - enum { - MUTE = 0x00000001 - }; - uint32_t flags; // 0 or MUTE - audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT - // accessed directly by WebKit ANP callback - int channelCount; // will be removed in the future, do not use - size_t frameCount; // number of sample frames corresponding to size; // on input it is the number of frames desired, // on output is the number of frames actually filled @@ -114,19 +114,21 @@ public: * - NO_INIT: audio server or audio hardware not initialized */ - static status_t getMinFrameCount(int* frameCount, + static status_t getMinFrameCount(size_t* frameCount, audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT, uint32_t sampleRate = 0); /* Constructs an uninitialized AudioTrack. No connection with - * AudioFlinger takes place. + * AudioFlinger takes place. Use set() after this. */ AudioTrack(); - /* Creates an audio track and registers it with AudioFlinger. + /* Creates an AudioTrack object and registers it with AudioFlinger. * Once created, the track needs to be started before it can be used. - * Unspecified values are set to the audio hardware's current - * values. + * Unspecified values are set to appropriate default values. + * With this constructor, the track is configured for streaming mode. + * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA. + * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is deprecated. * * Parameters: * @@ -137,12 +139,13 @@ public: * 16 bits per sample). * channelMask: Channel mask. * frameCount: Minimum size of track PCM buffer in frames. This defines the + * application's contribution to the * latency of the track. The actual size selected by the AudioTrack could be * larger if the requested size is not compatible with current audio HAL - * latency. Zero means to use a default value. + * configuration. Zero means to use a default value. 
* flags: See comments on audio_output_flags_t in <system/audio.h>. * cbf: Callback function. If not null, this function is called periodically - * to request new PCM data. + * to provide new data and inform of marker, position updates, etc. * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames have been consumed from track input buffer. @@ -162,25 +165,16 @@ public: int notificationFrames = 0, int sessionId = 0); - // DEPRECATED - explicit AudioTrack( int streamType, - uint32_t sampleRate = 0, - int format = AUDIO_FORMAT_DEFAULT, - int channelMask = 0, - int frameCount = 0, - uint32_t flags = (uint32_t) AUDIO_OUTPUT_FLAG_NONE, - callback_t cbf = 0, - void* user = 0, - int notificationFrames = 0, - int sessionId = 0); - - /* Creates an audio track and registers it with AudioFlinger. With this constructor, - * the PCM data to be rendered by AudioTrack is passed in a shared memory buffer - * identified by the argument sharedBuffer. This prototype is for static buffer playback. - * PCM data must be present in memory before the AudioTrack is started. - * The write() and flush() methods are not supported in this case. + /* Creates an audio track and registers it with AudioFlinger. + * With this constructor, the track is configured for static buffer mode. + * The format must not be 8-bit linear PCM. + * Data to be rendered is passed in a shared memory buffer + * identified by the argument sharedBuffer, which must be non-0. + * The memory should be initialized to the desired data before calling start(). + * The write() method is not supported in this case. * It is recommended to pass a callback function to be notified of playback end by an * EVENT_UNDERRUN event. + * FIXME EVENT_MORE_DATA still occurs; it must be ignored. */ AudioTrack( audio_stream_type_t streamType, @@ -199,14 +193,15 @@ public: */ ~AudioTrack(); - /* Initialize an uninitialized AudioTrack. 
* Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful initialization * - INVALID_OPERATION: AudioTrack is already initialized * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...) * - NO_INIT: audio server or audio hardware not initialized - * */ + * If sharedBuffer is non-0, the frameCount parameter is ignored and + * replaced by the shared buffer's total allocated size in frame units. + */ status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT, uint32_t sampleRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, @@ -220,69 +215,78 @@ public: bool threadCanCallJava = false, int sessionId = 0); - /* Result of constructing the AudioTrack. This must be checked * before using any AudioTrack API (except for set()), because using * an uninitialized AudioTrack produces undefined results. * See set() method above for possible return codes. */ - status_t initCheck() const; + status_t initCheck() const { return mStatus; } /* Returns this track's estimated latency in milliseconds. * This includes the latency due to AudioTrack buffer size, AudioMixer (if any) * and audio hardware driver. */ - uint32_t latency() const; + uint32_t latency() const { return mLatency; } /* getters, see constructors and set() */ - audio_stream_type_t streamType() const; - audio_format_t format() const; - int channelCount() const; - uint32_t frameCount() const; + audio_stream_type_t streamType() const { return mStreamType; } + audio_format_t format() const { return mFormat; } - /* Return channelCount * (bit depth per channel / 8). + /* Return frame size in bytes, which for linear PCM is channelCount * (bit depth per channel / 8). * channelCount is determined from channelMask, and bit depth comes from format. + * For non-linear formats, the frame size is typically 1 byte. 
*/ - size_t frameSize() const; + uint32_t channelCount() const { return mChannelCount; } - sp<IMemory>& sharedBuffer(); + uint32_t frameCount() const { return mFrameCount; } + size_t frameSize() const { return mFrameSize; } + /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */ + sp<IMemory> sharedBuffer() const { return mSharedBuffer; } /* After it's created the track is not active. Call start() to * make it active. If set, the callback will start being called. + * If the track was previously paused, volume is ramped up over the first mix buffer. */ void start(); - /* Stop a track. If set, the callback will cease being called and + /* Stop a track. + * In static buffer mode, the track is stopped immediately. + * In streaming mode, the callback will cease being called and * obtainBuffer returns STOPPED. Note that obtainBuffer() still works * and will fill up buffers until the pool is exhausted. + * The stop does not occur immediately: any data remaining in the buffer + * is first drained, mixed, and output, and only then is the track marked as stopped. */ void stop(); bool stopped() const; - /* Flush a stopped track. All pending buffers are discarded. - * This function has no effect if the track is not stopped. + /* Flush a stopped or paused track. All previously buffered data is discarded immediately. + * This has the effect of draining the buffers without mixing or output. + * Flush is intended for streaming mode, for example before switching to non-contiguous content. + * This function is a no-op if the track is not stopped or paused, or uses a static buffer. */ void flush(); - /* Pause a track. If set, the callback will cease being called and + /* Pause a track. After pause, the callback will cease being called and * obtainBuffer returns STOPPED. Note that obtainBuffer() still works * and will fill up buffers until the pool is exhausted. 
+ * Volume is ramped down over the next mix buffer following the pause request, + * and then the track is marked as paused. It can be resumed with ramp up by start(). */ void pause(); - /* Mute or unmute this track. - * While muted, the callback, if set, is still called. - */ - void mute(bool); - bool muted() const; - /* Set volume for this track, mostly used for games' sound effects * left and right volumes. Levels must be >= 0.0 and <= 1.0. + * This is the older API. New applications should use setVolume(float) when possible. */ status_t setVolume(float left, float right); - void getVolume(float* left, float* right) const; + + /* Set volume for all channels. This is the preferred API for new applications, + * especially for multi-channel content. + */ + status_t setVolume(float volume); /* Set the send level for this track. An auxiliary effect should be attached * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0. @@ -290,12 +294,15 @@ public: status_t setAuxEffectSendLevel(float level); void getAuxEffectSendLevel(float* level) const; - /* Set sample rate for this track, mostly used for games' sound effects + /* Set sample rate for this track in Hz, mostly used for games' sound effects */ - status_t setSampleRate(int sampleRate); + status_t setSampleRate(uint32_t sampleRate); + + /* Return current sample rate in Hz, or 0 if unknown */ uint32_t getSampleRate() const; /* Enables looping and sets the start and end points of looping. + * Only supported for static buffer mode. * * Parameters: * @@ -311,12 +318,15 @@ public: /* Sets marker position. When playback reaches the number of frames specified, a callback with * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker - * notification callback. - * If the AudioTrack has been opened with no callback function associated, the operation will fail. + * notification callback. 
To set a marker at a position which would compute as 0, + * a workaround is to the set the marker at a nearby position such as -1 or 1. + * If the AudioTrack has been opened with no callback function associated, the operation will + * fail. * * Parameters: * - * marker: marker position expressed in frames. + * marker: marker position expressed in wrapping (overflow) frame units, + * like the return value of getPosition(). * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation @@ -325,12 +335,13 @@ public: status_t setMarkerPosition(uint32_t marker); status_t getMarkerPosition(uint32_t *marker) const; - /* Sets position update period. Every time the number of frames specified has been played, * a callback with event type EVENT_NEW_POS is called. * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification * callback. - * If the AudioTrack has been opened with no callback function associated, the operation will fail. + * If the AudioTrack has been opened with no callback function associated, the operation will + * fail. + * Extremely small values may be rounded up to a value the implementation can support. * * Parameters: * @@ -358,19 +369,26 @@ public: * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation - * - INVALID_OPERATION: the AudioTrack is not stopped. - * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack buffer + * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode. + * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack + * buffer */ status_t setPosition(uint32_t position); + + /* Return the total number of frames played since playback start. + * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz. + * It is reset to zero by flush(), reload(), and stop(). 
+ */ status_t getPosition(uint32_t *position); /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids * rewriting the buffer before restarting playback after a stop. * This method must be called with the AudioTrack in paused or stopped state. + * Not allowed in streaming mode. * * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful operation - * - INVALID_OPERATION: the AudioTrack is not stopped. + * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode. */ status_t reload(); @@ -392,7 +410,7 @@ public: * Returned value: * AudioTrack session ID. */ - int getSessionId() const; + int getSessionId() const { return mSessionId; } /* Attach track auxiliary output to specified effect. Use effectId = 0 * to detach track from effect. @@ -413,15 +431,30 @@ public: * If the track is stopped, obtainBuffer() returns * STOPPED instead of NO_ERROR as long as there are buffers available, * at which point NO_MORE_BUFFERS is returned. - * Buffers will be returned until the pool (buffercount()) + * Buffers will be returned until the pool * is exhausted, at which point obtainBuffer() will either block * or return WOULD_BLOCK depending on the value of the "blocking" * parameter. * + * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications, + * which should use write() or callback EVENT_MORE_DATA instead. + * * Interpretation of waitCount: * +n limits wait time to n * WAIT_PERIOD_MS, * -1 causes an (almost) infinite wait time, * 0 non-blocking. 
+ * + * Buffer fields + * On entry: + * frameCount number of frames requested + * After error return: + * frameCount 0 + * size 0 + * raw undefined + * After successful return: + * frameCount actual number of frames available, <= number requested + * size actual number of bytes available + * raw pointer to the buffer */ enum { @@ -443,6 +476,7 @@ public: * STOPPED AudioTrack was stopped during the write * NO_MORE_BUFFERS when obtainBuffer() returns same * or any other error code returned by IAudioTrack::start() or restoreTrack_l(). + * Not supported for static buffer mode. */ ssize_t write(const void* buffer, size_t size); @@ -482,15 +516,18 @@ protected: // body of AudioTrackThread::threadLoop() bool processAudioBuffer(const sp<AudioTrackThread>& thread); + // caller must hold lock on mLock for all _l methods status_t createTrack_l(audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, audio_output_flags_t flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output); + + // can only be called when !mActive void flush_l(); + status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount); audio_io_handle_t getOutput_l(); status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart); @@ -502,40 +539,67 @@ protected: float mVolume[2]; float mSendLevel; - uint32_t mFrameCount; + uint32_t mSampleRate; + size_t mFrameCount; // corresponds to current IAudioTrack + size_t mReqFrameCount; // frame count to request the next time a new + // IAudioTrack is needed - audio_track_cblk_t* mCblk; - audio_format_t mFormat; + audio_track_cblk_t* mCblk; // re-load after mLock.unlock() + + // Starting address of buffers in shared memory. If there is a shared buffer, mBuffers + // is the value of pointer() for the shared buffer, otherwise mBuffers points + // immediately after the control block. This address is for the mapping within client + // address space. 
AudioFlinger::TrackBase::mBuffer is for the server address space. + void* mBuffers; + + audio_format_t mFormat; // as requested by client, not forced to 16-bit audio_stream_type_t mStreamType; - uint8_t mChannelCount; - uint8_t mMuted; - uint8_t mReserved; + uint32_t mChannelCount; audio_channel_mask_t mChannelMask; + + // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. + // For 8-bit PCM data, mFrameSizeAF is + // twice as large because data is expanded to 16-bit before being stored in buffer. + size_t mFrameSize; // app-level frame size + size_t mFrameSizeAF; // AudioFlinger frame size + status_t mStatus; uint32_t mLatency; bool mActive; // protected by mLock callback_t mCbf; // callback handler for events, or NULL - void* mUserData; - uint32_t mNotificationFramesReq; // requested number of frames between each notification callback - uint32_t mNotificationFramesAct; // actual number of frames between each notification callback + void* mUserData; // for client callback handler + + // for notification APIs + uint32_t mNotificationFramesReq; // requested number of frames between each + // notification callback + uint32_t mNotificationFramesAct; // actual number of frames between each + // notification callback sp<IMemory> mSharedBuffer; int mLoopCount; uint32_t mRemainingFrames; - uint32_t mMarkerPosition; + uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; - uint32_t mNewPosition; - uint32_t mUpdatePeriod; + uint32_t mNewPosition; // in frames + uint32_t mUpdatePeriod; // in frames + bool mFlushed; // FIXME will be made obsolete by making flush() synchronous audio_output_flags_t mFlags; int mSessionId; int mAuxEffectId; + + // When locking both mLock and mCblk->lock, must lock in this order to avoid deadlock: + // 1. mLock + // 2. mCblk->lock + // It is OK to lock only mCblk->lock. 
mutable Mutex mLock; - status_t mRestoreStatus; + bool mIsTimed; int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; + AudioTrackClientProxy* mProxy; + bool mAwaitBoost; // thread should wait for priority boost before running }; class TimedAudioTrack : public AudioTrack diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h index 65c26f4..b1ed7b0 100644 --- a/include/media/EffectsFactoryApi.h +++ b/include/media/EffectsFactoryApi.h @@ -74,7 +74,8 @@ int EffectQueryNumberEffects(uint32_t *pNumEffects); // -ENOENT no more effect available // -ENODEV factory failed to initialize // -EINVAL invalid pDescriptor -// -ENOSYS effect list has changed since last execution of EffectQueryNumberEffects() +// -ENOSYS effect list has changed since last execution of +// EffectQueryNumberEffects() // *pDescriptor: updated with the effect descriptor. // //////////////////////////////////////////////////////////////////////////////// @@ -91,12 +92,12 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor); // // Input: // pEffectUuid: pointer to the effect uuid. -// sessionId: audio session to which this effect instance will be attached. All effects created -// with the same session ID are connected in series and process the same signal stream. -// Knowing that two effects are part of the same effect chain can help the library implement -// some kind of optimizations. -// ioId: identifies the output or input stream this effect is directed to at audio HAL. For future -// use especially with tunneled HW accelerated effects +// sessionId: audio session to which this effect instance will be attached. All effects +// created with the same session ID are connected in series and process the same signal +// stream. Knowing that two effects are part of the same effect chain can help the +// library implement some kind of optimizations. 
+// ioId: identifies the output or input stream this effect is directed to at audio HAL. +// For future use especially with tunneled HW accelerated effects // // Input/Output: // pHandle: address where to return the effect handle. @@ -109,7 +110,8 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor); // *pHandle: updated with the effect handle. // //////////////////////////////////////////////////////////////////////////////// -int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle); +int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, + effect_handle_t *pHandle); //////////////////////////////////////////////////////////////////////////////// // diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h index 5170a87..9c3067e 100644 --- a/include/media/IAudioFlinger.h +++ b/include/media/IAudioFlinger.h @@ -56,13 +56,12 @@ public: * return null if the track cannot be created. 
*/ virtual sp<IAudioTrack> createTrack( - pid_t pid, audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, - track_flags_t flags, + size_t frameCount, + track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, // -1 means unused, otherwise must be valid non-0 @@ -70,12 +69,11 @@ public: status_t *status) = 0; virtual sp<IAudioRecord> openRecord( - pid_t pid, audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, track_flags_t flags, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, @@ -123,7 +121,8 @@ public: virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) = 0; - virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const = 0; + virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) + const = 0; // register a current process for audio output change notifications virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0; @@ -156,10 +155,10 @@ public: virtual status_t setVoiceVolume(float volume) = 0; - virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, + virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, audio_io_handle_t output) const = 0; - virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const = 0; + virtual size_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0; virtual int newAudioSessionId() = 0; @@ -173,7 +172,7 @@ public: virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID, effect_descriptor_t *pDescriptor) const = 0; - virtual sp<IEffect> createEffect(pid_t pid, + virtual sp<IEffect> createEffect( effect_descriptor_t *pDesc, const sp<IEffectClient>& client, int32_t priority, @@ -191,8 +190,8 @@ public: // helpers for 
android.media.AudioManager.getProperty(), see description there for meaning // FIXME move these APIs to AudioPolicy to permit a more accurate implementation // that looks on primary device for a stream with fast flag, primary flag, or first one. - virtual int32_t getPrimaryOutputSamplingRate() = 0; - virtual int32_t getPrimaryOutputFrameCount() = 0; + virtual uint32_t getPrimaryOutputSamplingRate() = 0; + virtual size_t getPrimaryOutputFrameCount() = 0; }; diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h index cc2e069..b5ad4ef 100644 --- a/include/media/IAudioPolicyService.h +++ b/include/media/IAudioPolicyService.h @@ -44,9 +44,10 @@ public: audio_policy_dev_state_t state, const char *device_address) = 0; virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, - const char *device_address) = 0; + const char *device_address) = 0; virtual status_t setPhoneState(audio_mode_t state) = 0; - virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0; + virtual status_t setForceUse(audio_policy_force_use_t usage, + audio_policy_forced_cfg_t config) = 0; virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0; virtual audio_io_handle_t getOutput(audio_stream_type_t stream, uint32_t samplingRate = 0, @@ -88,6 +89,8 @@ public: virtual status_t unregisterEffect(int id) = 0; virtual status_t setEffectEnabled(int id, bool enabled) = 0; virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0; + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) + const = 0; virtual bool isSourceActive(audio_source_t source) const = 0; virtual status_t queryDefaultPreProcessing(int audioSession, effect_descriptor_t *descriptors, diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h index 9e0e389..144be0e 100644 --- a/include/media/IAudioTrack.h +++ b/include/media/IAudioTrack.h @@ 
-54,11 +54,6 @@ public: */ virtual void flush() = 0; - /* Mute or unmute this track. - * While muted, the callback, if set, is still called. - */ - virtual void mute(bool) = 0; - /* Pause a track. If set, the callback will cease being called and * obtainBuffer will return an error. Buffers that are already released * will continue to be processed, unless/until flush() is called. diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h index 61059bd..9dcb8d9 100644 --- a/include/media/ICrypto.h +++ b/include/media/ICrypto.h @@ -31,7 +31,7 @@ struct ICrypto : public IInterface { virtual status_t initCheck() const = 0; - virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const = 0; + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0; virtual status_t createPlugin( const uint8_t uuid[16], const void *data, size_t size) = 0; diff --git a/include/media/IDrm.h b/include/media/IDrm.h new file mode 100644 index 0000000..d630c40 --- /dev/null +++ b/include/media/IDrm.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <binder/IInterface.h> +#include <media/stagefright/foundation/ABase.h> +#include <media/drm/DrmAPI.h> +#include <media/IDrmClient.h> + +#ifndef ANDROID_IDRM_H_ + +#define ANDROID_IDRM_H_ + +namespace android { + +struct AString; + +struct IDrm : public IInterface { + DECLARE_META_INTERFACE(Drm); + + virtual status_t initCheck() const = 0; + + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0; + + virtual status_t createPlugin(const uint8_t uuid[16]) = 0; + + virtual status_t destroyPlugin() = 0; + + virtual status_t openSession(Vector<uint8_t> &sessionId) = 0; + + virtual status_t closeSession(Vector<uint8_t> const &sessionId) = 0; + + virtual status_t + getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, DrmPlugin::KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl) = 0; + + virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId) = 0; + + virtual status_t removeKeys(Vector<uint8_t> const &keySetId) = 0; + + virtual status_t restoreKeys(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keySetId) = 0; + + virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const = 0; + + virtual status_t getProvisionRequest(Vector<uint8_t> &request, + String8 &defaulUrl) = 0; + + virtual status_t provideProvisionResponse(Vector<uint8_t> const &response) = 0; + + virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0; + + virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0; + + virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0; + virtual status_t getPropertyByteArray(String8 const &name, + Vector<uint8_t> &value) const = 0; + virtual status_t setPropertyString(String8 const &name, + String8 const &value ) const = 
0; + virtual status_t setPropertyByteArray(String8 const &name, + Vector<uint8_t> const &value) const = 0; + + virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) = 0; + + virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) = 0; + + virtual status_t encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) = 0; + + virtual status_t decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) = 0; + + virtual status_t sign(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature) = 0; + + virtual status_t verify(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match) = 0; + + virtual status_t setListener(const sp<IDrmClient>& listener) = 0; + +private: + DISALLOW_EVIL_CONSTRUCTORS(IDrm); +}; + +struct BnDrm : public BnInterface<IDrm> { + virtual status_t onTransact( + uint32_t code, const Parcel &data, Parcel *reply, + uint32_t flags = 0); +private: + void readVector(const Parcel &data, Vector<uint8_t> &vector) const; + void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const; +}; + +} // namespace android + +#endif // ANDROID_IDRM_H_ + diff --git a/include/media/IDrmClient.h b/include/media/IDrmClient.h new file mode 100644 index 0000000..3b2fc7c --- /dev/null +++ b/include/media/IDrmClient.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_IDRMCLIENT_H +#define ANDROID_IDRMCLIENT_H + +#include <utils/RefBase.h> +#include <binder/IInterface.h> +#include <binder/Parcel.h> +#include <media/drm/DrmAPI.h> + +namespace android { + +class IDrmClient: public IInterface +{ +public: + DECLARE_META_INTERFACE(DrmClient); + + virtual void notify(DrmPlugin::EventType eventType, int extra, const Parcel *obj) = 0; +}; + +// ---------------------------------------------------------------------------- + +class BnDrmClient: public BnInterface<IDrmClient> +{ +public: + virtual status_t onTransact(uint32_t code, + const Parcel& data, + Parcel* reply, + uint32_t flags = 0); +}; + +}; // namespace android + +#endif // ANDROID_IDRMCLIENT_H diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h index a0613c7..6d27b18 100644 --- a/include/media/IHDCP.h +++ b/include/media/IHDCP.h @@ -45,18 +45,34 @@ struct IHDCP : public IInterface { // Request to shutdown the active HDCP session. virtual status_t shutdownAsync() = 0; - // Encrypt a data according to the HDCP spec. The data is to be - // encrypted in-place, only size bytes of data should be read/write, - // even if the size is not a multiple of 128 bit (16 bytes). + // ENCRYPTION only: + // Encrypt data according to the HDCP spec. "size" bytes of data are + // available at "inData" (virtual address), "size" may not be a multiple + // of 128 bits (16 bytes). An equal number of encrypted bytes should be + // written to the buffer at "outData" (virtual address). // This operation is to be synchronous, i.e. 
this call does not return // until outData contains size bytes of encrypted data. // streamCTR will be assigned by the caller (to 0 for the first PES stream, // 1 for the second and so on) - // inputCTR will be maintained by the callee for each PES stream. + // inputCTR _will_be_maintained_by_the_callee_ for each PES stream. virtual status_t encrypt( const void *inData, size_t size, uint32_t streamCTR, uint64_t *outInputCTR, void *outData) = 0; + // DECRYPTION only: + // Decrypt data according to the HDCP spec. + // "size" bytes of encrypted data are available at "inData" + // (virtual address), "size" may not be a multiple of 128 bits (16 bytes). + // An equal number of decrypted bytes should be written to the buffer + // at "outData" (virtual address). + // This operation is to be synchronous, i.e. this call does not return + // until outData contains size bytes of decrypted data. + // Both streamCTR and inputCTR will be provided by the caller. + virtual status_t decrypt( + const void *inData, size_t size, + uint32_t streamCTR, uint64_t inputCTR, + void *outData) = 0; + private: DISALLOW_EVIL_CONSTRUCTORS(IHDCP); }; diff --git a/include/media/IMediaLogService.h b/include/media/IMediaLogService.h new file mode 100644 index 0000000..1f5777e --- /dev/null +++ b/include/media/IMediaLogService.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_IMEDIALOGSERVICE_H +#define ANDROID_IMEDIALOGSERVICE_H + +#include <binder/IInterface.h> +#include <binder/IMemory.h> +#include <binder/Parcel.h> + +namespace android { + +class IMediaLogService: public IInterface +{ +public: + DECLARE_META_INTERFACE(MediaLogService); + + virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) = 0; + virtual void unregisterWriter(const sp<IMemory>& shared) = 0; + +}; + +class BnMediaLogService: public BnInterface<IMediaLogService> +{ +public: + virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, + uint32_t flags = 0); +}; + +} // namespace android + +#endif // ANDROID_IMEDIALOGSERVICE_H diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h index 4ed1863..0cbd269 100644 --- a/include/media/IMediaPlayer.h +++ b/include/media/IMediaPlayer.h @@ -32,7 +32,7 @@ namespace android { class Parcel; class Surface; class IStreamSource; -class ISurfaceTexture; +class IGraphicBufferProducer; class IMediaPlayer: public IInterface { @@ -46,7 +46,7 @@ public: virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0; virtual status_t setDataSource(const sp<IStreamSource>& source) = 0; virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) = 0; + const sp<IGraphicBufferProducer>& bufferProducer) = 0; virtual status_t prepareAsync() = 0; virtual status_t start() = 0; virtual status_t stop() = 0; diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h index 7a89135..fef7af2 100644 --- a/include/media/IMediaPlayerService.h +++ b/include/media/IMediaPlayerService.h @@ -32,6 +32,7 @@ namespace android { struct ICrypto; +struct IDrm; struct IHDCP; class IMediaRecorder; class IOMX; @@ -44,15 +45,16 @@ class IMediaPlayerService: public IInterface public: DECLARE_META_INTERFACE(MediaPlayerService); - virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid) = 0; - virtual 
sp<IMediaMetadataRetriever> createMetadataRetriever(pid_t pid) = 0; - virtual sp<IMediaPlayer> create(pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0; + virtual sp<IMediaRecorder> createMediaRecorder() = 0; + virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0; + virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0; virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0; virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0; virtual sp<IOMX> getOMX() = 0; virtual sp<ICrypto> makeCrypto() = 0; - virtual sp<IHDCP> makeHDCP() = 0; + virtual sp<IDrm> makeDrm() = 0; + virtual sp<IHDCP> makeHDCP(bool createEncryptionModule) = 0; // Connects to a remote display. // 'iface' specifies the address of the local interface on which to listen for @@ -87,6 +89,9 @@ public: virtual void addBatteryData(uint32_t params) = 0; virtual status_t pullBatteryData(Parcel* reply) = 0; + + virtual status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h index ec84e25..3e67550 100644 --- a/include/media/IMediaRecorder.h +++ b/include/media/IMediaRecorder.h @@ -26,7 +26,7 @@ class Surface; class ICamera; class ICameraRecordingProxy; class IMediaRecorderClient; -class ISurfaceTexture; +class IGraphicBufferProducer; class IMediaRecorder: public IInterface { @@ -35,7 +35,7 @@ public: virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy) = 0; - virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0; + virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0; virtual status_t setVideoSource(int vs) = 
0; virtual status_t setAudioSource(int as) = 0; virtual status_t setOutputFormat(int of) = 0; @@ -47,6 +47,7 @@ public: virtual status_t setVideoFrameRate(int frames_per_second) = 0; virtual status_t setParameters(const String8& params) = 0; virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0; + virtual status_t setClientName(const String16& clientName) = 0; virtual status_t prepare() = 0; virtual status_t getMaxAmplitude(int* max) = 0; virtual status_t start() = 0; @@ -55,7 +56,7 @@ public: virtual status_t init() = 0; virtual status_t close() = 0; virtual status_t release() = 0; - virtual sp<ISurfaceTexture> querySurfaceMediaSource() = 0; + virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/IOMX.h b/include/media/IOMX.h index be1b2fc..0b1d1e4 100644 --- a/include/media/IOMX.h +++ b/include/media/IOMX.h @@ -19,6 +19,7 @@ #define ANDROID_IOMX_H_ #include <binder/IInterface.h> +#include <gui/IGraphicBufferProducer.h> #include <ui/GraphicBuffer.h> #include <utils/List.h> #include <utils/String8.h> @@ -96,6 +97,12 @@ public: node_id node, OMX_U32 port_index, const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0; + virtual status_t createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer) = 0; + + virtual status_t signalEndOfInputStream(node_id node) = 0; + // This API clearly only makes sense if the caller lives in the // same process as the callee, i.e. 
is the media_server, as the // returned "buffer_data" pointer is just that, a pointer into local diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h index 252b401..7b0fa9e 100644 --- a/include/media/IRemoteDisplayClient.h +++ b/include/media/IRemoteDisplayClient.h @@ -26,7 +26,7 @@ namespace android { -class ISurfaceTexture; +class IGraphicBufferProducer; class IRemoteDisplayClient : public IInterface { @@ -48,7 +48,7 @@ public: // Indicates that the remote display has been connected successfully. // Provides a surface texture that the client should use to stream buffers to // the remote display. - virtual void onDisplayConnected(const sp<ISurfaceTexture>& surfaceTexture, + virtual void onDisplayConnected(const sp<IGraphicBufferProducer>& bufferProducer, uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way // Indicates that the remote display has been disconnected normally. diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h index 39e0a9e..677119b 100644 --- a/include/media/IStreamSource.h +++ b/include/media/IStreamSource.h @@ -37,6 +37,9 @@ struct IStreamSource : public IInterface { enum { // Video PES packets contain exactly one (aligned) access unit. kFlagAlignedVideoData = 1, + + // Timestamps are in ALooper::GetNowUs() units. 
+ kFlagIsRealTimeData = 2, }; virtual uint32_t flags() const { return 0; } }; diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h index b7bee3f..9a75f81 100644 --- a/include/media/MediaPlayerInterface.h +++ b/include/media/MediaPlayerInterface.h @@ -37,7 +37,7 @@ namespace android { class Parcel; class Surface; -class ISurfaceTexture; +class IGraphicBufferProducer; template<typename T> class SortedVector; @@ -131,9 +131,9 @@ public: return INVALID_OPERATION; } - // pass the buffered ISurfaceTexture to the media player service + // pass the buffered IGraphicBufferProducer to the media player service virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) = 0; + const sp<IGraphicBufferProducer>& bufferProducer) = 0; virtual status_t prepare() = 0; virtual status_t prepareAsync() = 0; @@ -198,6 +198,11 @@ public: return INVALID_OPERATION; } + virtual status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + return INVALID_OPERATION; + } + private: friend class MediaPlayerService; diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h index ef799f5..d7ac302 100644 --- a/include/media/MediaRecorderBase.h +++ b/include/media/MediaRecorderBase.h @@ -26,7 +26,7 @@ namespace android { class ICameraRecordingProxy; class Surface; -class ISurfaceTexture; +class IGraphicBufferProducer; struct MediaRecorderBase { MediaRecorderBase() {} @@ -42,12 +42,13 @@ struct MediaRecorderBase { virtual status_t setVideoFrameRate(int frames_per_second) = 0; virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy) = 0; - virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0; + virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0; virtual status_t setOutputFile(const char *path) = 0; virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0; virtual status_t 
setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;} virtual status_t setParameters(const String8& params) = 0; virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0; + virtual status_t setClientName(const String16& clientName) = 0; virtual status_t prepare() = 0; virtual status_t start() = 0; virtual status_t stop() = 0; @@ -55,7 +56,7 @@ struct MediaRecorderBase { virtual status_t reset() = 0; virtual status_t getMaxAmplitude(int *max) = 0; virtual status_t dump(int fd, const Vector<String16>& args) const = 0; - virtual sp<ISurfaceTexture> querySurfaceMediaSource() const = 0; + virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0; private: MediaRecorderBase(const MediaRecorderBase &); diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h new file mode 100644 index 0000000..04c5fd0 --- /dev/null +++ b/include/media/SingleStateQueue.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SINGLE_STATE_QUEUE_H +#define SINGLE_STATE_QUEUE_H + +// Non-blocking single element state queue, or +// Non-blocking single-reader / single-writer multi-word atomic load / store + +#include <stdint.h> + +namespace android { + +template<typename T> class SingleStateQueue { + +public: + + class Mutator; + class Observer; + + struct Shared { + // needs to be part of a union so don't define constructor or destructor + + friend class Mutator; + friend class Observer; + +private: + void init() { mAck = 0; mSequence = 0; } + + volatile int32_t mAck; +#if 0 + int mPad[7]; + // cache line boundary +#endif + volatile int32_t mSequence; + T mValue; + }; + + class Mutator { + public: + Mutator(Shared *shared); + /*virtual*/ ~Mutator() { } + + // push new value onto state queue, overwriting previous value; + // returns a sequence number which can be used with ack() + int32_t push(const T& value); + + // return true if most recent push has been observed + bool ack(); + + // return true if a push with specified sequence number or later has been observed + bool ack(int32_t sequence); + + private: + int32_t mSequence; + Shared * const mShared; + }; + + class Observer { + public: + Observer(Shared *shared); + /*virtual*/ ~Observer() { } + + // return true if value has changed + bool poll(T& value); + + private: + int32_t mSequence; + int mSeed; // for PRNG + Shared * const mShared; + }; + +#if 0 + SingleStateQueue(void /*Shared*/ *shared); + /*virtual*/ ~SingleStateQueue() { } + + static size_t size() { return sizeof(Shared); } +#endif + +}; + +} // namespace android + +#endif // SINGLE_STATE_QUEUE_H diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h index 002b045..7bf3069 100644 --- a/include/media/SoundPool.h +++ b/include/media/SoundPool.h @@ -65,8 +65,10 @@ public: sp<IMemory> getIMemory() { return mData; } // hack - void init(int numChannels, int sampleRate, audio_format_t format, size_t size, sp<IMemory> data ) { - mNumChannels = numChannels; 
mSampleRate = sampleRate; mFormat = format; mSize = size; mData = data; } + void init(int numChannels, int sampleRate, audio_format_t format, size_t size, + sp<IMemory> data ) { + mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size; + mData = data; } private: void init(); diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h index 29c8fd9..2183fbe 100644 --- a/include/media/ToneGenerator.h +++ b/include/media/ToneGenerator.h @@ -263,7 +263,7 @@ private: unsigned short mLoopCounter; // Current tone loopback count - int mSamplingRate; // AudioFlinger Sampling rate + uint32_t mSamplingRate; // AudioFlinger Sampling rate AudioTrack *mpAudioTrack; // Pointer to audio track used for playback Mutex mLock; // Mutex to control concurent access to ToneGenerator object from audio callback and application API Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond @@ -271,6 +271,7 @@ private: float mVolume; // Volume applied to audio track audio_stream_type_t mStreamType; // Audio stream used for output unsigned int mProcessSize; // Size of audio blocks generated at a time by audioCallback() (in PCM frames). 
+ struct timespec mStartTime; // tone start time: needed to guaranty actual tone duration bool initAudioTrack(); static void audioCallback(int event, void* user, void *info); diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h index d753eba..14381c7 100644 --- a/include/media/mediaplayer.h +++ b/include/media/mediaplayer.h @@ -33,7 +33,7 @@ class ANativeWindow; namespace android { class Surface; -class ISurfaceTexture; +class IGraphicBufferProducer; enum media_event_type { MEDIA_NOP = 0, // interface test message @@ -199,7 +199,7 @@ public: status_t setDataSource(int fd, int64_t offset, int64_t length); status_t setDataSource(const sp<IStreamSource> &source); status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture); + const sp<IGraphicBufferProducer>& bufferProducer); status_t setListener(const sp<MediaPlayerListener>& listener); status_t prepare(); status_t prepareAsync(); @@ -232,6 +232,9 @@ public: status_t setRetransmitEndpoint(const char* addrString, uint16_t port); status_t setNextMediaPlayer(const sp<MediaPlayer>& player); + status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + private: void clear_l(); status_t seekTo_l(int msec); diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h index 6d304e0..88a42a0 100644 --- a/include/media/mediarecorder.h +++ b/include/media/mediarecorder.h @@ -31,8 +31,8 @@ class Surface; class IMediaRecorder; class ICamera; class ICameraRecordingProxy; -class ISurfaceTexture; -class SurfaceTextureClient; +class IGraphicBufferProducer; +class Surface; typedef void (*media_completion_f)(status_t status, void *cookie); @@ -207,7 +207,7 @@ public: void died(); status_t initCheck(); status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy); - status_t setPreviewSurface(const sp<Surface>& surface); + status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface); status_t setVideoSource(int vs); status_t 
setAudioSource(int as); status_t setOutputFormat(int of); @@ -219,6 +219,7 @@ public: status_t setVideoFrameRate(int frames_per_second); status_t setParameters(const String8& params); status_t setListener(const sp<MediaRecorderListener>& listener); + status_t setClientName(const String16& clientName); status_t prepare(); status_t getMaxAmplitude(int* max); status_t start(); @@ -228,7 +229,7 @@ public: status_t close(); status_t release(); void notify(int msg, int ext1, int ext2); - sp<ISurfaceTexture> querySurfaceMediaSourceFromMediaServer(); + sp<IGraphicBufferProducer> querySurfaceMediaSourceFromMediaServer(); private: void doCleanUp(); @@ -237,10 +238,10 @@ private: sp<IMediaRecorder> mMediaRecorder; sp<MediaRecorderListener> mListener; - // Reference toISurfaceTexture + // Reference to IGraphicBufferProducer // for encoding GL Frames. That is useful only when the // video source is set to VIDEO_SOURCE_GRALLOC_BUFFER - sp<ISurfaceTexture> mSurfaceMediaSource; + sp<IGraphicBufferProducer> mSurfaceMediaSource; media_recorder_states mCurrentState; bool mIsAudioSourceSet; diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h index 81f42ed..f5d6eb5 100644 --- a/include/media/nbaio/NBAIO.h +++ b/include/media/nbaio/NBAIO.h @@ -45,17 +45,15 @@ enum { // Negotiation of format is based on the data provider and data sink, or the data consumer and // data source, exchanging prioritized arrays of offers and counter-offers until a single offer is // mutually agreed upon. Each offer is an NBAIO_Format. For simplicity and performance, -// NBAIO_Format is an enum that ties together the most important combinations of the various +// NBAIO_Format is a typedef that ties together the most important combinations of the various // attributes, rather than a struct with separate fields for format, sample rate, channel count, // interleave, packing, alignment, etc. 
The reason is that NBAIO_Format tries to abstract out only -// the combinations that are actually needed within AudioFligner. If the list of combinations grows +// the combinations that are actually needed within AudioFlinger. If the list of combinations grows // too large, then this decision should be re-visited. -enum NBAIO_Format { - Format_Invalid, - Format_SR44_1_C2_I16, // 44.1 kHz PCM stereo interleaved 16-bit signed - Format_SR48_C2_I16, // 48 kHz PCM stereo interleaved 16-bit signed - Format_SR44_1_C1_I16, // 44.1 kHz PCM mono interleaved 16-bit signed - Format_SR48_C1_I16, // 48 kHz PCM mono interleaved 16-bit signed +// Sample rate and channel count are explicit, PCM interleaved 16-bit is assumed. +typedef unsigned NBAIO_Format; +enum { + Format_Invalid }; // Return the frame size of an NBAIO_Format in bytes diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h new file mode 100644 index 0000000..107ba66 --- /dev/null +++ b/include/media/nbaio/NBLog.h @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Non-blocking event logger intended for safe communication between processes via shared memory + +#ifndef ANDROID_MEDIA_NBLOG_H +#define ANDROID_MEDIA_NBLOG_H + +#include <binder/IMemory.h> +#include <utils/Mutex.h> +#include <media/nbaio/roundup.h> + +namespace android { + +class NBLog { + +public: + +class Writer; +class Reader; + +private: + +enum Event { + EVENT_RESERVED, + EVENT_STRING, // ASCII string, not NUL-terminated + EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC) +}; + +// --------------------------------------------------------------------------- + +// representation of a single log entry in private memory +struct Entry { + Entry(Event event, const void *data, size_t length) + : mEvent(event), mLength(length), mData(data) { } + /*virtual*/ ~Entry() { } + + int readAt(size_t offset) const; + +private: + friend class Writer; + Event mEvent; // event type + size_t mLength; // length of additional data, 0 <= mLength <= 255 + const void *mData; // event type-specific data +}; + +// representation of a single log entry in shared memory +// byte[0] mEvent +// byte[1] mLength +// byte[2] mData[0] +// ... +// byte[2+i] mData[i] +// ... +// byte[2+mLength-1] mData[mLength-1] +// byte[2+mLength] duplicate copy of mLength to permit reverse scan +// byte[3+mLength] start of next log entry + +// located in shared memory +struct Shared { + Shared() : mRear(0) { } + /*virtual*/ ~Shared() { } + + volatile int32_t mRear; // index one byte past the end of most recent Entry + char mBuffer[0]; // circular buffer for entries +}; + +public: + +// --------------------------------------------------------------------------- + +// FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet. +// For now it is just a namespace for sharedSize(). 
+class Timeline : public RefBase { +public: +#if 0 + Timeline(size_t size, void *shared = NULL); + virtual ~Timeline(); +#endif + + static size_t sharedSize(size_t size); + +#if 0 +private: + friend class Writer; + friend class Reader; + + const size_t mSize; // circular buffer size in bytes, must be a power of 2 + bool mOwn; // whether I own the memory at mShared + Shared* const mShared; // pointer to shared memory +#endif +}; + +// --------------------------------------------------------------------------- + +// Writer is thread-safe with respect to Reader, but not with respect to multiple threads +// calling Writer methods. If you need multi-thread safety for writing, use LockedWriter. +class Writer : public RefBase { +public: + Writer(); // dummy nop implementation without shared memory + Writer(size_t size, void *shared); + Writer(size_t size, const sp<IMemory>& iMemory); + virtual ~Writer() { } + + virtual void log(const char *string); + virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); + virtual void logvf(const char *fmt, va_list ap); + virtual void logTimestamp(); + virtual void logTimestamp(const struct timespec& ts); + + virtual bool isEnabled() const; + + // return value for all of these is the previous isEnabled() + virtual bool setEnabled(bool enabled); // but won't enable if no shared memory + bool enable() { return setEnabled(true); } + bool disable() { return setEnabled(false); } + + sp<IMemory> getIMemory() const { return mIMemory; } + +private: + void log(Event event, const void *data, size_t length); + void log(const Entry *entry, bool trusted = false); + + const size_t mSize; // circular buffer size in bytes, must be a power of 2 + Shared* const mShared; // raw pointer to shared memory + const sp<IMemory> mIMemory; // ref-counted version + int32_t mRear; // my private copy of mShared->mRear + bool mEnabled; // whether to actually log +}; + +// 
--------------------------------------------------------------------------- + +// Similar to Writer, but safe for multiple threads to call concurrently +class LockedWriter : public Writer { +public: + LockedWriter(); + LockedWriter(size_t size, void *shared); + + virtual void log(const char *string); + virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); + virtual void logvf(const char *fmt, va_list ap); + virtual void logTimestamp(); + virtual void logTimestamp(const struct timespec& ts); + + virtual bool isEnabled() const; + virtual bool setEnabled(bool enabled); + +private: + mutable Mutex mLock; +}; + +// --------------------------------------------------------------------------- + +class Reader : public RefBase { +public: + Reader(size_t size, const void *shared); + Reader(size_t size, const sp<IMemory>& iMemory); + virtual ~Reader() { } + + void dump(int fd, size_t indent = 0); + bool isIMemory(const sp<IMemory>& iMemory) const; + +private: + const size_t mSize; // circular buffer size in bytes, must be a power of 2 + const Shared* const mShared; // raw pointer to shared memory + const sp<IMemory> mIMemory; // ref-counted version + int32_t mFront; // index of oldest acknowledged Entry + + static const size_t kSquashTimestamp = 5; // squash this many or more adjacent timestamps +}; + +}; // class NBLog + +} // namespace android + +#endif // ANDROID_MEDIA_NBLOG_H diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h index df1c46b..34bae29 100644 --- a/include/media/stagefright/ACodec.h +++ b/include/media/stagefright/ACodec.h @@ -43,7 +43,10 @@ struct ACodec : public AHierarchicalStateMachine { kWhatError = 'erro', kWhatComponentAllocated = 'cAll', kWhatComponentConfigured = 'cCon', + kWhatInputSurfaceCreated = 'isfc', + kWhatSignaledInputEOS = 'seos', kWhatBuffersAllocated = 'allc', + kWhatOMXDied = 'OMXd', }; ACodec(); @@ -54,8 +57,12 @@ struct ACodec : public AHierarchicalStateMachine { void 
signalResume(); void initiateShutdown(bool keepComponentAllocated = false); + void signalSetParameters(const sp<AMessage> &msg); + void signalEndOfInputStream(); + void initiateAllocateComponent(const sp<AMessage> &msg); void initiateConfigureComponent(const sp<AMessage> &msg); + void initiateCreateInputSurface(); void initiateStart(); void signalRequestIDRFrame(); @@ -91,6 +98,7 @@ private: struct ExecutingToIdleState; struct IdleToLoadedState; struct FlushingState; + struct DeathNotifier; enum { kWhatSetup = 'setu', @@ -103,8 +111,11 @@ private: kWhatDrainDeferredMessages = 'drai', kWhatAllocateComponent = 'allo', kWhatConfigureComponent = 'conf', + kWhatCreateInputSurface = 'cisf', + kWhatSignalEndOfInputStream = 'eois', kWhatStart = 'star', kWhatRequestIDRFrame = 'ridr', + kWhatSetParameters = 'setP', }; enum { @@ -253,12 +264,16 @@ private: status_t pushBlankBuffersToNativeWindow(); - // Returns true iff all buffers on the given port have status OWNED_BY_US. + // Returns true iff all buffers on the given port have status + // OWNED_BY_US or OWNED_BY_NATIVE_WINDOW. bool allYourBuffersAreBelongToUs(OMX_U32 portIndex); bool allYourBuffersAreBelongToUs(); + void waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs(); + size_t countBuffersOwnedByComponent(OMX_U32 portIndex) const; + size_t countBuffersOwnedByNativeWindow() const; void deferMessage(const sp<AMessage> &msg); void processDeferredMessages(); @@ -270,6 +285,10 @@ private: status_t internalError = UNKNOWN_ERROR); status_t requestIDRFrame(); + status_t setParameters(const sp<AMessage> ¶ms); + + // Send EOS on input stream. 
+ void onSignalEndOfInputStream(); DISALLOW_EVIL_CONSTRUCTORS(ACodec); }; diff --git a/include/media/stagefright/BufferProducerWrapper.h b/include/media/stagefright/BufferProducerWrapper.h new file mode 100644 index 0000000..d8acf30 --- /dev/null +++ b/include/media/stagefright/BufferProducerWrapper.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef BUFFER_PRODUCER_WRAPPER_H_ + +#define BUFFER_PRODUCER_WRAPPER_H_ + +#include <gui/IGraphicBufferProducer.h> + +namespace android { + +// Can't use static_cast to cast a RefBase back to an IGraphicBufferProducer, +// because IGBP's parent (IInterface) uses virtual inheritance. This class +// wraps IGBP while we pass it through AMessage. 
+ +struct BufferProducerWrapper : RefBase { + BufferProducerWrapper( + const sp<IGraphicBufferProducer>& bufferProducer) : + mBufferProducer(bufferProducer) { } + + sp<IGraphicBufferProducer> getBufferProducer() const { + return mBufferProducer; + } + +private: + const sp<IGraphicBufferProducer> mBufferProducer; + + DISALLOW_EVIL_CONSTRUCTORS(BufferProducerWrapper); +}; + +} // namespace android + +#endif // BUFFER_PRODUCER_WRAPPER_H_ diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h index 6d6b8a9..a829916 100644 --- a/include/media/stagefright/CameraSource.h +++ b/include/media/stagefright/CameraSource.h @@ -25,6 +25,7 @@ #include <camera/CameraParameters.h> #include <utils/List.h> #include <utils/RefBase.h> +#include <utils/String16.h> namespace android { @@ -39,9 +40,11 @@ public: * settings (such as video size, frame rate, color format, etc) * from the default camera. * + * @param clientName The package/process name of the client application. + * This is used for permissions checking. * @return NULL on error. */ - static CameraSource *Create(); + static CameraSource *Create(const String16 &clientName); /** * Factory method to create a new CameraSource. @@ -52,7 +55,11 @@ public: * * @param cameraId the id of the camera that the source will connect * to if camera is NULL; otherwise ignored. - * + * @param clientName the package/process name of the camera-using + * application if camera is NULL; otherwise ignored. Used for + * permissions checking. + * @param clientUid the UID of the camera-using application if camera is + * NULL; otherwise ignored. Used for permissions checking. 
* @param videoSize the dimension (in pixels) of the video frame * @param frameRate the target frames per second * @param surface the preview surface for display where preview @@ -71,9 +78,11 @@ public: static CameraSource *CreateFromCamera(const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t frameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, bool storeMetaDataInVideoBuffers = false); virtual ~CameraSource(); @@ -145,7 +154,7 @@ protected: sp<Camera> mCamera; sp<ICameraRecordingProxy> mCameraRecordingProxy; sp<DeathNotifier> mDeathNotifier; - sp<Surface> mSurface; + sp<IGraphicBufferProducer> mSurface; sp<MetaData> mMeta; int64_t mStartTimeUs; @@ -158,9 +167,9 @@ protected: int64_t mTimeBetweenFrameCaptureUs; CameraSource(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId, + int32_t cameraId, const String16& clientName, uid_t clientUid, Size videoSize, int32_t frameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, bool storeMetaDataInVideoBuffers); virtual void startCameraRecording(); @@ -198,17 +207,20 @@ private: status_t init(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId, Size videoSize, int32_t frameRate, - bool storeMetaDataInVideoBuffers); + int32_t cameraId, const String16& clientName, uid_t clientUid, + Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers); status_t initWithCameraAccess( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId, Size videoSize, int32_t frameRate, - bool storeMetaDataInVideoBuffers); + int32_t cameraId, const String16& clientName, uid_t clientUid, + Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers); status_t isCameraAvailable(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId); + int32_t 
cameraId, + const String16& clientName, + uid_t clientUid); + status_t isCameraColorFormatSupported(const CameraParameters& params); status_t configureCamera(CameraParameters* params, int32_t width, int32_t height, diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h index 0936da2..6b7a63c 100644 --- a/include/media/stagefright/CameraSourceTimeLapse.h +++ b/include/media/stagefright/CameraSourceTimeLapse.h @@ -22,6 +22,7 @@ #include <utils/RefBase.h> #include <utils/threads.h> +#include <utils/String16.h> namespace android { @@ -35,9 +36,11 @@ public: const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t videoFrameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, int64_t timeBetweenTimeLapseFrameCaptureUs); virtual ~CameraSourceTimeLapse(); @@ -108,9 +111,11 @@ private: const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t videoFrameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, int64_t timeBetweenTimeLapseFrameCaptureUs); // Wrapper over CameraSource::signalBufferReturned() to implement quick stop. diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h index 00d583e..742bc0e 100644 --- a/include/media/stagefright/DataSource.h +++ b/include/media/stagefright/DataSource.h @@ -54,6 +54,9 @@ public: // Convenience methods: bool getUInt16(off64_t offset, uint16_t *x); + bool getUInt24(off64_t offset, uint32_t *x); // 3 byte int, returned as a 32-bit int + bool getUInt32(off64_t offset, uint32_t *x); + bool getUInt64(off64_t offset, uint64_t *x); // May return ERROR_UNSUPPORTED. 
virtual status_t getSize(off64_t *size); diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h index 3596b38..3ef6b9a 100644 --- a/include/media/stagefright/MPEG4Writer.h +++ b/include/media/stagefright/MPEG4Writer.h @@ -35,7 +35,13 @@ public: MPEG4Writer(const char *filename); MPEG4Writer(int fd); + // Limitations + // 1. No more than 2 tracks can be added + // 2. Only video or audio source can be added + // 3. No more than one video and/or one audio source can be added. virtual status_t addSource(const sp<MediaSource> &source); + + // Returns INVALID_OPERATION if there is no source or track. virtual status_t start(MetaData *param = NULL); virtual status_t stop() { return reset(); } virtual status_t pause(); @@ -68,6 +74,7 @@ private: int mFd; status_t mInitCheck; + bool mIsRealTimeRecording; bool mUse4ByteNalLength; bool mUse32BitOffset; bool mIsFileSizeLimitExplicitlyRequested; @@ -162,6 +169,13 @@ private: // Only makes sense for H.264/AVC bool useNalLengthFour(); + // Return whether the writer is used for real time recording. + // In real time recording mode, new samples will be allowed to buffered into + // chunks in higher priority thread, even though the file writer has not + // drained the chunks yet. + // By default, real time recording is on. + bool isRealTimeRecording() const; + void lock(); void unlock(); diff --git a/include/media/stagefright/MediaAdapter.h b/include/media/stagefright/MediaAdapter.h new file mode 100644 index 0000000..369fce6 --- /dev/null +++ b/include/media/stagefright/MediaAdapter.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MEDIA_ADAPTER_H +#define MEDIA_ADAPTER_H + +#include <media/stagefright/foundation/ABase.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MetaData.h> +#include <utils/threads.h> + +namespace android { + +// Convert the MediaMuxer's push model into MPEG4Writer's pull model. +// Used only by the MediaMuxer for now. +struct MediaAdapter : public MediaSource, public MediaBufferObserver { +public: + // MetaData is used to set the format and returned at getFormat. + MediaAdapter(const sp<MetaData> &meta); + virtual ~MediaAdapter(); + ///////////////////////////////////////////////// + // Inherited functions from MediaSource + ///////////////////////////////////////////////// + + virtual status_t start(MetaData *params = NULL); + virtual status_t stop(); + virtual sp<MetaData> getFormat(); + virtual status_t read( + MediaBuffer **buffer, const ReadOptions *options = NULL); + + ///////////////////////////////////////////////// + // Inherited functions from MediaBufferObserver + ///////////////////////////////////////////////// + + virtual void signalBufferReturned(MediaBuffer *buffer); + + ///////////////////////////////////////////////// + // Non-inherited functions: + ///////////////////////////////////////////////// + + // pushBuffer() will wait for the read() finish, and read() will have a + // deep copy, such that after pushBuffer return, the buffer can be re-used. 
+ status_t pushBuffer(MediaBuffer *buffer); + +private: + Mutex mAdapterLock; + // Make sure the read() wait for the incoming buffer. + Condition mBufferReadCond; + // Make sure the pushBuffer() wait for the current buffer consumed. + Condition mBufferReturnedCond; + + MediaBuffer *mCurrentMediaBuffer; + + bool mStarted; + sp<MetaData> mOutputFormat; + + DISALLOW_EVIL_CONSTRUCTORS(MediaAdapter); +}; + +} // namespace android + +#endif // MEDIA_ADAPTER_H diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h index b1e57cf..76aa503 100644 --- a/include/media/stagefright/MediaCodec.h +++ b/include/media/stagefright/MediaCodec.h @@ -18,7 +18,7 @@ #define MEDIA_CODEC_H_ -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <media/hardware/CryptoAPI.h> #include <media/stagefright/foundation/AHandler.h> #include <utils/Vector.h> @@ -31,7 +31,7 @@ struct AMessage; struct AString; struct ICrypto; struct SoftwareRenderer; -struct SurfaceTextureClient; +struct Surface; struct MediaCodec : public AHandler { enum ConfigureFlags { @@ -52,10 +52,12 @@ struct MediaCodec : public AHandler { status_t configure( const sp<AMessage> &format, - const sp<SurfaceTextureClient> &nativeWindow, + const sp<Surface> &nativeWindow, const sp<ICrypto> &crypto, uint32_t flags); + status_t createInputSurface(sp<IGraphicBufferProducer>* bufferProducer); + status_t start(); // Returns to a state in which the component remains allocated but @@ -101,6 +103,8 @@ struct MediaCodec : public AHandler { status_t renderOutputBufferAndRelease(size_t index); status_t releaseOutputBuffer(size_t index); + status_t signalEndOfInputStream(); + status_t getOutputFormat(sp<AMessage> *format) const; status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const; @@ -115,6 +119,8 @@ struct MediaCodec : public AHandler { status_t getName(AString *componentName) const; + status_t setParameters(const sp<AMessage> ¶ms); + protected: virtual ~MediaCodec(); 
virtual void onMessageReceived(const sp<AMessage> &msg); @@ -141,6 +147,7 @@ private: enum { kWhatInit = 'init', kWhatConfigure = 'conf', + kWhatCreateInputSurface = 'cisf', kWhatStart = 'strt', kWhatStop = 'stop', kWhatRelease = 'rele', @@ -148,6 +155,7 @@ private: kWhatQueueInputBuffer = 'queI', kWhatDequeueOutputBuffer = 'deqO', kWhatReleaseOutputBuffer = 'relO', + kWhatSignalEndOfInputStream = 'eois', kWhatGetBuffers = 'getB', kWhatFlush = 'flus', kWhatGetOutputFormat = 'getO', @@ -157,6 +165,7 @@ private: kWhatRequestIDRFrame = 'ridr', kWhatRequestActivityNotification = 'racN', kWhatGetName = 'getN', + kWhatSetParameters = 'setP', }; enum { @@ -167,6 +176,9 @@ private: kFlagDequeueInputPending = 16, kFlagDequeueOutputPending = 32, kFlagIsSecure = 64, + kFlagSawMediaServerDie = 128, + kFlagIsEncoder = 256, + kFlagGatherCodecSpecificData = 512, }; struct BufferInfo { @@ -184,7 +196,7 @@ private: AString mComponentName; uint32_t mReplyID; uint32_t mFlags; - sp<SurfaceTextureClient> mNativeWindow; + sp<Surface> mNativeWindow; SoftwareRenderer *mSoftRenderer; sp<AMessage> mOutputFormat; @@ -203,6 +215,8 @@ private: sp<AMessage> mActivityNotify; + bool mHaveInputSurface; + MediaCodec(const sp<ALooper> &looper); static status_t PostAndAwaitResponse( @@ -226,10 +240,14 @@ private: status_t queueCSDInputBuffer(size_t bufferIndex); status_t setNativeWindow( - const sp<SurfaceTextureClient> &surfaceTextureClient); + const sp<Surface> &surface); void postActivityNotificationIfPossible(); + status_t onSetParameters(const sp<AMessage> ¶ms); + + status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer); + DISALLOW_EVIL_CONSTRUCTORS(MediaCodec); }; diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h index 457d5d7..81de6e4 100644 --- a/include/media/stagefright/MediaDefs.h +++ b/include/media/stagefright/MediaDefs.h @@ -42,6 +42,7 @@ extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW; extern const char 
*MEDIA_MIMETYPE_AUDIO_RAW; extern const char *MEDIA_MIMETYPE_AUDIO_FLAC; extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS; +extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM; extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4; extern const char *MEDIA_MIMETYPE_CONTAINER_WAV; diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h index bb01467..ee5e4e2 100644 --- a/include/media/stagefright/MediaErrors.h +++ b/include/media/stagefright/MediaErrors.h @@ -54,6 +54,8 @@ enum { ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5, ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6, ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7, + ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8, + ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9, ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500, ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999, diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h new file mode 100644 index 0000000..c1fdbad --- /dev/null +++ b/include/media/stagefright/MediaMuxer.h @@ -0,0 +1,128 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MEDIA_MUXER_H_ +#define MEDIA_MUXER_H_ + +#include <utils/Errors.h> +#include <utils/RefBase.h> +#include <utils/Vector.h> +#include <utils/threads.h> + +namespace android { + +struct ABuffer; +struct AMessage; +struct MediaAdapter; +struct MediaBuffer; +struct MediaSource; +struct MetaData; +struct MPEG4Writer; + +// MediaMuxer is used to mux multiple tracks into a video. Currently, we only +// support a mp4 file as the output. +// The expected calling order of the functions is: +// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop +// If muxing operation need to be cancelled, the app is responsible for +// deleting the output file after stop. +struct MediaMuxer : public RefBase { +public: + // Please update media/java/android/media/MediaMuxer.java if the + // OutputFormat is updated. + enum OutputFormat { + OUTPUT_FORMAT_MPEG_4 = 0, + OUTPUT_FORMAT_LIST_END // must be last - used to validate format type + }; + + // Construct the muxer with the output file path. + MediaMuxer(const char *path, OutputFormat format); + + // Construct the muxer with the file descriptor. Note that the MediaMuxer + // will close this file at stop(). + MediaMuxer(int fd, OutputFormat format); + + virtual ~MediaMuxer(); + + /** + * Add a track with its format information. This should be + * called before start(). + * @param format the track's format. + * @return the track's index or negative number if error. + */ + ssize_t addTrack(const sp<AMessage> &format); + + /** + * Start muxing. Make sure all the tracks have been added before + * calling this. + */ + status_t start(); + + /** + * Set the orientation hint. + * @param degrees The rotation degrees. It has to be either 0, + * 90, 180 or 270. + * @return OK if no error. + */ + status_t setOrientationHint(int degrees); + + /** + * Stop muxing. + * This method is a blocking call. Depending on how + * much data is bufferred internally, the time needed for stopping + * the muxer may be time consuming. 
UI thread is + * not recommended for launching this call. + * @return OK if no error. + */ + status_t stop(); + + /** + * Send a sample buffer for muxing. + * The buffer can be reused once this method returns. Typically, + * this function won't be blocked for very long, and thus there + * is no need to use a separate thread calling this method to + * push a buffer. + * @param buffer the incoming sample buffer. + * @param trackIndex the buffer's track index number. + * @param timeUs the buffer's time stamp. + * @param flags the only supported flag for now is + * MediaCodec::BUFFER_FLAG_SYNCFRAME. + * @return OK if no error. + */ + status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex, + int64_t timeUs, uint32_t flags) ; + +private: + sp<MPEG4Writer> mWriter; + Vector< sp<MediaAdapter> > mTrackList; // Each track has its MediaAdapter. + sp<MetaData> mFileMeta; // Metadata for the whole file. + + Mutex mMuxerLock; + + enum State { + UNINITIALIZED, + INITIALIZED, + STARTED, + STOPPED + }; + State mState; + + DISALLOW_EVIL_CONSTRUCTORS(MediaMuxer); +}; + +} // namespace android + +#endif // MEDIA_MUXER_H_ + diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h index e91904c..de3fc36 100644 --- a/include/media/stagefright/MetaData.h +++ b/include/media/stagefright/MetaData.h @@ -35,6 +35,8 @@ enum { kKeyHeight = 'heig', // int32_t, image pixel kKeyDisplayWidth = 'dWid', // int32_t, display/presentation kKeyDisplayHeight = 'dHgt', // int32_t, display/presentation + kKeySARWidth = 'sarW', // int32_t, sampleAspectRatio width + kKeySARHeight = 'sarH', // int32_t, sampleAspectRatio height // a rectangle, if absent assumed to be (0, 0, width - 1, height - 1) kKeyCropRect = 'crop', @@ -110,7 +112,7 @@ enum { // kKeyTrackTimeStatus is used to track progress in elapsed time kKeyTrackTimeStatus = 'tktm', // int64_t - kKeyNotRealTime = 'ntrt', // bool (int32_t) + kKeyRealTimeRecording = 'rtrc', // bool (int32_t) kKeyNumBuffers = 
'nbbf', // int32_t // Ogg files can be tagged to be automatically looping... @@ -155,6 +157,10 @@ enum { kKeyCryptoKey = 'cryK', // uint8_t[16] kKeyCryptoIV = 'cryI', // uint8_t[16] kKeyCryptoMode = 'cryM', // int32_t + + kKeyCryptoDefaultIVSize = 'cryS', // int32_t + + kKeyPssh = 'pssh', // raw data }; enum { diff --git a/include/media/stagefright/NativeWindowWrapper.h b/include/media/stagefright/NativeWindowWrapper.h index 97cc0ce..cfeec22 100644 --- a/include/media/stagefright/NativeWindowWrapper.h +++ b/include/media/stagefright/NativeWindowWrapper.h @@ -18,29 +18,29 @@ #define NATIVE_WINDOW_WRAPPER_H_ -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> namespace android { -// SurfaceTextureClient derives from ANativeWindow which derives from multiple +// Surface derives from ANativeWindow which derives from multiple // base classes, in order to carry it in AMessages, we'll temporarily wrap it // into a NativeWindowWrapper. struct NativeWindowWrapper : RefBase { NativeWindowWrapper( - const sp<SurfaceTextureClient> &surfaceTextureClient) : + const sp<Surface> &surfaceTextureClient) : mSurfaceTextureClient(surfaceTextureClient) { } sp<ANativeWindow> getNativeWindow() const { return mSurfaceTextureClient; } - sp<SurfaceTextureClient> getSurfaceTextureClient() const { + sp<Surface> getSurfaceTextureClient() const { return mSurfaceTextureClient; } private: - const sp<SurfaceTextureClient> mSurfaceTextureClient; + const sp<Surface> mSurfaceTextureClient; DISALLOW_EVIL_CONSTRUCTORS(NativeWindowWrapper); }; diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h index 0833110..5ae6f6b 100644 --- a/include/media/stagefright/NuMediaExtractor.h +++ b/include/media/stagefright/NuMediaExtractor.h @@ -55,6 +55,8 @@ struct NuMediaExtractor : public RefBase { size_t countTracks() const; status_t getTrackFormat(size_t index, sp<AMessage> *format) const; + status_t getFileFormat(sp<AMessage> *format) const; + 
status_t selectTrack(size_t index); status_t unselectTrack(size_t index); diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h index e56527d..5f21da9 100644 --- a/include/media/stagefright/SurfaceMediaSource.h +++ b/include/media/stagefright/SurfaceMediaSource.h @@ -17,7 +17,7 @@ #ifndef ANDROID_GUI_SURFACEMEDIASOURCE_H #define ANDROID_GUI_SURFACEMEDIASOURCE_H -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <gui/BufferQueue.h> #include <utils/threads.h> @@ -35,7 +35,7 @@ class GraphicBuffer; // ASSUMPTIONS // 1. SurfaceMediaSource is initialized with width*height which // can never change. However, deqeueue buffer does not currently -// enforce this as in BufferQueue, dequeue can be used by SurfaceTexture +// enforce this as in BufferQueue, dequeue can be used by Surface // which can modify the default width and heght. Also neither the width // nor height can be 0. // 2. setSynchronousMode is never used (basically no one should call @@ -122,7 +122,7 @@ public: protected: // Implementation of the BufferQueue::ConsumerListener interface. These - // calls are used to notify the SurfaceTexture of asynchronous events in the + // calls are used to notify the Surface of asynchronous events in the // BufferQueue. virtual void onFrameAvailable(); @@ -157,7 +157,7 @@ private: // mCurrentSlot is the buffer slot index of the buffer that is currently // being used by buffer consumer // (e.g. StageFrightRecorder in the case of SurfaceMediaSource or GLTexture - // in the case of SurfaceTexture). + // in the case of Surface). // It is initialized to INVALID_BUFFER_SLOT, // indicating that no buffer slot is currently bound to the texture. 
Note, // however, that a value of INVALID_BUFFER_SLOT does not necessarily mean diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h index 8213af9..73940d3 100644 --- a/include/media/stagefright/Utils.h +++ b/include/media/stagefright/Utils.h @@ -18,6 +18,7 @@ #define UTILS_H_ +#include <media/stagefright/foundation/AString.h> #include <stdint.h> #include <utils/Errors.h> #include <utils/RefBase.h> @@ -45,6 +46,8 @@ status_t convertMetaDataToMessage( void convertMessageToMetaData( const sp<AMessage> &format, sp<MetaData> &meta); +AString MakeUserAgent(); + } // namespace android #endif // UTILS_H_ diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h index 5b133f3..41e20f8 100644 --- a/include/private/media/AudioTrackShared.h +++ b/include/private/media/AudioTrackShared.h @@ -21,43 +21,44 @@ #include <sys/types.h> #include <utils/threads.h> +#include <utils/Log.h> namespace android { // ---------------------------------------------------------------------------- // Maximum cumulated timeout milliseconds before restarting audioflinger thread -#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP init time +#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP + // init time #define MAX_RUN_TIMEOUT_MS 1000 #define WAIT_PERIOD_MS 10 -#define RESTORE_TIMEOUT_MS 5000 // Maximum waiting time for a track to be restored - -#define CBLK_UNDERRUN_MSK 0x0001 -#define CBLK_UNDERRUN_ON 0x0001 // underrun (out) or overrrun (in) indication -#define CBLK_UNDERRUN_OFF 0x0000 // no underrun -#define CBLK_DIRECTION_MSK 0x0002 -#define CBLK_DIRECTION_OUT 0x0002 // this cblk is for an AudioTrack -#define CBLK_DIRECTION_IN 0x0000 // this cblk is for an AudioRecord -#define CBLK_FORCEREADY_MSK 0x0004 -#define CBLK_FORCEREADY_ON 0x0004 // track is considered ready immediately by AudioFlinger -#define CBLK_FORCEREADY_OFF 0x0000 // track is ready 
when buffer full -#define CBLK_INVALID_MSK 0x0008 -#define CBLK_INVALID_ON 0x0008 // track buffer is invalidated by AudioFlinger: -#define CBLK_INVALID_OFF 0x0000 // must be re-created -#define CBLK_DISABLED_MSK 0x0010 -#define CBLK_DISABLED_ON 0x0010 // track disabled by AudioFlinger due to underrun: -#define CBLK_DISABLED_OFF 0x0000 // must be re-started -#define CBLK_RESTORING_MSK 0x0020 -#define CBLK_RESTORING_ON 0x0020 // track is being restored after invalidation -#define CBLK_RESTORING_OFF 0x0000 // by AudioFlinger -#define CBLK_RESTORED_MSK 0x0040 -#define CBLK_RESTORED_ON 0x0040 // track has been restored after invalidation -#define CBLK_RESTORED_OFF 0x0040 // by AudioFlinger -#define CBLK_FAST 0x0080 // AudioFlinger successfully created a fast track + +#define CBLK_UNDERRUN 0x01 // set: underrun (out) or overrrun (in), clear: no underrun or overrun +#define CBLK_FORCEREADY 0x02 // set: track is considered ready immediately by AudioFlinger, + // clear: track is ready when buffer full +#define CBLK_INVALID 0x04 // track buffer invalidated by AudioFlinger, need to re-create +#define CBLK_DISABLED 0x08 // track disabled by AudioFlinger due to underrun, need to re-start + +struct AudioTrackSharedStreaming { + // similar to NBAIO MonoPipe + volatile int32_t mFront; + volatile int32_t mRear; +}; + +// future +struct AudioTrackSharedStatic { + int mReserved; +}; + +// ---------------------------------------------------------------------------- // Important: do not add any virtual methods, including ~ struct audio_track_cblk_t { + friend class Proxy; + friend class AudioTrackClientProxy; + friend class AudioRecordClientProxy; + friend class ServerProxy; // The data members are grouped so that members accessed frequently and in the same context // are in the same line of data cache. 
@@ -70,12 +71,14 @@ struct audio_track_cblk_t uint32_t userBase; uint32_t serverBase; - // if there is a shared buffer, "buffers" is the value of pointer() for the shared - // buffer, otherwise "buffers" points immediately after the control block - void* buffers; - uint32_t frameCount; + int mPad1; // unused, but preserves cache line alignment + + size_t frameCount_; // used during creation to pass actual track buffer size + // from AudioFlinger to client, and not referenced again + // FIXME remove here and replace by createTrack() in/out parameter + // renamed to "_" to detect incorrect use - // Cache line boundary + // Cache line boundary (32 bytes) uint32_t loopStart; uint32_t loopEnd; // read-only for server, read/write for client @@ -87,20 +90,19 @@ struct audio_track_cblk_t // For AudioTrack only, not used by AudioRecord. private: uint32_t mVolumeLR; -public: - uint32_t sampleRate; + uint32_t mSampleRate; // AudioTrack only: client's requested sample rate in Hz + // or 0 == default. Write-only client, read-only server. 
- // NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for - // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of - // 16 bit because data is converted to 16 bit before being stored in buffer + uint8_t mPad2; // unused +public: // read-only for client, server writes once at initialization and is then read-only - uint8_t frameSize; // would normally be size_t, but 8 bits is plenty uint8_t mName; // normal tracks: track name, fast tracks: track index // used by client only - uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger + uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting + // audioflinger uint16_t waitTimeMs; // Cumulated wait time, used by client only private: @@ -111,43 +113,184 @@ public: // Cache line boundary (32 bytes) +#if 0 + union { + AudioTrackSharedStreaming mStreaming; + AudioTrackSharedStatic mStatic; + int mAlign[8]; + } u; + + // Cache line boundary (32 bytes) +#endif + // Since the control block is always located in shared memory, this constructor // is only used for placement new(). It is never used for regular new() or stack. audio_track_cblk_t(); - uint32_t stepUser(uint32_t frameCount); // called by client only, where - // client includes regular AudioTrack and AudioFlinger::PlaybackThread::OutputTrack - bool stepServer(uint32_t frameCount); // called by server only - void* buffer(uint32_t offset) const; - uint32_t framesAvailable(); - uint32_t framesAvailable_l(); - uint32_t framesReady(); // called by server only + +private: + // if there is a shared buffer, "buffers" is the value of pointer() for the shared + // buffer, otherwise "buffers" points immediately after the control block + void* buffer(void *buffers, uint32_t frameSize, size_t offset) const; + bool tryLock(); - // No barriers on the following operations, so the ordering of loads/stores - // with respect to other parameters is UNPREDICTABLE. That's considered safe. 
+ // isOut == true means AudioTrack, isOut == false means AudioRecord + bool stepServer(size_t stepCount, size_t frameCount, bool isOut); + uint32_t stepUser(size_t stepCount, size_t frameCount, bool isOut); + uint32_t framesAvailable(size_t frameCount, bool isOut); + uint32_t framesAvailable_l(size_t frameCount, bool isOut); + uint32_t framesReady(bool isOut); +}; + +// ---------------------------------------------------------------------------- + +// Proxy for shared memory control block, to isolate callers from needing to know the details. +// There is exactly one ClientProxy and one ServerProxy per shared memory control block. +// The proxies are located in normal memory, and are not multi-thread safe within a given side. +class Proxy { +protected: + Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize) + : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize) { } + virtual ~Proxy() { } + +public: + void* buffer(size_t offset) const { + return mCblk->buffer(mBuffers, mFrameSize, offset); + } + +protected: + // These refer to shared memory, and are virtual addresses with respect to the current process. + // They may have different virtual addresses within the other process. 
+ audio_track_cblk_t* const mCblk; // the control block + void* const mBuffers; // starting address of buffers + + const size_t mFrameCount; // not necessarily a power of 2 + const size_t mFrameSize; // in bytes +#if 0 + const size_t mFrameCountP2; // mFrameCount rounded to power of 2, streaming mode +#endif + +}; + +// ---------------------------------------------------------------------------- + +// Proxy seen by AudioTrack client and AudioRecord client +class ClientProxy : public Proxy { +protected: + ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize) + : Proxy(cblk, buffers, frameCount, frameSize) { } + virtual ~ClientProxy() { } +}; + +// ---------------------------------------------------------------------------- + +// Proxy used by AudioTrack client, which also includes AudioFlinger::PlaybackThread::OutputTrack +class AudioTrackClientProxy : public ClientProxy { +public: + AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize) + : ClientProxy(cblk, buffers, frameCount, frameSize) { } + virtual ~AudioTrackClientProxy() { } + + // No barriers on the following operations, so the ordering of loads/stores + // with respect to other parameters is UNPREDICTABLE. That's considered safe. 
+ + // caller must limit to 0.0 <= sendLevel <= 1.0 + void setSendLevel(float sendLevel) { + mCblk->mSendLevel = uint16_t(sendLevel * 0x1000); + } - // for AudioTrack client only, caller must limit to 0.0 <= sendLevel <= 1.0 - void setSendLevel(float sendLevel) { - mSendLevel = uint16_t(sendLevel * 0x1000); - } + // caller must limit to 0 <= volumeLR <= 0x10001000 + void setVolumeLR(uint32_t volumeLR) { + mCblk->mVolumeLR = volumeLR; + } - // for AudioFlinger only; the return value must be validated by the caller - uint16_t getSendLevel_U4_12() const { - return mSendLevel; - } + void setSampleRate(uint32_t sampleRate) { + mCblk->mSampleRate = sampleRate; + } - // for AudioTrack client only, caller must limit to 0 <= volumeLR <= 0x10001000 - void setVolumeLR(uint32_t volumeLR) { - mVolumeLR = volumeLR; - } + // called by: + // PlaybackThread::OutputTrack::write + // AudioTrack::createTrack_l + // AudioTrack::releaseBuffer + // AudioTrack::reload + // AudioTrack::restoreTrack_l (2 places) + size_t stepUser(size_t stepCount) { + return mCblk->stepUser(stepCount, mFrameCount, true /*isOut*/); + } - // for AudioFlinger only; the return value must be validated by the caller - uint32_t getVolumeLR() const { - return mVolumeLR; - } + // called by AudioTrack::obtainBuffer and AudioTrack::processBuffer + size_t framesAvailable() { + return mCblk->framesAvailable(mFrameCount, true /*isOut*/); + } + + // called by AudioTrack::obtainBuffer and PlaybackThread::OutputTrack::obtainBuffer + // FIXME remove this API since it assumes a lock that should be invisible to caller + size_t framesAvailable_l() { + return mCblk->framesAvailable_l(mFrameCount, true /*isOut*/); + } }; +// ---------------------------------------------------------------------------- + +// Proxy used by AudioRecord client +class AudioRecordClientProxy : public ClientProxy { +public: + AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize) + : ClientProxy(cblk, 
buffers, frameCount, frameSize) { } + ~AudioRecordClientProxy() { } + + // called by AudioRecord::releaseBuffer + size_t stepUser(size_t stepCount) { + return mCblk->stepUser(stepCount, mFrameCount, false /*isOut*/); + } + + // called by AudioRecord::processBuffer + size_t framesAvailable() { + return mCblk->framesAvailable(mFrameCount, false /*isOut*/); + } + + // called by AudioRecord::obtainBuffer + size_t framesReady() { + return mCblk->framesReady(false /*isOut*/); + } + +}; + +// ---------------------------------------------------------------------------- + +// Proxy used by AudioFlinger server +class ServerProxy : public Proxy { +public: + ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut) + : Proxy(cblk, buffers, frameCount, frameSize), mIsOut(isOut) { } + virtual ~ServerProxy() { } + + // for AudioTrack and AudioRecord + bool step(size_t stepCount) { return mCblk->stepServer(stepCount, mFrameCount, mIsOut); } + + // return value of these methods must be validated by the caller + uint32_t getSampleRate() const { return mCblk->mSampleRate; } + uint16_t getSendLevel_U4_12() const { return mCblk->mSendLevel; } + uint32_t getVolumeLR() const { return mCblk->mVolumeLR; } + + // for AudioTrack only + size_t framesReady() { + ALOG_ASSERT(mIsOut); + return mCblk->framesReady(true); + } + + // for AudioRecord only, called by RecordThread::RecordTrack::getNextBuffer + // FIXME remove this API since it assumes a lock that should be invisible to caller + size_t framesAvailableIn_l() { + ALOG_ASSERT(!mIsOut); + return mCblk->framesAvailable_l(mFrameCount, false); + } + +private: + const bool mIsOut; // true for AudioTrack, false for AudioRecord + +}; // ---------------------------------------------------------------------------- diff --git a/include/private/media/StaticAudioTrackState.h b/include/private/media/StaticAudioTrackState.h new file mode 100644 index 0000000..46a5946 --- /dev/null +++ 
b/include/private/media/StaticAudioTrackState.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef STATIC_AUDIO_TRACK_STATE_H +#define STATIC_AUDIO_TRACK_STATE_H + +namespace android { + +// Represents a single state of an AudioTrack that was created in static mode (shared memory buffer +// supplied by the client). This state needs to be communicated from the client to server. As this +// state is too large to be updated atomically without a mutex, and mutexes aren't allowed here, the +// state is wrapped by a SingleStateQueue. 
+struct StaticAudioTrackState { + // do not define constructors, destructors, or virtual methods + size_t mLoopStart; + size_t mLoopEnd; + int mLoopCount; +}; + +} // namespace android + +#endif // STATIC_AUDIO_TRACK_STATE_H diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk index 0ed7e6c..2286827 100755 --- a/libvideoeditor/lvpp/Android.mk +++ b/libvideoeditor/lvpp/Android.mk @@ -50,11 +50,11 @@ LOCAL_SHARED_LIBRARIES := \ libaudioutils \ libbinder \ libcutils \ + liblog \ libEGL \ libGLESv2 \ libgui \ libmedia \ - libmedia_native \ libdrmframework \ libstagefright \ libstagefright_foundation \ diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp index 2e15ff9..702900b 100755 --- a/libvideoeditor/lvpp/NativeWindowRenderer.cpp +++ b/libvideoeditor/lvpp/NativeWindowRenderer.cpp @@ -20,8 +20,8 @@ #include <GLES2/gl2.h> #include <GLES2/gl2ext.h> #include <cutils/log.h> -#include <gui/SurfaceTexture.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/GLConsumer.h> +#include <gui/Surface.h> #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/foundation/ADebug.h> @@ -315,8 +315,8 @@ NativeWindowRenderer::~NativeWindowRenderer() { } void NativeWindowRenderer::render(RenderInput* input) { - sp<SurfaceTexture> ST = input->mST; - sp<SurfaceTextureClient> STC = input->mSTC; + sp<GLConsumer> ST = input->mST; + sp<Surface> STC = input->mSTC; if (input->mIsExternalBuffer) { queueExternalBuffer(STC.get(), input->mBuffer, @@ -568,8 +568,8 @@ void NativeWindowRenderer::destroyRenderInput(RenderInput* input) { RenderInput::RenderInput(NativeWindowRenderer* renderer, GLuint textureId) : mRenderer(renderer) , mTextureId(textureId) { - mST = new SurfaceTexture(mTextureId); - mSTC = new SurfaceTextureClient(mST); + mST = new GLConsumer(mTextureId); + mSTC = new Surface(mST->getBufferQueue()); native_window_connect(mSTC.get(), 
NATIVE_WINDOW_API_MEDIA); } diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.h b/libvideoeditor/lvpp/NativeWindowRenderer.h index 8fbb4f9..26b4cba 100755 --- a/libvideoeditor/lvpp/NativeWindowRenderer.h +++ b/libvideoeditor/lvpp/NativeWindowRenderer.h @@ -37,16 +37,16 @@ // we only expect that happens briefly when one clip is about to finish // and the next clip is about to start. // -// We allocate a SurfaceTexture for each RenderInput and the user can use +// We allocate a Surface for each RenderInput and the user can use // the getTargetWindow() function to get the corresponding ANativeWindow -// for that SurfaceTexture. The intention is that the user can pass that +// for that Surface. The intention is that the user can pass that // ANativeWindow to OMXCodec::Create() so the codec can decode directly // to buffers provided by the texture. namespace android { -class SurfaceTexture; -class SurfaceTextureClient; +class GLConsumer; +class Surface; class RenderInput; class NativeWindowRenderer { @@ -110,7 +110,7 @@ private: // destination aspect ratio. GLfloat mPositionCoordinates[8]; - // We use a different GL id for each SurfaceTexture. + // We use a different GL id for each Surface. GLuint mNextTextureId; // Number of existing RenderInputs, just for debugging. @@ -146,7 +146,7 @@ private: class RenderInput { public: - // Returns the ANativeWindow corresponds to the SurfaceTexture. + // Returns the ANativeWindow corresponds to the Surface. ANativeWindow* getTargetWindow(); // Updates video frame size from the MediaSource's metadata. Specifically @@ -156,7 +156,7 @@ public: // Renders the buffer with the given video effect and rending mode. // The video effets are defined in VideoEditorTools.h // Set isExternalBuffer to true only when the buffer given is not - // provided by the SurfaceTexture. + // provided by the Surface. 
void render(MediaBuffer *buffer, uint32_t videoEffect, M4xVSS_MediaRendering renderingMode, bool isExternalBuffer); private: @@ -164,8 +164,8 @@ private: ~RenderInput(); NativeWindowRenderer* mRenderer; GLuint mTextureId; - sp<SurfaceTexture> mST; - sp<SurfaceTextureClient> mSTC; + sp<GLConsumer> mST; + sp<Surface> mSTC; int mWidth, mHeight; // These are only valid during render() calls diff --git a/libvideoeditor/lvpp/PreviewPlayer.cpp b/libvideoeditor/lvpp/PreviewPlayer.cpp index 34731d7..2bd9f84 100755 --- a/libvideoeditor/lvpp/PreviewPlayer.cpp +++ b/libvideoeditor/lvpp/PreviewPlayer.cpp @@ -31,8 +31,8 @@ #include <media/stagefright/OMXCodec.h> #include <media/stagefright/foundation/ADebug.h> #include <gui/Surface.h> -#include <gui/ISurfaceTexture.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/IGraphicBufferProducer.h> +#include <gui/Surface.h> #include "VideoEditorPreviewController.h" #include "DummyAudioSource.h" @@ -1775,12 +1775,12 @@ void PreviewPlayer::setSurface(const sp<Surface> &surface) { setNativeWindow_l(surface); } -void PreviewPlayer::setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) { +void PreviewPlayer::setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer) { Mutex::Autolock autoLock(mLock); mSurface.clear(); - if (surfaceTexture != NULL) { - setNativeWindow_l(new SurfaceTextureClient(surfaceTexture)); + if (bufferProducer != NULL) { + setNativeWindow_l(new Surface(bufferProducer)); } } diff --git a/libvideoeditor/lvpp/PreviewPlayer.h b/libvideoeditor/lvpp/PreviewPlayer.h index 177853f..5a13b58 100755 --- a/libvideoeditor/lvpp/PreviewPlayer.h +++ b/libvideoeditor/lvpp/PreviewPlayer.h @@ -44,7 +44,7 @@ struct PreviewPlayer { bool isPlaying() const; void setSurface(const sp<Surface> &surface); - void setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture); + void setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer); status_t seekTo(int64_t timeUs); status_t getVideoDimensions(int32_t *width, 
int32_t *height) const; diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp index fc9fb49..91a4415 100755 --- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp +++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp @@ -81,10 +81,10 @@ status_t VideoEditorPlayer::setVideoSurface(const sp<Surface> &surface) { return OK; } -status_t VideoEditorPlayer::setVideoSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) { +status_t VideoEditorPlayer::setVideoSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer) { ALOGV("setVideoSurfaceTexture"); - mPlayer->setSurfaceTexture(surfaceTexture); + mPlayer->setSurfaceTexture(bufferProducer); return OK; } @@ -406,8 +406,8 @@ status_t VideoEditorPlayer::VeAudioOutput::open( } ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount); if (mTrack) close(); - int afSampleRate; - int afFrameCount; + uint32_t afSampleRate; + size_t afFrameCount; int frameCount; if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h index 2ab4eef..77194ab 100755 --- a/libvideoeditor/lvpp/VideoEditorPlayer.h +++ b/libvideoeditor/lvpp/VideoEditorPlayer.h @@ -99,7 +99,7 @@ public: virtual status_t setDataSource(int fd, int64_t offset, int64_t length); virtual status_t setVideoSurface(const sp<Surface> &surface); - virtual status_t setVideoSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture); + virtual status_t setVideoSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer); virtual status_t prepare(); virtual status_t prepareAsync(); virtual status_t start(); diff --git a/libvideoeditor/osal/src/Android.mk b/libvideoeditor/osal/src/Android.mk index b73b9ae..4f38b0c 100755 --- a/libvideoeditor/osal/src/Android.mk +++ b/libvideoeditor/osal/src/Android.mk @@ -41,7 +41,7 @@ LOCAL_SRC_FILES:= \ LOCAL_MODULE_TAGS := optional -LOCAL_SHARED_LIBRARIES := libcutils libutils 
+LOCAL_SHARED_LIBRARIES := libcutils libutils liblog LOCAL_C_INCLUDES += \ $(TOP)/frameworks/av/libvideoeditor/osal/inc \ @@ -64,4 +64,3 @@ LOCAL_CFLAGS += -Wno-multichar \ -DUSE_STAGEFRIGHT_3GPP_READER include $(BUILD_SHARED_LIBRARY) - diff --git a/libvideoeditor/vss/src/Android.mk b/libvideoeditor/vss/src/Android.mk index cda7a83..0caa15b 100755 --- a/libvideoeditor/vss/src/Android.mk +++ b/libvideoeditor/vss/src/Android.mk @@ -57,6 +57,7 @@ LOCAL_SHARED_LIBRARIES := \ libaudioutils \ libbinder \ libcutils \ + liblog \ libmedia \ libstagefright \ libstagefright_foundation \ @@ -96,4 +97,3 @@ LOCAL_CFLAGS += -Wno-multichar \ -DDECODE_GIF_ON_SAVING include $(BUILD_SHARED_LIBRARY) - diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp index 5309bd4..5a7237d 100755 --- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp +++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp @@ -189,8 +189,8 @@ void displayMetaData(const sp<MetaData> meta) { if (meta->findInt64(kKeyTrackTimeStatus, &int64Data)) { LOG1("displayMetaData kKeyTrackTimeStatus %lld", int64Data); } - if (meta->findInt32(kKeyNotRealTime, &int32Data)) { - LOG1("displayMetaData kKeyNotRealTime %d", int32Data); + if (meta->findInt32(kKeyRealTimeRecording, &int32Data)) { + LOG1("displayMetaData kKeyRealTimeRecording %d", int32Data); } } diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk index 526f17b..632acbc 100644 --- a/media/common_time/Android.mk +++ b/media/common_time/Android.mk @@ -16,6 +16,7 @@ LOCAL_SRC_FILES := cc_helper.cpp \ utils.cpp LOCAL_SHARED_LIBRARIES := libbinder \ libhardware \ - libutils + libutils \ + liblog include $(BUILD_SHARED_LIBRARY) diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk index 95ca6fd..5d0a87c 100644 --- a/media/libeffects/downmix/Android.mk +++ b/media/libeffects/downmix/Android.mk @@ -7,7 +7,7 @@ 
LOCAL_SRC_FILES:= \ EffectDownmix.c LOCAL_SHARED_LIBRARIES := \ - libcutils + libcutils liblog LOCAL_MODULE:= libdownmix @@ -25,4 +25,6 @@ LOCAL_C_INCLUDES := \ LOCAL_PRELINK_MODULE := false +LOCAL_CFLAGS += -fvisibility=hidden + include $(BUILD_SHARED_LIBRARY) diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c index 5bf052a..f17a6e8 100644 --- a/media/libeffects/downmix/EffectDownmix.c +++ b/media/libeffects/downmix/EffectDownmix.c @@ -58,13 +58,13 @@ const struct effect_interface_s gDownmixInterface = { NULL /* no process_reverse function, no reference stream needed */ }; +// This is the only symbol that needs to be exported +__attribute__ ((visibility ("default"))) audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { tag : AUDIO_EFFECT_LIBRARY_TAG, version : EFFECT_LIBRARY_API_VERSION, name : "Downmix Library", implementor : "The Android Open Source Project", - query_num_effects : DownmixLib_QueryNumberEffects, - query_effect : DownmixLib_QueryEffect, create_effect : DownmixLib_Create, release_effect : DownmixLib_Release, get_descriptor : DownmixLib_GetDescriptor, @@ -159,25 +159,6 @@ void Downmix_testIndexComputation(uint32_t mask) { /*--- Effect Library Interface Implementation ---*/ -int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects) { - ALOGV("DownmixLib_QueryNumberEffects()"); - *pNumEffects = kNbEffects; - return 0; -} - -int32_t DownmixLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) { - ALOGV("DownmixLib_QueryEffect() index=%d", index); - if (pDescriptor == NULL) { - return -EINVAL; - } - if (index >= (uint32_t)kNbEffects) { - return -EINVAL; - } - memcpy(pDescriptor, gDescriptors[index], sizeof(effect_descriptor_t)); - return 0; -} - - int32_t DownmixLib_Create(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h index be3ca3f..cb6b957 100644 --- 
a/media/libeffects/downmix/EffectDownmix.h +++ b/media/libeffects/downmix/EffectDownmix.h @@ -65,9 +65,6 @@ const uint32_t kUnsupported = * Effect API *------------------------------------ */ -int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects); -int32_t DownmixLib_QueryEffect(uint32_t index, - effect_descriptor_t *pDescriptor); int32_t DownmixLib_Create(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, diff --git a/media/libeffects/factory/Android.mk b/media/libeffects/factory/Android.mk index 6e69151..60a6ce5 100644 --- a/media/libeffects/factory/Android.mk +++ b/media/libeffects/factory/Android.mk @@ -7,7 +7,7 @@ LOCAL_SRC_FILES:= \ EffectsFactory.c LOCAL_SHARED_LIBRARIES := \ - libcutils + libcutils liblog LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES) LOCAL_MODULE:= libeffects diff --git a/media/libeffects/lvm/lib/Android.mk b/media/libeffects/lvm/lib/Android.mk index f49267e..bb56c75 100644 --- a/media/libeffects/lvm/lib/Android.mk +++ b/media/libeffects/lvm/lib/Android.mk @@ -105,8 +105,6 @@ LOCAL_SRC_FILES:= \ LOCAL_MODULE:= libmusicbundle - - LOCAL_C_INCLUDES += \ $(LOCAL_PATH)/Eq/lib \ $(LOCAL_PATH)/Eq/src \ @@ -121,8 +119,12 @@ LOCAL_C_INCLUDES += \ $(LOCAL_PATH)/StereoWidening/src \ $(LOCAL_PATH)/StereoWidening/lib +LOCAL_CFLAGS += -fvisibility=hidden + include $(BUILD_STATIC_LIBRARY) + + # Reverb library include $(CLEAR_VARS) @@ -168,12 +170,11 @@ LOCAL_SRC_FILES:= \ LOCAL_MODULE:= libreverb - - LOCAL_C_INCLUDES += \ $(LOCAL_PATH)/Reverb/lib \ $(LOCAL_PATH)/Reverb/src \ $(LOCAL_PATH)/Common/lib \ $(LOCAL_PATH)/Common/src +LOCAL_CFLAGS += -fvisibility=hidden include $(BUILD_STATIC_LIBRARY) diff --git a/media/libeffects/lvm/wrapper/Android.mk b/media/libeffects/lvm/wrapper/Android.mk index 4313424..f1af389 100644 --- a/media/libeffects/lvm/wrapper/Android.mk +++ b/media/libeffects/lvm/wrapper/Android.mk @@ -9,28 +9,27 @@ LOCAL_ARM_MODE := arm LOCAL_SRC_FILES:= \ Bundle/EffectBundle.cpp +LOCAL_CFLAGS += -fvisibility=hidden + 
LOCAL_MODULE:= libbundlewrapper LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx - - LOCAL_STATIC_LIBRARIES += libmusicbundle LOCAL_SHARED_LIBRARIES := \ libcutils \ libdl - LOCAL_C_INCLUDES += \ $(LOCAL_PATH)/Bundle \ $(LOCAL_PATH)/../lib/Common/lib/ \ $(LOCAL_PATH)/../lib/Bundle/lib/ \ $(call include-path-for, audio-effects) - include $(BUILD_SHARED_LIBRARY) + # reverb wrapper include $(CLEAR_VARS) @@ -39,12 +38,12 @@ LOCAL_ARM_MODE := arm LOCAL_SRC_FILES:= \ Reverb/EffectReverb.cpp +LOCAL_CFLAGS += -fvisibility=hidden + LOCAL_MODULE:= libreverbwrapper LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx - - LOCAL_STATIC_LIBRARIES += libreverb LOCAL_SHARED_LIBRARIES := \ diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp index d706c2d..54f8d9e 100644 --- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp +++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp @@ -158,42 +158,6 @@ int Volume_getParameter (EffectContext *pContext, int Effect_setEnabled(EffectContext *pContext, bool enabled); /* Effect Library Interface Implementation */ -extern "C" int EffectQueryNumberEffects(uint32_t *pNumEffects){ - ALOGV("\n\tEffectQueryNumberEffects start"); - *pNumEffects = 4; - ALOGV("\tEffectQueryNumberEffects creating %d effects", *pNumEffects); - ALOGV("\tEffectQueryNumberEffects end\n"); - return 0; -} /* end EffectQueryNumberEffects */ - -extern "C" int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor){ - ALOGV("\n\tEffectQueryEffect start"); - ALOGV("\tEffectQueryEffect processing index %d", index); - - if (pDescriptor == NULL){ - ALOGV("\tLVM_ERROR : EffectQueryEffect was passed NULL pointer"); - return -EINVAL; - } - if (index > 3){ - ALOGV("\tLVM_ERROR : EffectQueryEffect index out of range %d", index); - return -ENOENT; - } - if(index == LVM_BASS_BOOST){ - ALOGV("\tEffectQueryEffect processing LVM_BASS_BOOST"); - *pDescriptor = gBassBoostDescriptor; - 
}else if(index == LVM_VIRTUALIZER){ - ALOGV("\tEffectQueryEffect processing LVM_VIRTUALIZER"); - *pDescriptor = gVirtualizerDescriptor; - } else if(index == LVM_EQUALIZER){ - ALOGV("\tEffectQueryEffect processing LVM_EQUALIZER"); - *pDescriptor = gEqualizerDescriptor; - } else if(index == LVM_VOLUME){ - ALOGV("\tEffectQueryEffect processing LVM_VOLUME"); - *pDescriptor = gVolumeDescriptor; - } - ALOGV("\tEffectQueryEffect end\n"); - return 0; -} /* end EffectQueryEffect */ extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, @@ -260,6 +224,7 @@ extern "C" int EffectCreate(const effect_uuid_t *uuid, pContext->pBundledContext->NumberEffectsEnabled = 0; pContext->pBundledContext->NumberEffectsCalled = 0; pContext->pBundledContext->firstVolume = LVM_TRUE; + pContext->pBundledContext->volume = 0; #ifdef LVM_PCM char fileName[256]; @@ -3299,13 +3264,13 @@ const struct effect_interface_s gLvmEffectInterface = { NULL, }; /* end gLvmEffectInterface */ +// This is the only symbol that needs to be exported +__attribute__ ((visibility ("default"))) audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { tag : AUDIO_EFFECT_LIBRARY_TAG, version : EFFECT_LIBRARY_API_VERSION, name : "Effect Bundle Library", implementor : "NXP Software Ltd.", - query_num_effects : android::EffectQueryNumberEffects, - query_effect : android::EffectQueryEffect, create_effect : android::EffectCreate, release_effect : android::EffectRelease, get_descriptor : android::EffectGetDescriptor, diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp index 941d651..8a96212 100755..100644 --- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp +++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp @@ -186,30 +186,6 @@ int Reverb_getParameter (ReverbContext *pContext, int Reverb_LoadPreset (ReverbContext *pContext); /* Effect Library Interface Implementation */ -extern "C" int EffectQueryNumberEffects(uint32_t 
*pNumEffects){ - ALOGV("\n\tEffectQueryNumberEffects start"); - *pNumEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); - ALOGV("\tEffectQueryNumberEffects creating %d effects", *pNumEffects); - ALOGV("\tEffectQueryNumberEffects end\n"); - return 0; -} /* end EffectQueryNumberEffects */ - -extern "C" int EffectQueryEffect(uint32_t index, - effect_descriptor_t *pDescriptor){ - ALOGV("\n\tEffectQueryEffect start"); - ALOGV("\tEffectQueryEffect processing index %d", index); - if (pDescriptor == NULL){ - ALOGV("\tLVM_ERROR : EffectQueryEffect was passed NULL pointer"); - return -EINVAL; - } - if (index >= sizeof(gDescriptors) / sizeof(const effect_descriptor_t *)) { - ALOGV("\tLVM_ERROR : EffectQueryEffect index out of range %d", index); - return -ENOENT; - } - *pDescriptor = *gDescriptors[index]; - ALOGV("\tEffectQueryEffect end\n"); - return 0; -} /* end EffectQueryEffect */ extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, @@ -640,10 +616,6 @@ int Reverb_setConfig(ReverbContext *pContext, effect_config_t *pConfig){ || pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE); CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT); - if(pConfig->inputCfg.samplingRate != 44100){ - return -EINVAL; - } - //ALOGV("\tReverb_setConfig calling memcpy"); pContext->config = *pConfig; @@ -672,7 +644,7 @@ int Reverb_setConfig(ReverbContext *pContext, effect_config_t *pConfig){ return -EINVAL; } - if(pContext->SampleRate != SampleRate){ + if (pContext->SampleRate != SampleRate) { LVREV_ControlParams_st ActiveParams; LVREV_ReturnStatus_en LvmStatus = LVREV_SUCCESS; @@ -686,11 +658,14 @@ int Reverb_setConfig(ReverbContext *pContext, effect_config_t *pConfig){ LVM_ERROR_CHECK(LvmStatus, "LVREV_GetControlParameters", "Reverb_setConfig") if(LvmStatus != LVREV_SUCCESS) return -EINVAL; + ActiveParams.SampleRate = SampleRate; + LvmStatus = LVREV_SetControlParameters(pContext->hInstance, &ActiveParams); LVM_ERROR_CHECK(LvmStatus, 
"LVREV_SetControlParameters", "Reverb_setConfig") + if(LvmStatus != LVREV_SUCCESS) return -EINVAL; //ALOGV("\tReverb_setConfig Succesfully called LVREV_SetControlParameters\n"); - + pContext->SampleRate = SampleRate; }else{ //ALOGV("\tReverb_setConfig keep sampling rate at %d", SampleRate); } @@ -842,6 +817,7 @@ int Reverb_init(ReverbContext *pContext){ /* General parameters */ params.OperatingMode = LVM_MODE_ON; params.SampleRate = LVM_FS_44100; + pContext->SampleRate = LVM_FS_44100; if(pContext->config.inputCfg.channels == AUDIO_CHANNEL_OUT_MONO){ params.SourceFormat = LVM_MONO; @@ -2170,13 +2146,13 @@ const struct effect_interface_s gReverbInterface = { NULL, }; /* end gReverbInterface */ +// This is the only symbol that needs to be exported +__attribute__ ((visibility ("default"))) audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { tag : AUDIO_EFFECT_LIBRARY_TAG, version : EFFECT_LIBRARY_API_VERSION, name : "Reverb Library", implementor : "NXP Software Ltd.", - query_num_effects : android::EffectQueryNumberEffects, - query_effect : android::EffectQueryEffect, create_effect : android::EffectCreate, release_effect : android::EffectRelease, get_descriptor : android::EffectGetDescriptor, diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk index c13b9d4..c344352 100755..100644 --- a/media/libeffects/preprocessing/Android.mk +++ b/media/libeffects/preprocessing/Android.mk @@ -21,7 +21,8 @@ LOCAL_C_INCLUDES += $(call include-path-for, speex) LOCAL_SHARED_LIBRARIES := \ libwebrtc_audio_preprocessing \ libspeexresampler \ - libutils + libutils \ + liblog ifeq ($(TARGET_SIMULATOR),true) LOCAL_LDLIBS += -ldl @@ -29,4 +30,6 @@ else LOCAL_SHARED_LIBRARIES += libdl endif +LOCAL_CFLAGS += -fvisibility=hidden + include $(BUILD_SHARED_LIBRARY) diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp index 597866a..25586e8 100755..100644 --- 
a/media/libeffects/preprocessing/PreProcessing.cpp +++ b/media/libeffects/preprocessing/PreProcessing.cpp @@ -1818,30 +1818,6 @@ const struct effect_interface_s sEffectInterfaceReverse = { // Effect Library Interface Implementation //------------------------------------------------------------------------------ -int PreProcessingLib_QueryNumberEffects(uint32_t *pNumEffects) -{ - if (PreProc_Init() != 0) { - return sInitStatus; - } - if (pNumEffects == NULL) { - return -EINVAL; - } - *pNumEffects = PREPROC_NUM_EFFECTS; - return sInitStatus; -} - -int PreProcessingLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) -{ - if (PreProc_Init() != 0) { - return sInitStatus; - } - if (index >= PREPROC_NUM_EFFECTS) { - return -EINVAL; - } - *pDescriptor = *sDescriptors[index]; - return 0; -} - int PreProcessingLib_Create(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, @@ -1913,13 +1889,13 @@ int PreProcessingLib_GetDescriptor(const effect_uuid_t *uuid, return 0; } +// This is the only symbol that needs to be exported +__attribute__ ((visibility ("default"))) audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { tag : AUDIO_EFFECT_LIBRARY_TAG, version : EFFECT_LIBRARY_API_VERSION, name : "Audio Preprocessing Library", implementor : "The Android Open Source Project", - query_num_effects : PreProcessingLib_QueryNumberEffects, - query_effect : PreProcessingLib_QueryEffect, create_effect : PreProcessingLib_Create, release_effect : PreProcessingLib_Release, get_descriptor : PreProcessingLib_GetDescriptor diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp index 90ebe1f..c35453b 100644 --- a/media/libeffects/testlibs/EffectEqualizer.cpp +++ b/media/libeffects/testlibs/EffectEqualizer.cpp @@ -123,23 +123,6 @@ int Equalizer_setParameter(AudioEqualizer * pEqualizer, int32_t *pParam, void *p //--- Effect Library Interface Implementation // -extern "C" int EffectQueryNumberEffects(uint32_t *pNumEffects) 
{ - *pNumEffects = 1; - return 0; -} /* end EffectQueryNumberEffects */ - -extern "C" int EffectQueryEffect(uint32_t index, - effect_descriptor_t *pDescriptor) { - if (pDescriptor == NULL) { - return -EINVAL; - } - if (index > 0) { - return -EINVAL; - } - *pDescriptor = gEqualizerDescriptor; - return 0; -} /* end EffectQueryNext */ - extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, @@ -771,8 +754,6 @@ audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { version : EFFECT_LIBRARY_API_VERSION, name : "Test Equalizer Library", implementor : "The Android Open Source Project", - query_num_effects : android::EffectQueryNumberEffects, - query_effect : android::EffectQueryEffect, create_effect : android::EffectCreate, release_effect : android::EffectRelease, get_descriptor : android::EffectGetDescriptor, diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c index a87a834..c37f392 100644 --- a/media/libeffects/testlibs/EffectReverb.c +++ b/media/libeffects/testlibs/EffectReverb.c @@ -94,23 +94,6 @@ static const effect_descriptor_t * const gDescriptors[] = { /*--- Effect Library Interface Implementation ---*/ -int EffectQueryNumberEffects(uint32_t *pNumEffects) { - *pNumEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); - return 0; -} - -int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) { - if (pDescriptor == NULL) { - return -EINVAL; - } - if (index >= sizeof(gDescriptors) / sizeof(const effect_descriptor_t *)) { - return -EINVAL; - } - memcpy(pDescriptor, gDescriptors[index], - sizeof(effect_descriptor_t)); - return 0; -} - int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, @@ -2222,8 +2205,6 @@ audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { .version = EFFECT_LIBRARY_API_VERSION, .name = "Test Equalizer Library", .implementor = "The Android Open Source Project", - .query_num_effects = EffectQueryNumberEffects, - 
.query_effect = EffectQueryEffect, .create_effect = EffectCreate, .release_effect = EffectRelease, .get_descriptor = EffectGetDescriptor, diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h index 1fb14a7..e5248fe 100644 --- a/media/libeffects/testlibs/EffectReverb.h +++ b/media/libeffects/testlibs/EffectReverb.h @@ -300,9 +300,6 @@ typedef struct reverb_module_s { * Effect API *------------------------------------ */ -int EffectQueryNumberEffects(uint32_t *pNumEffects); -int EffectQueryEffect(uint32_t index, - effect_descriptor_t *pDescriptor); int EffectCreate(const effect_uuid_t *effectUID, int32_t sessionId, int32_t ioId, diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk index 76b5110..e196eb2 100644 --- a/media/libeffects/visualizer/Android.mk +++ b/media/libeffects/visualizer/Android.mk @@ -6,10 +6,11 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ EffectVisualizer.cpp -LOCAL_CFLAGS+= -O2 +LOCAL_CFLAGS+= -O2 -fvisibility=hidden LOCAL_SHARED_LIBRARIES := \ libcutils \ + liblog \ libdl LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp index 44baf93..e7eccf1 100644 --- a/media/libeffects/visualizer/EffectVisualizer.cpp +++ b/media/libeffects/visualizer/EffectVisualizer.cpp @@ -177,23 +177,6 @@ int Visualizer_init(VisualizerContext *pContext) //--- Effect Library Interface Implementation // -int VisualizerLib_QueryNumberEffects(uint32_t *pNumEffects) { - *pNumEffects = 1; - return 0; -} - -int VisualizerLib_QueryEffect(uint32_t index, - effect_descriptor_t *pDescriptor) { - if (pDescriptor == NULL) { - return -EINVAL; - } - if (index > 0) { - return -EINVAL; - } - *pDescriptor = gVisualizerDescriptor; - return 0; -} - int VisualizerLib_Create(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, @@ -574,14 +557,13 @@ const struct effect_interface_s 
gVisualizerInterface = { NULL, }; - +// This is the only symbol that needs to be exported +__attribute__ ((visibility ("default"))) audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { tag : AUDIO_EFFECT_LIBRARY_TAG, version : EFFECT_LIBRARY_API_VERSION, name : "Visualizer Library", implementor : "The Android Open Source Project", - query_num_effects : VisualizerLib_QueryNumberEffects, - query_effect : VisualizerLib_QueryEffect, create_effect : VisualizerLib_Create, release_effect : VisualizerLib_Release, get_descriptor : VisualizerLib_GetDescriptor, diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk index 54666fb..2c0c3a5 100644 --- a/media/libmedia/Android.mk +++ b/media/libmedia/Android.mk @@ -13,15 +13,19 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ AudioTrack.cpp \ + AudioTrackShared.cpp \ IAudioFlinger.cpp \ IAudioFlingerClient.cpp \ IAudioTrack.cpp \ IAudioRecord.cpp \ ICrypto.cpp \ + IDrm.cpp \ + IDrmClient.cpp \ IHDCP.cpp \ AudioRecord.cpp \ AudioSystem.cpp \ mediaplayer.cpp \ + IMediaLogService.cpp \ IMediaPlayerService.cpp \ IMediaPlayerClient.cpp \ IMediaRecorderClient.cpp \ @@ -51,10 +55,17 @@ LOCAL_SRC_FILES:= \ SoundPool.cpp \ SoundPoolThread.cpp +LOCAL_SRC_FILES += ../libnbaio/roundup.c + +# for <cutils/atomic-inline.h> +LOCAL_CFLAGS += -DANDROID_SMP=$(if $(findstring true,$(TARGET_CPU_SMP)),1,0) +LOCAL_SRC_FILES += SingleStateQueue.cpp +LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"' + LOCAL_SHARED_LIBRARIES := \ - libui libcutils libutils libbinder libsonivox libicuuc libexpat \ + libui liblog libcutils libutils libbinder libsonivox libicuuc libexpat \ libcamera_client libstagefright_foundation \ - libgui libdl libaudioutils libmedia_native + libgui libdl libaudioutils LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp index 680604b..8dfffb3 100644 --- a/media/libmedia/AudioEffect.cpp +++ 
b/media/libmedia/AudioEffect.cpp @@ -127,7 +127,7 @@ status_t AudioEffect::set(const effect_uuid_t *type, mIEffectClient = new EffectClient(this); - iEffect = audioFlinger->createEffect(getpid(), &mDescriptor, + iEffect = audioFlinger->createEffect((effect_descriptor_t *)&mDescriptor, mIEffectClient, priority, io, mSessionId, &mStatus, &mId, &enabled); if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) { @@ -152,7 +152,8 @@ status_t AudioEffect::set(const effect_uuid_t *type, mCblk->buffer = (uint8_t *)mCblk + bufOffset; iEffect->asBinder()->linkToDeath(mIEffectClient); - ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, mStatus, mEnabled); + ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, + mStatus, mEnabled); return mStatus; } @@ -266,9 +267,11 @@ status_t AudioEffect::setParameter(effect_param_t *param) uint32_t size = sizeof(int); uint32_t psize = ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize; - ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data, (param->psize == 8) ? *((int *)param->data + 1): -1); + ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data, + (param->psize == 8) ? *((int *)param->data + 1): -1); - return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size, ¶m->status); + return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size, + ¶m->status); } status_t AudioEffect::setParameterDeferred(effect_param_t *param) @@ -321,11 +324,14 @@ status_t AudioEffect::getParameter(effect_param_t *param) return BAD_VALUE; } - ALOGV("getParameter: param: %d, param2: %d", *(int *)param->data, (param->psize == 8) ? *((int *)param->data + 1): -1); + ALOGV("getParameter: param: %d, param2: %d", *(int *)param->data, + (param->psize == 8) ? 
*((int *)param->data + 1): -1); - uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize; + uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + + param->vsize; - return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param, &psize, param); + return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param, + &psize, param); } @@ -346,7 +352,8 @@ void AudioEffect::binderDied() void AudioEffect::controlStatusChanged(bool controlGranted) { - ALOGV("controlStatusChanged %p control %d callback %p mUserData %p", this, controlGranted, mCbf, mUserData); + ALOGV("controlStatusChanged %p control %d callback %p mUserData %p", this, controlGranted, mCbf, + mUserData); if (controlGranted) { if (mStatus == ALREADY_EXISTS) { mStatus = NO_ERROR; diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp index 8ea6306..40ff1bf 100644 --- a/media/libmedia/AudioRecord.cpp +++ b/media/libmedia/AudioRecord.cpp @@ -36,7 +36,7 @@ namespace android { // static status_t AudioRecord::getMinFrameCount( - int* frameCount, + size_t* frameCount, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask) @@ -47,14 +47,14 @@ status_t AudioRecord::getMinFrameCount( *frameCount = 0; size_t size = 0; - if (AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size) - != NO_ERROR) { - ALOGE("AudioSystem could not query the input buffer size."); + status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size); + if (status != NO_ERROR) { + ALOGE("AudioSystem could not query the input buffer size; status %d", status); return NO_INIT; } if (size == 0) { - ALOGE("Unsupported configuration: sampleRate %d, format %d, channelMask %#x", + ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x", sampleRate, format, channelMask); return BAD_VALUE; } @@ -63,7 
+63,7 @@ status_t AudioRecord::getMinFrameCount( size <<= 1; if (audio_is_linear_pcm(format)) { - int channelCount = popcount(channelMask); + uint32_t channelCount = popcount(channelMask); size /= channelCount * audio_bytes_per_sample(format); } @@ -75,7 +75,8 @@ status_t AudioRecord::getMinFrameCount( AudioRecord::AudioRecord() : mStatus(NO_INIT), mSessionId(0), - mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT) + mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT), + mProxy(NULL) { } @@ -90,10 +91,12 @@ AudioRecord::AudioRecord( int notificationFrames, int sessionId) : mStatus(NO_INIT), mSessionId(0), - mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT) + mPreviousPriority(ANDROID_PRIORITY_NORMAL), + mPreviousSchedulingGroup(SP_DEFAULT), + mProxy(NULL) { mStatus = set(inputSource, sampleRate, format, channelMask, - frameCount, cbf, user, notificationFrames, sessionId); + frameCount, cbf, user, notificationFrames, false /*threadCanCallJava*/, sessionId); } AudioRecord::~AudioRecord() @@ -112,6 +115,7 @@ AudioRecord::~AudioRecord() IPCThreadState::self()->flushCommands(); AudioSystem::releaseAudioSessionId(mSessionId); } + delete mProxy; } status_t AudioRecord::set( @@ -119,15 +123,22 @@ status_t AudioRecord::set( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, + int frameCountInt, callback_t cbf, void* user, int notificationFrames, bool threadCanCallJava, int sessionId) { + // FIXME "int" here is legacy and will be replaced by size_t later + if (frameCountInt < 0) { + ALOGE("Invalid frame count %d", frameCountInt); + return BAD_VALUE; + } + size_t frameCount = frameCountInt; - ALOGV("set(): sampleRate %d, channelMask %#x, frameCount %d",sampleRate, channelMask, frameCount); + ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask, + frameCount); AutoMutex lock(mLock); @@ -142,6 +153,8 @@ status_t 
AudioRecord::set( if (sampleRate == 0) { sampleRate = DEFAULT_SAMPLE_RATE; } + mSampleRate = sampleRate; + // these below should probably come from the audioFlinger too... if (format == AUDIO_FORMAT_DEFAULT) { format = AUDIO_FORMAT_PCM_16_BIT; @@ -151,12 +164,20 @@ status_t AudioRecord::set( ALOGE("Invalid format"); return BAD_VALUE; } + mFormat = format; if (!audio_is_input_channel(channelMask)) { return BAD_VALUE; } + mChannelMask = channelMask; + uint32_t channelCount = popcount(channelMask); + mChannelCount = channelCount; - int channelCount = popcount(channelMask); + if (audio_is_linear_pcm(format)) { + mFrameSize = channelCount * audio_bytes_per_sample(format); + } else { + mFrameSize = sizeof(uint8_t); + } if (sessionId == 0 ) { mSessionId = AudioSystem::newAudioSessionId(); @@ -176,7 +197,7 @@ status_t AudioRecord::set( } // validate framecount - int minFrameCount = 0; + size_t minFrameCount = 0; status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask); if (status != NO_ERROR) { return status; @@ -194,8 +215,7 @@ status_t AudioRecord::set( } // create the IAudioRecord - status = openRecord_l(sampleRate, format, channelMask, - frameCount, input); + status = openRecord_l(sampleRate, format, frameCount, input); if (status != NO_ERROR) { return status; } @@ -207,11 +227,9 @@ status_t AudioRecord::set( mStatus = NO_ERROR; - mFormat = format; // Update buffer size in case it has been limited by AudioFlinger during track creation - mFrameCount = mCblk->frameCount; - mChannelCount = (uint8_t)channelCount; - mChannelMask = channelMask; + mFrameCount = mCblk->frameCount_; + mActive = false; mCbf = cbf; mNotificationFrames = notificationFrames; @@ -247,25 +265,16 @@ audio_format_t AudioRecord::format() const return mFormat; } -int AudioRecord::channelCount() const +uint32_t AudioRecord::channelCount() const { return mChannelCount; } -uint32_t AudioRecord::frameCount() const +size_t AudioRecord::frameCount() const { return mFrameCount; } 
-size_t AudioRecord::frameSize() const -{ - if (audio_is_linear_pcm(mFormat)) { - return channelCount()*audio_bytes_per_sample(mFormat); - } else { - return sizeof(uint8_t); - } -} - audio_source_t AudioRecord::inputSource() const { return mInputSource; @@ -291,17 +300,19 @@ status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession) mActive = true; cblk->lock.lock(); - if (!(cblk->flags & CBLK_INVALID_MSK)) { + if (!(cblk->flags & CBLK_INVALID)) { cblk->lock.unlock(); ALOGV("mAudioRecord->start()"); ret = mAudioRecord->start(event, triggerSession); cblk->lock.lock(); if (ret == DEAD_OBJECT) { - android_atomic_or(CBLK_INVALID_ON, &cblk->flags); + android_atomic_or(CBLK_INVALID, &cblk->flags); } } - if (cblk->flags & CBLK_INVALID_MSK) { - ret = restoreRecord_l(cblk); + if (cblk->flags & CBLK_INVALID) { + audio_track_cblk_t* temp = cblk; + ret = restoreRecord_l(temp); + cblk = temp; } cblk->lock.unlock(); if (ret == NO_ERROR) { @@ -355,7 +366,7 @@ bool AudioRecord::stopped() const uint32_t AudioRecord::getSampleRate() const { - return mCblk->sampleRate; + return mSampleRate; } status_t AudioRecord::setMarkerPosition(uint32_t marker) @@ -425,13 +436,13 @@ unsigned int AudioRecord::getInputFramesLost() const status_t AudioRecord::openRecord_l( uint32_t sampleRate, audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, audio_io_handle_t input) { status_t status; const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); if (audioFlinger == 0) { + ALOGE("Could not get audioflinger"); return NO_INIT; } @@ -439,9 +450,9 @@ status_t AudioRecord::openRecord_l( // FIXME see similar logic at AudioTrack int originalSessionId = mSessionId; - sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), input, + sp<IAudioRecord> record = audioFlinger->openRecord(input, sampleRate, format, - channelMask, + mChannelMask, frameCount, IAudioFlinger::TRACK_DEFAULT, tid, @@ -454,25 +465,32 @@ status_t 
AudioRecord::openRecord_l( ALOGE("AudioFlinger could not create record track, status: %d", status); return status; } - sp<IMemory> cblk = record->getCblk(); - if (cblk == 0) { + sp<IMemory> iMem = record->getCblk(); + if (iMem == 0) { ALOGE("Could not get control block"); return NO_INIT; } mAudioRecord.clear(); mAudioRecord = record; mCblkMemory.clear(); - mCblkMemory = cblk; - mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer()); - mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t); - android_atomic_and(~CBLK_DIRECTION_MSK, &mCblk->flags); - mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; - mCblk->waitTimeMs = 0; + mCblkMemory = iMem; + audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer()); + mCblk = cblk; + mBuffers = (char*)cblk + sizeof(audio_track_cblk_t); + cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; + cblk->waitTimeMs = 0; + + // update proxy + delete mProxy; + mProxy = new AudioRecordClientProxy(cblk, mBuffers, frameCount, mFrameSize); + return NO_ERROR; } status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) { + ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL); + AutoMutex lock(mLock); bool active; status_t result = NO_ERROR; @@ -483,7 +501,7 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) audioBuffer->frameCount = 0; audioBuffer->size = 0; - uint32_t framesReady = cblk->framesReady(); + size_t framesReady = mProxy->framesReady(); if (framesReady == 0) { cblk->lock.lock(); @@ -498,17 +516,22 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) cblk->lock.unlock(); return WOULD_BLOCK; } - if (!(cblk->flags & CBLK_INVALID_MSK)) { + if (!(cblk->flags & CBLK_INVALID)) { mLock.unlock(); + // this condition is in shared memory, so if IAudioRecord and control block + // are replaced due to mediaserver death or IAudioRecord invalidation then + // cv won't be signalled, but fortunately the timeout will limit the wait result = 
cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs)); cblk->lock.unlock(); mLock.lock(); if (!mActive) { return status_t(STOPPED); } + // IAudioRecord may have been re-created while mLock was unlocked + cblk = mCblk; cblk->lock.lock(); } - if (cblk->flags & CBLK_INVALID_MSK) { + if (cblk->flags & CBLK_INVALID) { goto create_new_record; } if (CC_UNLIKELY(result != NO_ERROR)) { @@ -521,9 +544,11 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0); cblk->lock.lock(); if (result == DEAD_OBJECT) { - android_atomic_or(CBLK_INVALID_ON, &cblk->flags); + android_atomic_or(CBLK_INVALID, &cblk->flags); create_new_record: - result = AudioRecord::restoreRecord_l(cblk); + audio_track_cblk_t* temp = cblk; + result = AudioRecord::restoreRecord_l(temp); + cblk = temp; } if (result != NO_ERROR) { ALOGW("obtainBuffer create Track error %d", result); @@ -539,7 +564,7 @@ create_new_record: } // read the server count again start_loop_here: - framesReady = cblk->framesReady(); + framesReady = mProxy->framesReady(); } cblk->lock.unlock(); } @@ -553,26 +578,25 @@ create_new_record: } uint32_t u = cblk->user; - uint32_t bufferEnd = cblk->userBase + cblk->frameCount; + uint32_t bufferEnd = cblk->userBase + mFrameCount; if (framesReq > bufferEnd - u) { framesReq = bufferEnd - u; } - audioBuffer->flags = 0; - audioBuffer->channelCount= mChannelCount; - audioBuffer->format = mFormat; audioBuffer->frameCount = framesReq; - audioBuffer->size = framesReq*cblk->frameSize; - audioBuffer->raw = (int8_t*)cblk->buffer(u); + audioBuffer->size = framesReq * mFrameSize; + audioBuffer->raw = mProxy->buffer(u); active = mActive; return active ? 
status_t(NO_ERROR) : status_t(STOPPED); } void AudioRecord::releaseBuffer(Buffer* audioBuffer) { + ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL); + AutoMutex lock(mLock); - mCblk->stepUser(audioBuffer->frameCount); + (void) mProxy->stepUser(audioBuffer->frameCount); } audio_io_handle_t AudioRecord::getInput() const @@ -585,7 +609,7 @@ audio_io_handle_t AudioRecord::getInput() const audio_io_handle_t AudioRecord::getInput_l() { mInput = AudioSystem::getInput(mInputSource, - mCblk->sampleRate, + mSampleRate, mFormat, mChannelMask, mSessionId); @@ -631,10 +655,13 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize) status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS)); if (err < 0) { // out of buffers, return #bytes written - if (err == status_t(NO_MORE_BUFFERS)) + if (err == status_t(NO_MORE_BUFFERS)) { break; - if (err == status_t(TIMED_OUT)) - err = 0; + } + if (err == status_t(TIMED_OUT)) { + // return partial transfer count + return read; + } return ssize_t(err); } @@ -701,7 +728,8 @@ bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread) status_t err = obtainBuffer(&audioBuffer, 1); if (err < NO_ERROR) { if (err != TIMED_OUT) { - ALOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up."); + ALOGE_IF(err != status_t(NO_MORE_BUFFERS), + "Error obtaining an audio buffer, giving up."); return false; } break; @@ -733,11 +761,11 @@ bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread) // Manage overrun callback - if (active && (cblk->framesAvailable() == 0)) { + if (active && (mProxy->framesAvailable() == 0)) { // The value of active is stale, but we are almost sure to be active here because // otherwise we would have exited when obtainBuffer returned STOPPED earlier. 
ALOGV("Overrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags); - if (!(android_atomic_or(CBLK_UNDERRUN_ON, &cblk->flags) & CBLK_UNDERRUN_MSK)) { + if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) { mCbf(EVENT_OVERRUN, mUserData, NULL); } } @@ -753,57 +781,40 @@ bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread) // must be called with mLock and cblk.lock held. Callers must also hold strong references on // the IAudioRecord and IMemory in case they are recreated here. // If the IAudioRecord is successfully restored, the cblk pointer is updated -status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& cblk) +status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& refCblk) { status_t result; - if (!(android_atomic_or(CBLK_RESTORING_ON, &cblk->flags) & CBLK_RESTORING_MSK)) { - ALOGW("dead IAudioRecord, creating a new one"); - // signal old cblk condition so that other threads waiting for available buffers stop - // waiting now - cblk->cv.broadcast(); - cblk->lock.unlock(); + audio_track_cblk_t* cblk = refCblk; + audio_track_cblk_t* newCblk = cblk; + ALOGW("dead IAudioRecord, creating a new one"); - // if the new IAudioRecord is created, openRecord_l() will modify the - // following member variables: mAudioRecord, mCblkMemory and mCblk. 
- // It will also delete the strong references on previous IAudioRecord and IMemory - result = openRecord_l(cblk->sampleRate, mFormat, mChannelMask, - mFrameCount, getInput_l()); - if (result == NO_ERROR) { - // callback thread or sync event hasn't changed - result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0); - } - if (result != NO_ERROR) { - mActive = false; - } + // signal old cblk condition so that other threads waiting for available buffers stop + // waiting now + cblk->cv.broadcast(); + cblk->lock.unlock(); - // signal old cblk condition for other threads waiting for restore completion - android_atomic_or(CBLK_RESTORED_ON, &cblk->flags); - cblk->cv.broadcast(); - } else { - if (!(cblk->flags & CBLK_RESTORED_MSK)) { - ALOGW("dead IAudioRecord, waiting for a new one to be created"); - mLock.unlock(); - result = cblk->cv.waitRelative(cblk->lock, milliseconds(RESTORE_TIMEOUT_MS)); - cblk->lock.unlock(); - mLock.lock(); - } else { - ALOGW("dead IAudioRecord, already restored"); - result = NO_ERROR; - cblk->lock.unlock(); - } - if (result != NO_ERROR || !mActive) { - result = status_t(STOPPED); - } + // if the new IAudioRecord is created, openRecord_l() will modify the + // following member variables: mAudioRecord, mCblkMemory and mCblk. 
+ // It will also delete the strong references on previous IAudioRecord and IMemory + result = openRecord_l(mSampleRate, mFormat, mFrameCount, getInput_l()); + if (result == NO_ERROR) { + newCblk = mCblk; + // callback thread or sync event hasn't changed + result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0); } + if (result != NO_ERROR) { + mActive = false; + } + ALOGV("restoreRecord_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x", - result, mActive, mCblk, cblk, mCblk->flags, cblk->flags); + result, mActive, newCblk, cblk, newCblk->flags, cblk->flags); if (result == NO_ERROR) { // from now on we switch to the newly created cblk - cblk = mCblk; + refCblk = newCblk; } - cblk->lock.lock(); + newCblk->lock.lock(); ALOGW_IF(result != NO_ERROR, "restoreRecord_l() error %d", result); diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp index 207f96f..693df60 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -205,12 +205,7 @@ int AudioSystem::logToLinear(float volume) return volume ? 
100 - int(dBConvertInverse * log(volume) + 0.5) : 0; } -// DEPRECATED -status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) { - return getOutputSamplingRate(samplingRate, (audio_stream_type_t)streamType); -} - -status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type_t streamType) +status_t AudioSystem::getOutputSamplingRate(uint32_t* samplingRate, audio_stream_type_t streamType) { audio_io_handle_t output; @@ -228,7 +223,7 @@ status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type status_t AudioSystem::getSamplingRate(audio_io_handle_t output, audio_stream_type_t streamType, - int* samplingRate) + uint32_t* samplingRate) { OutputDescriptor *outputDesc; @@ -246,17 +241,13 @@ status_t AudioSystem::getSamplingRate(audio_io_handle_t output, gLock.unlock(); } - ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate); + ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %u", streamType, output, + *samplingRate); return NO_ERROR; } -// DEPRECATED -status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) { - return getOutputFrameCount(frameCount, (audio_stream_type_t)streamType); -} - -status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t streamType) +status_t AudioSystem::getOutputFrameCount(size_t* frameCount, audio_stream_type_t streamType) { audio_io_handle_t output; @@ -274,7 +265,7 @@ status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t s status_t AudioSystem::getFrameCount(audio_io_handle_t output, audio_stream_type_t streamType, - int* frameCount) + size_t* frameCount) { OutputDescriptor *outputDesc; @@ -290,7 +281,8 @@ status_t AudioSystem::getFrameCount(audio_io_handle_t output, gLock.unlock(); } - ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output, *frameCount); + ALOGV("getFrameCount() streamType %d, output %d, frameCount 
%d", streamType, output, + *frameCount); return NO_ERROR; } @@ -369,7 +361,8 @@ status_t AudioSystem::setVoiceVolume(float value) return af->setVoiceVolume(value); } -status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream) +status_t AudioSystem::getRenderPosition(size_t *halFrames, size_t *dspFrames, + audio_stream_type_t stream) { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; @@ -381,7 +374,7 @@ status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames return af->getRenderPosition(halFrames, dspFrames, getOutput(stream)); } -unsigned int AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) { +size_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); unsigned int result = 0; if (af == 0) return result; @@ -449,8 +442,10 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle OutputDescriptor *outputDesc = new OutputDescriptor(*desc); gOutputs.add(ioHandle, outputDesc); - ALOGV("ioConfigChanged() new output samplingRate %d, format %d channels %#x frameCount %d latency %d", - outputDesc->samplingRate, outputDesc->format, outputDesc->channels, outputDesc->frameCount, outputDesc->latency); + ALOGV("ioConfigChanged() new output samplingRate %u, format %d channels %#x frameCount %u " + "latency %d", + outputDesc->samplingRate, outputDesc->format, outputDesc->channels, + outputDesc->frameCount, outputDesc->latency); } break; case OUTPUT_CLOSED: { if (gOutputs.indexOfKey(ioHandle) < 0) { @@ -471,7 +466,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle if (param2 == NULL) break; desc = (const OutputDescriptor *)param2; - ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %#x frameCount %d latency %d", + ALOGV("ioConfigChanged() new config for output %d samplingRate 
%u, format %d channels %#x " + "frameCount %d latency %d", ioHandle, desc->samplingRate, desc->format, desc->channels, desc->frameCount, desc->latency); OutputDescriptor *outputDesc = gOutputs.valueAt(index); @@ -510,7 +506,7 @@ sp<IAudioPolicyService> AudioSystem::gAudioPolicyService; sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient; -// establish binder interface to AudioFlinger service +// establish binder interface to AudioPolicy service const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service() { gLock.lock(); @@ -735,6 +731,16 @@ status_t AudioSystem::isStreamActive(audio_stream_type_t stream, bool* state, ui return NO_ERROR; } +status_t AudioSystem::isStreamActiveRemotely(audio_stream_type_t stream, bool* state, + uint32_t inPastMs) +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return PERMISSION_DENIED; + if (state == NULL) return BAD_VALUE; + *state = aps->isStreamActiveRemotely(stream, inPastMs); + return NO_ERROR; +} + status_t AudioSystem::isSourceActive(audio_source_t stream, bool* state) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); @@ -744,14 +750,14 @@ status_t AudioSystem::isSourceActive(audio_source_t stream, bool* state) return NO_ERROR; } -int32_t AudioSystem::getPrimaryOutputSamplingRate() +uint32_t AudioSystem::getPrimaryOutputSamplingRate() { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return 0; return af->getPrimaryOutputSamplingRate(); } -int32_t AudioSystem::getPrimaryOutputFrameCount() +size_t AudioSystem::getPrimaryOutputFrameCount() { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return 0; diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index 3278dbe..7eeb4f8 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -50,11 +50,13 @@ namespace android { // static status_t 
AudioTrack::getMinFrameCount( - int* frameCount, + size_t* frameCount, audio_stream_type_t streamType, uint32_t sampleRate) { - if (frameCount == NULL) return BAD_VALUE; + if (frameCount == NULL) { + return BAD_VALUE; + } // default to 0 in case of error *frameCount = 0; @@ -65,11 +67,11 @@ status_t AudioTrack::getMinFrameCount( // audio_format_t format // audio_channel_mask_t channelMask // audio_output_flags_t flags - int afSampleRate; + uint32_t afSampleRate; if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) { return NO_INIT; } - int afFrameCount; + size_t afFrameCount; if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) { return NO_INIT; } @@ -95,7 +97,8 @@ AudioTrack::AudioTrack() : mStatus(NO_INIT), mIsTimed(false), mPreviousPriority(ANDROID_PRIORITY_NORMAL), - mPreviousSchedulingGroup(SP_DEFAULT) + mPreviousSchedulingGroup(SP_DEFAULT), + mProxy(NULL) { } @@ -113,35 +116,14 @@ AudioTrack::AudioTrack( : mStatus(NO_INIT), mIsTimed(false), mPreviousPriority(ANDROID_PRIORITY_NORMAL), - mPreviousSchedulingGroup(SP_DEFAULT) + mPreviousSchedulingGroup(SP_DEFAULT), + mProxy(NULL) { mStatus = set(streamType, sampleRate, format, channelMask, frameCount, flags, cbf, user, notificationFrames, 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId); } -// DEPRECATED -AudioTrack::AudioTrack( - int streamType, - uint32_t sampleRate, - int format, - int channelMask, - int frameCount, - uint32_t flags, - callback_t cbf, - void* user, - int notificationFrames, - int sessionId) - : mStatus(NO_INIT), - mIsTimed(false), - mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT) -{ - mStatus = set((audio_stream_type_t)streamType, sampleRate, (audio_format_t)format, - (audio_channel_mask_t) channelMask, - frameCount, (audio_output_flags_t)flags, cbf, user, notificationFrames, - 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId); -} - AudioTrack::AudioTrack( audio_stream_type_t streamType, 
uint32_t sampleRate, @@ -156,8 +138,14 @@ AudioTrack::AudioTrack( : mStatus(NO_INIT), mIsTimed(false), mPreviousPriority(ANDROID_PRIORITY_NORMAL), - mPreviousSchedulingGroup(SP_DEFAULT) + mPreviousSchedulingGroup(SP_DEFAULT), + mProxy(NULL) { + if (sharedBuffer == 0) { + ALOGE("sharedBuffer must be non-0"); + mStatus = BAD_VALUE; + return; + } mStatus = set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags, cbf, user, notificationFrames, sharedBuffer, false /*threadCanCallJava*/, sessionId); @@ -181,6 +169,7 @@ AudioTrack::~AudioTrack() IPCThreadState::self()->flushCommands(); AudioSystem::releaseAudioSessionId(mSessionId); } + delete mProxy; } status_t AudioTrack::set( @@ -188,7 +177,7 @@ status_t AudioTrack::set( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, + int frameCountInt, audio_output_flags_t flags, callback_t cbf, void* user, @@ -197,10 +186,17 @@ status_t AudioTrack::set( bool threadCanCallJava, int sessionId) { + // FIXME "int" here is legacy and will be replaced by size_t later + if (frameCountInt < 0) { + ALOGE("Invalid frame count %d", frameCountInt); + return BAD_VALUE; + } + size_t frameCount = frameCountInt; - ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size()); + ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), + sharedBuffer->size()); - ALOGV("set() streamType %d frameCount %d flags %04x", streamType, frameCount, flags); + ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags); AutoMutex lock(mLock); if (mAudioTrack != 0) { @@ -214,12 +210,13 @@ status_t AudioTrack::set( } if (sampleRate == 0) { - int afSampleRate; + uint32_t afSampleRate; if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) { return NO_INIT; } sampleRate = afSampleRate; } + mSampleRate = sampleRate; // these below should probably come from the audioFlinger too... 
if (format == AUDIO_FORMAT_DEFAULT) { @@ -256,7 +253,17 @@ status_t AudioTrack::set( ALOGE("Invalid channel mask %#x", channelMask); return BAD_VALUE; } + mChannelMask = channelMask; uint32_t channelCount = popcount(channelMask); + mChannelCount = channelCount; + + if (audio_is_linear_pcm(format)) { + mFrameSize = channelCount * audio_bytes_per_sample(format); + mFrameSizeAF = channelCount * sizeof(int16_t); + } else { + mFrameSize = sizeof(uint8_t); + mFrameSizeAF = sizeof(uint8_t); + } audio_io_handle_t output = AudioSystem::getOutput( streamType, @@ -272,6 +279,7 @@ status_t AudioTrack::set( mVolume[RIGHT] = 1.0f; mSendLevel = 0.0f; mFrameCount = frameCount; + mReqFrameCount = frameCount; mNotificationFramesReq = notificationFrames; mSessionId = sessionId; mAuxEffectId = 0; @@ -287,7 +295,6 @@ status_t AudioTrack::set( status_t status = createTrack_l(streamType, sampleRate, format, - channelMask, frameCount, flags, sharedBuffer, @@ -305,10 +312,8 @@ status_t AudioTrack::set( mStreamType = streamType; mFormat = format; - mChannelMask = channelMask; - mChannelCount = channelCount; + mSharedBuffer = sharedBuffer; - mMuted = false; mActive = false; mUserData = user; mLoopCount = 0; @@ -318,56 +323,9 @@ status_t AudioTrack::set( mUpdatePeriod = 0; mFlushed = false; AudioSystem::acquireAudioSessionId(mSessionId); - mRestoreStatus = NO_ERROR; return NO_ERROR; } -status_t AudioTrack::initCheck() const -{ - return mStatus; -} - -// ------------------------------------------------------------------------- - -uint32_t AudioTrack::latency() const -{ - return mLatency; -} - -audio_stream_type_t AudioTrack::streamType() const -{ - return mStreamType; -} - -audio_format_t AudioTrack::format() const -{ - return mFormat; -} - -int AudioTrack::channelCount() const -{ - return mChannelCount; -} - -uint32_t AudioTrack::frameCount() const -{ - return mCblk->frameCount; -} - -size_t AudioTrack::frameSize() const -{ - if (audio_is_linear_pcm(mFormat)) { - return 
channelCount()*audio_bytes_per_sample(mFormat); - } else { - return sizeof(uint8_t); - } -} - -sp<IMemory>& AudioTrack::sharedBuffer() -{ - return mSharedBuffer; -} - // ------------------------------------------------------------------------- void AudioTrack::start() @@ -390,7 +348,7 @@ void AudioTrack::start() cblk->lock.lock(); cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS; cblk->waitTimeMs = 0; - android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags); + android_atomic_and(~CBLK_DISABLED, &cblk->flags); if (t != 0) { t->resume(); } else { @@ -399,19 +357,21 @@ void AudioTrack::start() androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO); } - ALOGV("start %p before lock cblk %p", this, mCblk); + ALOGV("start %p before lock cblk %p", this, cblk); status_t status = NO_ERROR; - if (!(cblk->flags & CBLK_INVALID_MSK)) { + if (!(cblk->flags & CBLK_INVALID)) { cblk->lock.unlock(); ALOGV("mAudioTrack->start()"); status = mAudioTrack->start(); cblk->lock.lock(); if (status == DEAD_OBJECT) { - android_atomic_or(CBLK_INVALID_ON, &cblk->flags); + android_atomic_or(CBLK_INVALID, &cblk->flags); } } - if (cblk->flags & CBLK_INVALID_MSK) { - status = restoreTrack_l(cblk, true); + if (cblk->flags & CBLK_INVALID) { + audio_track_cblk_t* temp = cblk; + status = restoreTrack_l(temp, true /*fromStart*/); + cblk = temp; } cblk->lock.unlock(); if (status != NO_ERROR) { @@ -447,6 +407,7 @@ void AudioTrack::stop() mMarkerReached = false; // Force flush if a shared buffer is used otherwise audioflinger // will not stop before end of buffer is reached. + // It may be needed to make sure that we stop playback, likely in case looping is on. 
if (mSharedBuffer != 0) { flush_l(); } @@ -469,26 +430,26 @@ bool AudioTrack::stopped() const void AudioTrack::flush() { AutoMutex lock(mLock); - flush_l(); + if (!mActive && mSharedBuffer == 0) { + flush_l(); + } } -// must be called with mLock held void AudioTrack::flush_l() { ALOGV("flush"); + ALOG_ASSERT(!mActive); // clear playback marker and periodic update counter mMarkerPosition = 0; mMarkerReached = false; mUpdatePeriod = 0; - if (!mActive) { - mFlushed = true; - mAudioTrack->flush(); - // Release AudioTrack callback thread in case it was waiting for new buffers - // in AudioTrack::obtainBuffer() - mCblk->cv.signal(); - } + mFlushed = true; + mAudioTrack->flush(); + // Release AudioTrack callback thread in case it was waiting for new buffers + // in AudioTrack::obtainBuffer() + mCblk->cv.signal(); } void AudioTrack::pause() @@ -502,19 +463,13 @@ void AudioTrack::pause() } } -void AudioTrack::mute(bool e) -{ - mAudioTrack->mute(e); - mMuted = e; -} - -bool AudioTrack::muted() const -{ - return mMuted; -} - status_t AudioTrack::setVolume(float left, float right) { + if (mStatus != NO_ERROR) { + return mStatus; + } + ALOG_ASSERT(mProxy != NULL); + if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) { return BAD_VALUE; } @@ -523,32 +478,32 @@ status_t AudioTrack::setVolume(float left, float right) mVolume[LEFT] = left; mVolume[RIGHT] = right; - mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000)); + mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000)); return NO_ERROR; } -void AudioTrack::getVolume(float* left, float* right) const +status_t AudioTrack::setVolume(float volume) { - if (left != NULL) { - *left = mVolume[LEFT]; - } - if (right != NULL) { - *right = mVolume[RIGHT]; - } + return setVolume(volume, volume); } status_t AudioTrack::setAuxEffectSendLevel(float level) { ALOGV("setAuxEffectSendLevel(%f)", level); + + if (mStatus != NO_ERROR) { + return mStatus; + } + 
ALOG_ASSERT(mProxy != NULL); + if (level < 0.0f || level > 1.0f) { return BAD_VALUE; } AutoMutex lock(mLock); mSendLevel = level; - - mCblk->setSendLevel(level); + mProxy->setSendLevel(level); return NO_ERROR; } @@ -560,9 +515,9 @@ void AudioTrack::getAuxEffectSendLevel(float* level) const } } -status_t AudioTrack::setSampleRate(int rate) +status_t AudioTrack::setSampleRate(uint32_t rate) { - int afSamplingRate; + uint32_t afSamplingRate; if (mIsTimed) { return INVALID_OPERATION; @@ -572,21 +527,25 @@ status_t AudioTrack::setSampleRate(int rate) return NO_INIT; } // Resampler implementation limits input sampling rate to 2 x output sampling rate. - if (rate <= 0 || rate > afSamplingRate*2 ) return BAD_VALUE; + if (rate == 0 || rate > afSamplingRate*2 ) { + return BAD_VALUE; + } AutoMutex lock(mLock); - mCblk->sampleRate = rate; + mSampleRate = rate; + mProxy->setSampleRate(rate); + return NO_ERROR; } uint32_t AudioTrack::getSampleRate() const { if (mIsTimed) { - return INVALID_OPERATION; + return 0; } AutoMutex lock(mLock); - return mCblk->sampleRate; + return mSampleRate; } status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount) @@ -598,6 +557,10 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount // must be called with mLock held status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount) { + if (mSharedBuffer == 0 || mIsTimed) { + return INVALID_OPERATION; + } + audio_track_cblk_t* cblk = mCblk; Mutex::Autolock _l(cblk->lock); @@ -610,20 +573,18 @@ status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCou return NO_ERROR; } - if (mIsTimed) { - return INVALID_OPERATION; - } - if (loopStart >= loopEnd || - loopEnd - loopStart > cblk->frameCount || + loopEnd - loopStart > mFrameCount || cblk->server > loopStart) { - ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, user %d", loopStart, loopEnd, loopCount, cblk->frameCount, 
cblk->user); + ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, " + "user %d", loopStart, loopEnd, loopCount, mFrameCount, cblk->user); return BAD_VALUE; } - if ((mSharedBuffer != 0) && (loopEnd > cblk->frameCount)) { - ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, framecount %d", - loopStart, loopEnd, cblk->frameCount); + if ((mSharedBuffer != 0) && (loopEnd > mFrameCount)) { + ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, " + "framecount %d", + loopStart, loopEnd, mFrameCount); return BAD_VALUE; } @@ -637,7 +598,9 @@ status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCou status_t AudioTrack::setMarkerPosition(uint32_t marker) { - if (mCbf == NULL) return INVALID_OPERATION; + if (mCbf == NULL) { + return INVALID_OPERATION; + } mMarkerPosition = marker; mMarkerReached = false; @@ -647,7 +610,9 @@ status_t AudioTrack::setMarkerPosition(uint32_t marker) status_t AudioTrack::getMarkerPosition(uint32_t *marker) const { - if (marker == NULL) return BAD_VALUE; + if (marker == NULL) { + return BAD_VALUE; + } *marker = mMarkerPosition; @@ -656,7 +621,9 @@ status_t AudioTrack::getMarkerPosition(uint32_t *marker) const status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) { - if (mCbf == NULL) return INVALID_OPERATION; + if (mCbf == NULL) { + return INVALID_OPERATION; + } uint32_t curPosition; getPosition(&curPosition); @@ -668,7 +635,9 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const { - if (updatePeriod == NULL) return BAD_VALUE; + if (updatePeriod == NULL) { + return BAD_VALUE; + } *updatePeriod = mUpdatePeriod; @@ -677,25 +646,34 @@ status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const status_t AudioTrack::setPosition(uint32_t position) { - if (mIsTimed) return INVALID_OPERATION; + if (mSharedBuffer == 0 || 
mIsTimed) { + return INVALID_OPERATION; + } AutoMutex lock(mLock); - if (!stopped_l()) return INVALID_OPERATION; + if (!stopped_l()) { + return INVALID_OPERATION; + } - Mutex::Autolock _l(mCblk->lock); + audio_track_cblk_t* cblk = mCblk; + Mutex::Autolock _l(cblk->lock); - if (position > mCblk->user) return BAD_VALUE; + if (position > cblk->user) { + return BAD_VALUE; + } - mCblk->server = position; - android_atomic_or(CBLK_FORCEREADY_ON, &mCblk->flags); + cblk->server = position; + android_atomic_or(CBLK_FORCEREADY, &cblk->flags); return NO_ERROR; } status_t AudioTrack::getPosition(uint32_t *position) { - if (position == NULL) return BAD_VALUE; + if (position == NULL) { + return BAD_VALUE; + } AutoMutex lock(mLock); *position = mFlushed ? 0 : mCblk->server; @@ -704,13 +682,24 @@ status_t AudioTrack::getPosition(uint32_t *position) status_t AudioTrack::reload() { + if (mStatus != NO_ERROR) { + return mStatus; + } + ALOG_ASSERT(mProxy != NULL); + + if (mSharedBuffer == 0 || mIsTimed) { + return INVALID_OPERATION; + } + AutoMutex lock(mLock); - if (!stopped_l()) return INVALID_OPERATION; + if (!stopped_l()) { + return INVALID_OPERATION; + } flush_l(); - mCblk->stepUser(mCblk->frameCount); + (void) mProxy->stepUser(mFrameCount); return NO_ERROR; } @@ -725,12 +714,7 @@ audio_io_handle_t AudioTrack::getOutput() audio_io_handle_t AudioTrack::getOutput_l() { return AudioSystem::getOutput(mStreamType, - mCblk->sampleRate, mFormat, mChannelMask, mFlags); -} - -int AudioTrack::getSessionId() const -{ - return mSessionId; + mSampleRate, mFormat, mChannelMask, mFlags); } status_t AudioTrack::attachAuxEffect(int effectId) @@ -750,8 +734,7 @@ status_t AudioTrack::createTrack_l( audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, audio_output_flags_t flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output) @@ -791,7 +774,7 @@ status_t AudioTrack::createTrack_l( // Same 
comment as below about ignoring frameCount parameter for set() frameCount = sharedBuffer->size(); } else if (frameCount == 0) { - int afFrameCount; + size_t afFrameCount; if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) { return NO_INIT; } @@ -800,17 +783,16 @@ status_t AudioTrack::createTrack_l( } else if (sharedBuffer != 0) { - // Ensure that buffer alignment matches channelCount - int channelCount = popcount(channelMask); + // Ensure that buffer alignment matches channel count // 8-bit data in shared memory is not currently supported by AudioFlinger size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2; - if (channelCount > 1) { + if (mChannelCount > 1) { // More than 2 channels does not require stronger alignment than stereo alignment <<= 1; } - if (((uint32_t)sharedBuffer->pointer() & (alignment - 1)) != 0) { - ALOGE("Invalid buffer alignment: address %p, channelCount %d", - sharedBuffer->pointer(), channelCount); + if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) { + ALOGE("Invalid buffer alignment: address %p, channel count %u", + sharedBuffer->pointer(), mChannelCount); return BAD_VALUE; } @@ -818,16 +800,16 @@ status_t AudioTrack::createTrack_l( // there's no frameCount parameter. // But when initializing a shared buffer AudioTrack via set(), // there _is_ a frameCount parameter. We silently ignore it. 
- frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t); + frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t); } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) { // FIXME move these calculations and associated checks to server - int afSampleRate; + uint32_t afSampleRate; if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) { return NO_INIT; } - int afFrameCount; + size_t afFrameCount; if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) { return NO_INIT; } @@ -836,8 +818,8 @@ status_t AudioTrack::createTrack_l( uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate); if (minBufCount < 2) minBufCount = 2; - int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate; - ALOGV("minFrameCount: %d, afFrameCount=%d, minBufCount=%d, sampleRate=%d, afSampleRate=%d" + size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate; + ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u" ", afLatency=%d", minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency); @@ -849,7 +831,7 @@ status_t AudioTrack::createTrack_l( } // Make sure that application is notified with sufficient margin // before underrun - if (mNotificationFramesAct > (uint32_t)frameCount/2) { + if (mNotificationFramesAct > frameCount/2) { mNotificationFramesAct = frameCount/2; } if (frameCount < minFrameCount) { @@ -876,13 +858,14 @@ status_t AudioTrack::createTrack_l( } } - sp<IAudioTrack> track = audioFlinger->createTrack(getpid(), - streamType, + sp<IAudioTrack> track = audioFlinger->createTrack(streamType, sampleRate, - format, - channelMask, + // AudioFlinger only sees 16-bit PCM + format == AUDIO_FORMAT_PCM_8_BIT ? 
+ AUDIO_FORMAT_PCM_16_BIT : format, + mChannelMask, frameCount, - trackFlags, + &trackFlags, sharedBuffer, output, tid, @@ -893,55 +876,76 @@ status_t AudioTrack::createTrack_l( ALOGE("AudioFlinger could not create track, status: %d", status); return status; } - sp<IMemory> cblk = track->getCblk(); - if (cblk == 0) { + sp<IMemory> iMem = track->getCblk(); + if (iMem == 0) { ALOGE("Could not get control block"); return NO_INIT; } mAudioTrack = track; - mCblkMemory = cblk; - mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer()); - // old has the previous value of mCblk->flags before the "or" operation - int32_t old = android_atomic_or(CBLK_DIRECTION_OUT, &mCblk->flags); + mCblkMemory = iMem; + audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer()); + mCblk = cblk; + size_t temp = cblk->frameCount_; + if (temp < frameCount || (frameCount == 0 && temp == 0)) { + // In current design, AudioTrack client checks and ensures frame count validity before + // passing it to AudioFlinger so AudioFlinger should not return a different value except + // for fast track as it uses a special method of assigning frame count. 
+ ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp); + } + frameCount = temp; + mAwaitBoost = false; if (flags & AUDIO_OUTPUT_FLAG_FAST) { - if (old & CBLK_FAST) { - ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", mCblk->frameCount); + if (trackFlags & IAudioFlinger::TRACK_FAST) { + ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount); + mAwaitBoost = true; } else { - ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", mCblk->frameCount); + ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount); // once denied, do not request again if IAudioTrack is re-created flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST); mFlags = flags; } if (sharedBuffer == 0) { - mNotificationFramesAct = mCblk->frameCount/2; + mNotificationFramesAct = frameCount/2; } } if (sharedBuffer == 0) { - mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t); + mBuffers = (char*)cblk + sizeof(audio_track_cblk_t); } else { - mCblk->buffers = sharedBuffer->pointer(); - // Force buffer full condition as data is already present in shared memory - mCblk->stepUser(mCblk->frameCount); + mBuffers = sharedBuffer->pointer(); } - mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000)); - mCblk->setSendLevel(mSendLevel); mAudioTrack->attachAuxEffect(mAuxEffectId); - mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS; - mCblk->waitTimeMs = 0; + cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS; + cblk->waitTimeMs = 0; mRemainingFrames = mNotificationFramesAct; // FIXME don't believe this lie - mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate; + mLatency = afLatency + (1000*frameCount) / sampleRate; + mFrameCount = frameCount; // If IAudioTrack is re-created, don't let the requested frameCount // decrease. This can confuse clients that cache frameCount(). 
- if (mCblk->frameCount > mFrameCount) { - mFrameCount = mCblk->frameCount; + if (frameCount > mReqFrameCount) { + mReqFrameCount = frameCount; + } + + // update proxy + delete mProxy; + mProxy = new AudioTrackClientProxy(cblk, mBuffers, frameCount, mFrameSizeAF); + mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | + uint16_t(mVolume[LEFT] * 0x1000)); + mProxy->setSendLevel(mSendLevel); + mProxy->setSampleRate(mSampleRate); + if (sharedBuffer != 0) { + // Force buffer full condition as data is already present in shared memory + mProxy->stepUser(frameCount); } + return NO_ERROR; } status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) { + ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL); + AutoMutex lock(mLock); bool active; status_t result = NO_ERROR; @@ -952,10 +956,10 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) audioBuffer->frameCount = 0; audioBuffer->size = 0; - uint32_t framesAvail = cblk->framesAvailable(); + size_t framesAvail = mProxy->framesAvailable(); cblk->lock.lock(); - if (cblk->flags & CBLK_INVALID_MSK) { + if (cblk->flags & CBLK_INVALID) { goto create_new_track; } cblk->lock.unlock(); @@ -974,18 +978,23 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) cblk->lock.unlock(); return WOULD_BLOCK; } - if (!(cblk->flags & CBLK_INVALID_MSK)) { + if (!(cblk->flags & CBLK_INVALID)) { mLock.unlock(); + // this condition is in shared memory, so if IAudioTrack and control block + // are replaced due to mediaserver death or IAudioTrack invalidation then + // cv won't be signalled, but fortunately the timeout will limit the wait result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs)); cblk->lock.unlock(); mLock.lock(); if (!mActive) { return status_t(STOPPED); } + // IAudioTrack may have been re-created while mLock was unlocked + cblk = mCblk; cblk->lock.lock(); } - if (cblk->flags & CBLK_INVALID_MSK) { + if (cblk->flags & CBLK_INVALID) { goto 
create_new_track; } if (CC_UNLIKELY(result != NO_ERROR)) { @@ -994,16 +1003,18 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) // timing out when a loop has been set and we have already written upto loop end // is a normal condition: no need to wake AudioFlinger up. if (cblk->user < cblk->loopEnd) { - ALOGW( "obtainBuffer timed out (is the CPU pegged?) %p name=%#x" - "user=%08x, server=%08x", this, cblk->mName, cblk->user, cblk->server); + ALOGW("obtainBuffer timed out (is the CPU pegged?) %p name=%#x user=%08x, " + "server=%08x", this, cblk->mName, cblk->user, cblk->server); //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140) cblk->lock.unlock(); result = mAudioTrack->start(); cblk->lock.lock(); if (result == DEAD_OBJECT) { - android_atomic_or(CBLK_INVALID_ON, &cblk->flags); + android_atomic_or(CBLK_INVALID, &cblk->flags); create_new_track: - result = restoreTrack_l(cblk, false); + audio_track_cblk_t* temp = cblk; + result = restoreTrack_l(temp, false /*fromStart*/); + cblk = temp; } if (result != NO_ERROR) { ALOGW("obtainBuffer create Track error %d", result); @@ -1021,7 +1032,7 @@ create_new_track: } // read the server count again start_loop_here: - framesAvail = cblk->framesAvailable_l(); + framesAvail = mProxy->framesAvailable_l(); } cblk->lock.unlock(); } @@ -1033,35 +1044,31 @@ create_new_track: } uint32_t u = cblk->user; - uint32_t bufferEnd = cblk->userBase + cblk->frameCount; + uint32_t bufferEnd = cblk->userBase + mFrameCount; if (framesReq > bufferEnd - u) { framesReq = bufferEnd - u; } - audioBuffer->flags = mMuted ? 
Buffer::MUTE : 0; - audioBuffer->channelCount = mChannelCount; audioBuffer->frameCount = framesReq; - audioBuffer->size = framesReq * cblk->frameSize; - if (audio_is_linear_pcm(mFormat)) { - audioBuffer->format = AUDIO_FORMAT_PCM_16_BIT; - } else { - audioBuffer->format = mFormat; - } - audioBuffer->raw = (int8_t *)cblk->buffer(u); + audioBuffer->size = framesReq * mFrameSizeAF; + audioBuffer->raw = mProxy->buffer(u); active = mActive; return active ? status_t(NO_ERROR) : status_t(STOPPED); } void AudioTrack::releaseBuffer(Buffer* audioBuffer) { + ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL); + AutoMutex lock(mLock); - mCblk->stepUser(audioBuffer->frameCount); + audio_track_cblk_t* cblk = mCblk; + (void) mProxy->stepUser(audioBuffer->frameCount); if (audioBuffer->frameCount > 0) { // restart track if it was disabled by audioflinger due to previous underrun - if (mActive && (mCblk->flags & CBLK_DISABLED_MSK)) { - android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags); - ALOGW("releaseBuffer() track %p name=%#x disabled, restarting", this, mCblk->mName); + if (mActive && (cblk->flags & CBLK_DISABLED)) { + android_atomic_and(~CBLK_DISABLED, &cblk->flags); + ALOGW("releaseBuffer() track %p name=%#x disabled, restarting", this, cblk->mName); mAudioTrack->start(); } } @@ -1072,8 +1079,9 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer) ssize_t AudioTrack::write(const void* buffer, size_t userSize) { - if (mSharedBuffer != 0) return INVALID_OPERATION; - if (mIsTimed) return INVALID_OPERATION; + if (mSharedBuffer != 0 || mIsTimed) { + return INVALID_OPERATION; + } if (ssize_t(userSize) < 0) { // Sanity-check: user is most-likely passing an error code, and it would @@ -1096,6 +1104,9 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize) sp<IMemory> iMem = mCblkMemory; mLock.unlock(); + // since mLock is unlocked the IAudioTrack and shared memory may be re-created, + // so all cblk references might still refer to old shared memory, but that should be 
benign + ssize_t written = 0; const int8_t *src = (const int8_t *)buffer; Buffer audioBuffer; @@ -1107,8 +1118,9 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize) status_t err = obtainBuffer(&audioBuffer, -1); if (err < 0) { // out of buffers, return #bytes written - if (err == status_t(NO_MORE_BUFFERS)) + if (err == status_t(NO_MORE_BUFFERS)) { break; + } return ssize_t(err); } @@ -1140,27 +1152,37 @@ TimedAudioTrack::TimedAudioTrack() { status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer) { + AutoMutex lock(mLock); status_t result = UNKNOWN_ERROR; + // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed + // while we are accessing the cblk + sp<IAudioTrack> audioTrack = mAudioTrack; + sp<IMemory> iMem = mCblkMemory; + // If the track is not invalid already, try to allocate a buffer. alloc // fails indicating that the server is dead, flag the track as invalid so // we can attempt to restore in just a bit. - if (!(mCblk->flags & CBLK_INVALID_MSK)) { + audio_track_cblk_t* cblk = mCblk; + if (!(cblk->flags & CBLK_INVALID)) { result = mAudioTrack->allocateTimedBuffer(size, buffer); if (result == DEAD_OBJECT) { - android_atomic_or(CBLK_INVALID_ON, &mCblk->flags); + android_atomic_or(CBLK_INVALID, &cblk->flags); } } // If the track is invalid at this point, attempt to restore it. and try the // allocation one more time. 
- if (mCblk->flags & CBLK_INVALID_MSK) { - mCblk->lock.lock(); - result = restoreTrack_l(mCblk, false); - mCblk->lock.unlock(); + if (cblk->flags & CBLK_INVALID) { + cblk->lock.lock(); + audio_track_cblk_t* temp = cblk; + result = restoreTrack_l(temp, false /*fromStart*/); + cblk = temp; + cblk->lock.unlock(); - if (result == OK) + if (result == OK) { result = mAudioTrack->allocateTimedBuffer(size, buffer); + } } return result; @@ -1172,10 +1194,11 @@ status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer, status_t status = mAudioTrack->queueTimedBuffer(buffer, pts); { AutoMutex lock(mLock); + audio_track_cblk_t* cblk = mCblk; // restart track if it was disabled by audioflinger due to previous underrun if (buffer->size() != 0 && status == NO_ERROR && - mActive && (mCblk->flags & CBLK_DISABLED_MSK)) { - android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags); + mActive && (cblk->flags & CBLK_DISABLED)) { + android_atomic_and(~CBLK_DISABLED, &cblk->flags); ALOGW("queueTimedBuffer() track %p disabled, restarting", this); mAudioTrack->start(); } @@ -1198,6 +1221,25 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) size_t writtenSize; mLock.lock(); + if (mAwaitBoost) { + mAwaitBoost = false; + mLock.unlock(); + static const int32_t kMaxTries = 5; + int32_t tryCounter = kMaxTries; + uint32_t pollUs = 10000; + do { + int policy = sched_getscheduler(0); + if (policy == SCHED_FIFO || policy == SCHED_RR) { + break; + } + usleep(pollUs); + pollUs <<= 1; + } while (tryCounter-- > 0); + if (tryCounter < 0) { + ALOGE("did not receive expected priority boost on time"); + } + return true; + } // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed // while we are accessing the cblk sp<IAudioTrack> audioTrack = mAudioTrack; @@ -1206,15 +1248,20 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) bool active = mActive; mLock.unlock(); + // since mLock is unlocked the IAudioTrack and 
shared memory may be re-created, + // so all cblk references might still refer to old shared memory, but that should be benign + // Manage underrun callback - if (active && (cblk->framesAvailable() == cblk->frameCount)) { + if (active && (mProxy->framesAvailable() == mFrameCount)) { ALOGV("Underrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags); - if (!(android_atomic_or(CBLK_UNDERRUN_ON, &cblk->flags) & CBLK_UNDERRUN_MSK)) { + if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) { mCbf(EVENT_UNDERRUN, mUserData, 0); - if (cblk->server == cblk->frameCount) { + if (cblk->server == mFrameCount) { mCbf(EVENT_BUFFER_END, mUserData, 0); } - if (mSharedBuffer != 0) return false; + if (mSharedBuffer != 0) { + return false; + } } } @@ -1265,12 +1312,15 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) status_t err = obtainBuffer(&audioBuffer, waitCount); if (err < NO_ERROR) { if (err != TIMED_OUT) { - ALOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up."); + ALOGE_IF(err != status_t(NO_MORE_BUFFERS), + "Error obtaining an audio buffer, giving up."); return false; } break; } - if (err == status_t(STOPPED)) return false; + if (err == status_t(STOPPED)) { + return false; + } // Divide buffer size by 2 to take into account the expansion // due to 8 to 16 bit conversion: the callback must fill only half @@ -1293,7 +1343,9 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) break; } - if (writtenSize > reqSize) writtenSize = reqSize; + if (writtenSize > reqSize) { + writtenSize = reqSize; + } if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) { // 8 to 16 bit conversion, note that source and destination are the same address @@ -1302,10 +1354,10 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) } audioBuffer.size = writtenSize; - // NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for - 
// 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of + // NOTE: cblk->frameSize is not equal to AudioTrack::frameSize() for + // 8 bit PCM data: in this case, cblk->frameSize is based on a sample size of // 16 bit. - audioBuffer.frameCount = writtenSize/mCblk->frameSize; + audioBuffer.frameCount = writtenSize / mFrameSizeAF; frames -= audioBuffer.frameCount; @@ -1321,112 +1373,93 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) return true; } -// must be called with mLock and cblk.lock held. Callers must also hold strong references on +// must be called with mLock and refCblk.lock held. Callers must also hold strong references on // the IAudioTrack and IMemory in case they are recreated here. -// If the IAudioTrack is successfully restored, the cblk pointer is updated -status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart) +// If the IAudioTrack is successfully restored, the refCblk pointer is updated +// FIXME Don't depend on caller to hold strong references. +status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& refCblk, bool fromStart) { status_t result; - if (!(android_atomic_or(CBLK_RESTORING_ON, &cblk->flags) & CBLK_RESTORING_MSK)) { - ALOGW("dead IAudioTrack, creating a new one from %s TID %d", - fromStart ? "start()" : "obtainBuffer()", gettid()); + audio_track_cblk_t* cblk = refCblk; + audio_track_cblk_t* newCblk = cblk; + ALOGW("dead IAudioTrack, creating a new one from %s", + fromStart ? 
"start()" : "obtainBuffer()"); - // signal old cblk condition so that other threads waiting for available buffers stop - // waiting now - cblk->cv.broadcast(); - cblk->lock.unlock(); + // signal old cblk condition so that other threads waiting for available buffers stop + // waiting now + cblk->cv.broadcast(); + cblk->lock.unlock(); - // refresh the audio configuration cache in this process to make sure we get new - // output parameters in getOutput_l() and createTrack_l() - AudioSystem::clearAudioConfigCache(); - - // if the new IAudioTrack is created, createTrack_l() will modify the - // following member variables: mAudioTrack, mCblkMemory and mCblk. - // It will also delete the strong references on previous IAudioTrack and IMemory - result = createTrack_l(mStreamType, - cblk->sampleRate, - mFormat, - mChannelMask, - mFrameCount, - mFlags, - mSharedBuffer, - getOutput_l()); - - if (result == NO_ERROR) { - uint32_t user = cblk->user; - uint32_t server = cblk->server; - // restore write index and set other indexes to reflect empty buffer status - mCblk->user = user; - mCblk->server = user; - mCblk->userBase = user; - mCblk->serverBase = user; - // restore loop: this is not guaranteed to succeed if new frame count is not - // compatible with loop length - setLoop_l(cblk->loopStart, cblk->loopEnd, cblk->loopCount); - if (!fromStart) { - mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; - // Make sure that a client relying on callback events indicating underrun or - // the actual amount of audio frames played (e.g SoundPool) receives them. - if (mSharedBuffer == 0) { - uint32_t frames = 0; - if (user > server) { - frames = ((user - server) > mCblk->frameCount) ? - mCblk->frameCount : (user - server); - memset(mCblk->buffers, 0, frames * mCblk->frameSize); - } - // restart playback even if buffer is not completely filled. 
- android_atomic_or(CBLK_FORCEREADY_ON, &mCblk->flags); - // stepUser() clears CBLK_UNDERRUN_ON flag enabling underrun callbacks to - // the client - mCblk->stepUser(frames); + // refresh the audio configuration cache in this process to make sure we get new + // output parameters in getOutput_l() and createTrack_l() + AudioSystem::clearAudioConfigCache(); + + // if the new IAudioTrack is created, createTrack_l() will modify the + // following member variables: mAudioTrack, mCblkMemory and mCblk. + // It will also delete the strong references on previous IAudioTrack and IMemory + result = createTrack_l(mStreamType, + mSampleRate, + mFormat, + mReqFrameCount, // so that frame count never goes down + mFlags, + mSharedBuffer, + getOutput_l()); + + if (result == NO_ERROR) { + uint32_t user = cblk->user; + uint32_t server = cblk->server; + // restore write index and set other indexes to reflect empty buffer status + newCblk = mCblk; + newCblk->user = user; + newCblk->server = user; + newCblk->userBase = user; + newCblk->serverBase = user; + // restore loop: this is not guaranteed to succeed if new frame count is not + // compatible with loop length + setLoop_l(cblk->loopStart, cblk->loopEnd, cblk->loopCount); + size_t frames = 0; + if (!fromStart) { + newCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; + // Make sure that a client relying on callback events indicating underrun or + // the actual amount of audio frames played (e.g SoundPool) receives them. + if (mSharedBuffer == 0) { + if (user > server) { + frames = ((user - server) > mFrameCount) ? 
+ mFrameCount : (user - server); + memset(mBuffers, 0, frames * mFrameSizeAF); } - } - if (mSharedBuffer != 0) { - mCblk->stepUser(mCblk->frameCount); - } - if (mActive) { - result = mAudioTrack->start(); - ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result); - } - if (fromStart && result == NO_ERROR) { - mNewPosition = mCblk->server + mUpdatePeriod; + // restart playback even if buffer is not completely filled. + android_atomic_or(CBLK_FORCEREADY, &newCblk->flags); } } - if (result != NO_ERROR) { - android_atomic_and(~CBLK_RESTORING_ON, &cblk->flags); - ALOGW_IF(result != NO_ERROR, "restoreTrack_l() failed status %d", result); + if (mSharedBuffer != 0) { + frames = mFrameCount; } - mRestoreStatus = result; - // signal old cblk condition for other threads waiting for restore completion - android_atomic_or(CBLK_RESTORED_ON, &cblk->flags); - cblk->cv.broadcast(); - } else { - if (!(cblk->flags & CBLK_RESTORED_MSK)) { - ALOGW("dead IAudioTrack, waiting for a new one TID %d", gettid()); - mLock.unlock(); - result = cblk->cv.waitRelative(cblk->lock, milliseconds(RESTORE_TIMEOUT_MS)); - if (result == NO_ERROR) { - result = mRestoreStatus; - } - cblk->lock.unlock(); - mLock.lock(); - } else { - ALOGW("dead IAudioTrack, already restored TID %d", gettid()); - result = mRestoreStatus; - cblk->lock.unlock(); + if (frames > 0) { + // stepUser() clears CBLK_UNDERRUN flag enabling underrun callbacks to + // the client + mProxy->stepUser(frames); + } + if (mActive) { + result = mAudioTrack->start(); + ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result); + } + if (fromStart && result == NO_ERROR) { + mNewPosition = newCblk->server + mUpdatePeriod; } } + ALOGW_IF(result != NO_ERROR, "restoreTrack_l() failed status %d", result); ALOGV("restoreTrack_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x", - result, mActive, mCblk, cblk, mCblk->flags, cblk->flags); + result, mActive, newCblk, cblk, 
newCblk->flags, cblk->flags); if (result == NO_ERROR) { // from now on we switch to the newly created cblk - cblk = mCblk; + refCblk = newCblk; } - cblk->lock.lock(); + newCblk->lock.lock(); - ALOGW_IF(result != NO_ERROR, "restoreTrack_l() error %d TID %d", result, gettid()); + ALOGW_IF(result != NO_ERROR, "restoreTrack_l() error %d", result); return result; } @@ -1439,11 +1472,13 @@ status_t AudioTrack::dump(int fd, const Vector<String16>& args) const String8 result; result.append(" AudioTrack::dump\n"); - snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType, mVolume[0], mVolume[1]); + snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType, + mVolume[0], mVolume[1]); result.append(buffer); - snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat, mChannelCount, (mCblk == 0) ? 0 : mCblk->frameCount); + snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat, + mChannelCount, mFrameCount); result.append(buffer); - snprintf(buffer, 255, " sample rate(%d), status(%d), muted(%d)\n", (mCblk == 0) ? 
0 : mCblk->sampleRate, mStatus, mMuted); + snprintf(buffer, 255, " sample rate(%u), status(%d)\n", mSampleRate, mStatus); result.append(buffer); snprintf(buffer, 255, " active(%d), latency (%d)\n", mActive, mLatency); result.append(buffer); @@ -1500,182 +1535,4 @@ void AudioTrack::AudioTrackThread::resume() } } -// ========================================================================= - - -audio_track_cblk_t::audio_track_cblk_t() - : lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0), - userBase(0), serverBase(0), buffers(NULL), frameCount(0), - loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000), - mSendLevel(0), flags(0) -{ -} - -uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount) -{ - ALOGV("stepuser %08x %08x %d", user, server, frameCount); - - uint32_t u = user; - u += frameCount; - // Ensure that user is never ahead of server for AudioRecord - if (flags & CBLK_DIRECTION_MSK) { - // If stepServer() has been called once, switch to normal obtainBuffer() timeout period - if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) { - bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; - } - } else if (u > server) { - ALOGW("stepUser occurred after track reset"); - u = server; - } - - uint32_t fc = this->frameCount; - if (u >= fc) { - // common case, user didn't just wrap - if (u - fc >= userBase ) { - userBase += fc; - } - } else if (u >= userBase + fc) { - // user just wrapped - userBase += fc; - } - - user = u; - - // Clear flow control error condition as new data has been written/read to/from buffer. 
- if (flags & CBLK_UNDERRUN_MSK) { - android_atomic_and(~CBLK_UNDERRUN_MSK, &flags); - } - - return u; -} - -bool audio_track_cblk_t::stepServer(uint32_t frameCount) -{ - ALOGV("stepserver %08x %08x %d", user, server, frameCount); - - if (!tryLock()) { - ALOGW("stepServer() could not lock cblk"); - return false; - } - - uint32_t s = server; - bool flushed = (s == user); - - s += frameCount; - if (flags & CBLK_DIRECTION_MSK) { - // Mark that we have read the first buffer so that next time stepUser() is called - // we switch to normal obtainBuffer() timeout period - if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) { - bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS - 1; - } - // It is possible that we receive a flush() - // while the mixer is processing a block: in this case, - // stepServer() is called After the flush() has reset u & s and - // we have s > u - if (flushed) { - ALOGW("stepServer occurred after track reset"); - s = user; - } - } - - if (s >= loopEnd) { - ALOGW_IF(s > loopEnd, "stepServer: s %u > loopEnd %u", s, loopEnd); - s = loopStart; - if (--loopCount == 0) { - loopEnd = UINT_MAX; - loopStart = UINT_MAX; - } - } - - uint32_t fc = this->frameCount; - if (s >= fc) { - // common case, server didn't just wrap - if (s - fc >= serverBase ) { - serverBase += fc; - } - } else if (s >= serverBase + fc) { - // server just wrapped - serverBase += fc; - } - - server = s; - - if (!(flags & CBLK_INVALID_MSK)) { - cv.signal(); - } - lock.unlock(); - return true; -} - -void* audio_track_cblk_t::buffer(uint32_t offset) const -{ - return (int8_t *)buffers + (offset - userBase) * frameSize; -} - -uint32_t audio_track_cblk_t::framesAvailable() -{ - Mutex::Autolock _l(lock); - return framesAvailable_l(); -} - -uint32_t audio_track_cblk_t::framesAvailable_l() -{ - uint32_t u = user; - uint32_t s = server; - - if (flags & CBLK_DIRECTION_MSK) { - uint32_t limit = (s < loopStart) ? 
s : loopStart; - return limit + frameCount - u; - } else { - return frameCount + u - s; - } -} - -uint32_t audio_track_cblk_t::framesReady() -{ - uint32_t u = user; - uint32_t s = server; - - if (flags & CBLK_DIRECTION_MSK) { - if (u < loopEnd) { - return u - s; - } else { - // do not block on mutex shared with client on AudioFlinger side - if (!tryLock()) { - ALOGW("framesReady() could not lock cblk"); - return 0; - } - uint32_t frames = UINT_MAX; - if (loopCount >= 0) { - frames = (loopEnd - loopStart)*loopCount + u - s; - } - lock.unlock(); - return frames; - } - } else { - return s - u; - } -} - -bool audio_track_cblk_t::tryLock() -{ - // the code below simulates lock-with-timeout - // we MUST do this to protect the AudioFlinger server - // as this lock is shared with the client. - status_t err; - - err = lock.tryLock(); - if (err == -EBUSY) { // just wait a bit - usleep(1000); - err = lock.tryLock(); - } - if (err != NO_ERROR) { - // probably, the client just died. - return false; - } - return true; -} - -// ------------------------------------------------------------------------- - }; // namespace android diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp new file mode 100644 index 0000000..13d47c9 --- /dev/null +++ b/media/libmedia/AudioTrackShared.cpp @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2007 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioTrackShared" +//#define LOG_NDEBUG 0 + +#include <private/media/AudioTrackShared.h> +#include <utils/Log.h> + +namespace android { + +audio_track_cblk_t::audio_track_cblk_t() + : lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0), + userBase(0), serverBase(0), frameCount_(0), + loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000), + mSampleRate(0), mSendLevel(0), flags(0) +{ +} + +uint32_t audio_track_cblk_t::stepUser(size_t stepCount, size_t frameCount, bool isOut) +{ + ALOGV("stepuser %08x %08x %d", user, server, stepCount); + + uint32_t u = user; + u += stepCount; + // Ensure that user is never ahead of server for AudioRecord + if (isOut) { + // If stepServer() has been called once, switch to normal obtainBuffer() timeout period + if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) { + bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; + } + } else if (u > server) { + ALOGW("stepUser occurred after track reset"); + u = server; + } + + if (u >= frameCount) { + // common case, user didn't just wrap + if (u - frameCount >= userBase ) { + userBase += frameCount; + } + } else if (u >= userBase + frameCount) { + // user just wrapped + userBase += frameCount; + } + + user = u; + + // Clear flow control error condition as new data has been written/read to/from buffer. 
+ if (flags & CBLK_UNDERRUN) { + android_atomic_and(~CBLK_UNDERRUN, &flags); + } + + return u; +} + +bool audio_track_cblk_t::stepServer(size_t stepCount, size_t frameCount, bool isOut) +{ + ALOGV("stepserver %08x %08x %d", user, server, stepCount); + + if (!tryLock()) { + ALOGW("stepServer() could not lock cblk"); + return false; + } + + uint32_t s = server; + bool flushed = (s == user); + + s += stepCount; + if (isOut) { + // Mark that we have read the first buffer so that next time stepUser() is called + // we switch to normal obtainBuffer() timeout period + if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) { + bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS - 1; + } + // It is possible that we receive a flush() + // while the mixer is processing a block: in this case, + // stepServer() is called After the flush() has reset u & s and + // we have s > u + if (flushed) { + ALOGW("stepServer occurred after track reset"); + s = user; + } + } + + if (s >= loopEnd) { + ALOGW_IF(s > loopEnd, "stepServer: s %u > loopEnd %u", s, loopEnd); + s = loopStart; + if (--loopCount == 0) { + loopEnd = UINT_MAX; + loopStart = UINT_MAX; + } + } + + if (s >= frameCount) { + // common case, server didn't just wrap + if (s - frameCount >= serverBase ) { + serverBase += frameCount; + } + } else if (s >= serverBase + frameCount) { + // server just wrapped + serverBase += frameCount; + } + + server = s; + + if (!(flags & CBLK_INVALID)) { + cv.signal(); + } + lock.unlock(); + return true; +} + +void* audio_track_cblk_t::buffer(void *buffers, size_t frameSize, uint32_t offset) const +{ + return (int8_t *)buffers + (offset - userBase) * frameSize; +} + +uint32_t audio_track_cblk_t::framesAvailable(size_t frameCount, bool isOut) +{ + Mutex::Autolock _l(lock); + return framesAvailable_l(frameCount, isOut); +} + +uint32_t audio_track_cblk_t::framesAvailable_l(size_t frameCount, bool isOut) +{ + uint32_t u = user; + uint32_t s = server; + + if (isOut) { + uint32_t limit = (s < loopStart) ? 
s : loopStart; + return limit + frameCount - u; + } else { + return frameCount + u - s; + } +} + +uint32_t audio_track_cblk_t::framesReady(bool isOut) +{ + uint32_t u = user; + uint32_t s = server; + + if (isOut) { + if (u < loopEnd) { + return u - s; + } else { + // do not block on mutex shared with client on AudioFlinger side + if (!tryLock()) { + ALOGW("framesReady() could not lock cblk"); + return 0; + } + uint32_t frames = UINT_MAX; + if (loopCount >= 0) { + frames = (loopEnd - loopStart)*loopCount + u - s; + } + lock.unlock(); + return frames; + } + } else { + return s - u; + } +} + +bool audio_track_cblk_t::tryLock() +{ + // the code below simulates lock-with-timeout + // we MUST do this to protect the AudioFlinger server + // as this lock is shared with the client. + status_t err; + + err = lock.tryLock(); + if (err == -EBUSY) { // just wait a bit + usleep(1000); + err = lock.tryLock(); + } + if (err != NO_ERROR) { + // probably, the client just died. + return false; + } + return true; +} + +} // namespace android diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp index ce8ffc4..2f18680 100644 --- a/media/libmedia/IAudioFlinger.cpp +++ b/media/libmedia/IAudioFlinger.cpp @@ -32,7 +32,7 @@ enum { CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION, OPEN_RECORD, SAMPLE_RATE, - CHANNEL_COUNT, // obsolete + RESERVED, // obsolete, was CHANNEL_COUNT FORMAT, FRAME_COUNT, LATENCY, @@ -84,13 +84,12 @@ public: } virtual sp<IAudioTrack> createTrack( - pid_t pid, audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, - track_flags_t flags, + size_t frameCount, + track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, @@ -100,13 +99,13 @@ public: Parcel data, reply; sp<IAudioTrack> track; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(pid); data.writeInt32((int32_t) streamType); 
data.writeInt32(sampleRate); data.writeInt32(format); data.writeInt32(channelMask); data.writeInt32(frameCount); - data.writeInt32((int32_t) flags); + track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT; + data.writeInt32(lFlags); data.writeStrongBinder(sharedBuffer->asBinder()); data.writeInt32((int32_t) output); data.writeInt32((int32_t) tid); @@ -119,6 +118,10 @@ public: if (lStatus != NO_ERROR) { ALOGE("createTrack error: %s", strerror(-lStatus)); } else { + lFlags = reply.readInt32(); + if (flags != NULL) { + *flags = lFlags; + } lSessionId = reply.readInt32(); if (sessionId != NULL) { *sessionId = lSessionId; @@ -133,12 +136,11 @@ public: } virtual sp<IAudioRecord> openRecord( - pid_t pid, audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, track_flags_t flags, pid_t tid, int *sessionId, @@ -147,7 +149,6 @@ public: Parcel data, reply; sp<IAudioRecord> record; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(pid); data.writeInt32((int32_t) input); data.writeInt32(sampleRate); data.writeInt32(format); @@ -186,17 +187,6 @@ public: return reply.readInt32(); } -#if 0 - virtual int channelCount(audio_io_handle_t output) const - { - Parcel data, reply; - data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32((int32_t) output); - remote()->transact(CHANNEL_COUNT, data, &reply); - return reply.readInt32(); - } -#endif - virtual audio_format_t format(audio_io_handle_t output) const { Parcel data, reply; @@ -501,7 +491,7 @@ public: return reply.readInt32(); } - virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, + virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, audio_io_handle_t output) const { Parcel data, reply; @@ -522,7 +512,7 @@ public: return status; } - virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const + virtual 
size_t getInputFramesLost(audio_io_handle_t ioHandle) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -618,7 +608,7 @@ public: return NO_ERROR; } - virtual sp<IEffect> createEffect(pid_t pid, + virtual sp<IEffect> createEffect( effect_descriptor_t *pDesc, const sp<IEffectClient>& client, int32_t priority, @@ -639,7 +629,6 @@ public: } data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(pid); data.write(pDesc, sizeof(effect_descriptor_t)); data.writeStrongBinder(client->asBinder()); data.writeInt32(priority); @@ -690,7 +679,7 @@ public: return (audio_module_handle_t) reply.readInt32(); } - virtual int32_t getPrimaryOutputSamplingRate() + virtual uint32_t getPrimaryOutputSamplingRate() { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -698,7 +687,7 @@ public: return reply.readInt32(); } - virtual int32_t getPrimaryOutputFrameCount() + virtual size_t getPrimaryOutputFrameCount() { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -718,21 +707,21 @@ status_t BnAudioFlinger::onTransact( switch (code) { case CREATE_TRACK: { CHECK_INTERFACE(IAudioFlinger, data, reply); - pid_t pid = data.readInt32(); int streamType = data.readInt32(); uint32_t sampleRate = data.readInt32(); audio_format_t format = (audio_format_t) data.readInt32(); audio_channel_mask_t channelMask = data.readInt32(); - size_t bufferCount = data.readInt32(); + size_t frameCount = data.readInt32(); track_flags_t flags = (track_flags_t) data.readInt32(); sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder()); audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); pid_t tid = (pid_t) data.readInt32(); int sessionId = data.readInt32(); status_t status; - sp<IAudioTrack> track = createTrack(pid, + sp<IAudioTrack> track = createTrack( (audio_stream_type_t) streamType, sampleRate, format, - channelMask, bufferCount, flags, 
buffer, output, tid, &sessionId, &status); + channelMask, frameCount, &flags, buffer, output, tid, &sessionId, &status); + reply->writeInt32(flags); reply->writeInt32(sessionId); reply->writeInt32(status); reply->writeStrongBinder(track->asBinder()); @@ -740,18 +729,17 @@ status_t BnAudioFlinger::onTransact( } break; case OPEN_RECORD: { CHECK_INTERFACE(IAudioFlinger, data, reply); - pid_t pid = data.readInt32(); audio_io_handle_t input = (audio_io_handle_t) data.readInt32(); uint32_t sampleRate = data.readInt32(); audio_format_t format = (audio_format_t) data.readInt32(); audio_channel_mask_t channelMask = data.readInt32(); - size_t bufferCount = data.readInt32(); + size_t frameCount = data.readInt32(); track_flags_t flags = (track_flags_t) data.readInt32(); pid_t tid = (pid_t) data.readInt32(); int sessionId = data.readInt32(); status_t status; - sp<IAudioRecord> record = openRecord(pid, input, - sampleRate, format, channelMask, bufferCount, flags, tid, &sessionId, &status); + sp<IAudioRecord> record = openRecord(input, + sampleRate, format, channelMask, frameCount, flags, tid, &sessionId, &status); reply->writeInt32(sessionId); reply->writeInt32(status); reply->writeStrongBinder(record->asBinder()); @@ -762,13 +750,6 @@ status_t BnAudioFlinger::onTransact( reply->writeInt32( sampleRate((audio_io_handle_t) data.readInt32()) ); return NO_ERROR; } break; -#if 0 - case CHANNEL_COUNT: { - CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32( channelCount((audio_io_handle_t) data.readInt32()) ); - return NO_ERROR; - } break; -#endif case FORMAT: { CHECK_INTERFACE(IAudioFlinger, data, reply); reply->writeInt32( format((audio_io_handle_t) data.readInt32()) ); @@ -865,7 +846,8 @@ status_t BnAudioFlinger::onTransact( case REGISTER_CLIENT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(data.readStrongBinder()); + sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>( + 
data.readStrongBinder()); registerClient(client); return NO_ERROR; } break; @@ -965,8 +947,8 @@ status_t BnAudioFlinger::onTransact( case GET_RENDER_POSITION: { CHECK_INTERFACE(IAudioFlinger, data, reply); audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); - uint32_t halFrames; - uint32_t dspFrames; + size_t halFrames; + size_t dspFrames; status_t status = getRenderPosition(&halFrames, &dspFrames, output); reply->writeInt32(status); if (status == NO_ERROR) { @@ -1032,7 +1014,6 @@ status_t BnAudioFlinger::onTransact( } case CREATE_EFFECT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - pid_t pid = data.readInt32(); effect_descriptor_t desc; data.read(&desc, sizeof(effect_descriptor_t)); sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder()); @@ -1043,7 +1024,8 @@ status_t BnAudioFlinger::onTransact( int id; int enabled; - sp<IEffect> effect = createEffect(pid, &desc, client, priority, output, sessionId, &status, &id, &enabled); + sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId, + &status, &id, &enabled); reply->writeInt32(status); reply->writeInt32(id); reply->writeInt32(enabled); diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp index 4178b29..2d1e0f8 100644 --- a/media/libmedia/IAudioFlingerClient.cpp +++ b/media/libmedia/IAudioFlingerClient.cpp @@ -50,7 +50,8 @@ public: ALOGV("ioConfigChanged stream %d", stream); data.writeInt32(stream); } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) { - const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2; + const AudioSystem::OutputDescriptor *desc = + (const AudioSystem::OutputDescriptor *)param2; data.writeInt32(desc->samplingRate); data.writeInt32(desc->format); data.writeInt32(desc->channels); diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp index 401437c..386c351 100644 --- 
a/media/libmedia/IAudioPolicyService.cpp +++ b/media/libmedia/IAudioPolicyService.cpp @@ -55,7 +55,8 @@ enum { IS_SOURCE_ACTIVE, GET_DEVICES_FOR_STREAM, QUERY_DEFAULT_PRE_PROCESSING, - SET_EFFECT_ENABLED + SET_EFFECT_ENABLED, + IS_STREAM_ACTIVE_REMOTELY }; class BpAudioPolicyService : public BpInterface<IAudioPolicyService> @@ -330,6 +331,16 @@ public: return reply.readInt32(); } + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + data.writeInt32((int32_t) stream); + data.writeInt32(inPastMs); + remote()->transact(IS_STREAM_ACTIVE_REMOTELY, data, &reply); + return reply.readInt32(); + } + virtual bool isSourceActive(audio_source_t source) const { Parcel data, reply; @@ -399,13 +410,15 @@ status_t BnAudioPolicyService::onTransact( case SET_PHONE_STATE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - reply->writeInt32(static_cast <uint32_t>(setPhoneState((audio_mode_t) data.readInt32()))); + reply->writeInt32(static_cast <uint32_t>(setPhoneState( + (audio_mode_t) data.readInt32()))); return NO_ERROR; } break; case SET_FORCE_USE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32()); + audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>( + data.readInt32()); audio_policy_forced_cfg_t config = static_cast <audio_policy_forced_cfg_t>(data.readInt32()); reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config))); @@ -414,7 +427,8 @@ status_t BnAudioPolicyService::onTransact( case GET_FORCE_USE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32()); + audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>( + data.readInt32()); reply->writeInt32(static_cast <uint32_t>(getForceUse(usage))); return 
NO_ERROR; } break; @@ -602,6 +616,14 @@ status_t BnAudioPolicyService::onTransact( return NO_ERROR; } break; + case IS_STREAM_ACTIVE_REMOTELY: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + audio_stream_type_t stream = (audio_stream_type_t) data.readInt32(); + uint32_t inPastMs = (uint32_t)data.readInt32(); + reply->writeInt32( isStreamActiveRemotely((audio_stream_type_t) stream, inPastMs) ); + return NO_ERROR; + } break; + case IS_SOURCE_ACTIVE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); audio_source_t source = (audio_source_t) data.readInt32(); diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp index 867d1a5..e92f8aa 100644 --- a/media/libmedia/IAudioTrack.cpp +++ b/media/libmedia/IAudioTrack.cpp @@ -33,7 +33,7 @@ enum { START, STOP, FLUSH, - MUTE, + RESERVED, // was MUTE PAUSE, ATTACH_AUX_EFFECT, ALLOCATE_TIMED_BUFFER, @@ -88,14 +88,6 @@ public: remote()->transact(FLUSH, data, &reply); } - virtual void mute(bool e) - { - Parcel data, reply; - data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); - data.writeInt32(e); - remote()->transact(MUTE, data, &reply); - } - virtual void pause() { Parcel data, reply; @@ -192,11 +184,6 @@ status_t BnAudioTrack::onTransact( flush(); return NO_ERROR; } break; - case MUTE: { - CHECK_INTERFACE(IAudioTrack, data, reply); - mute( data.readInt32() ); - return NO_ERROR; - } break; case PAUSE: { CHECK_INTERFACE(IAudioTrack, data, reply); pause(); diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp index 2defc2d..98b183a 100644 --- a/media/libmedia/ICrypto.cpp +++ b/media/libmedia/ICrypto.cpp @@ -48,7 +48,7 @@ struct BpCrypto : public BpInterface<ICrypto> { return reply.readInt32(); } - virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const { + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) { Parcel data, reply; data.writeInterfaceToken(ICrypto::getInterfaceDescriptor()); data.write(uuid, 16); diff --git a/media/libmedia/IDrm.cpp 
b/media/libmedia/IDrm.cpp new file mode 100644 index 0000000..902aeb2 --- /dev/null +++ b/media/libmedia/IDrm.cpp @@ -0,0 +1,739 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "IDrm" +#include <utils/Log.h> + +#include <binder/Parcel.h> +#include <media/IDrm.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AString.h> + +namespace android { + +enum { + INIT_CHECK = IBinder::FIRST_CALL_TRANSACTION, + IS_CRYPTO_SUPPORTED, + CREATE_PLUGIN, + DESTROY_PLUGIN, + OPEN_SESSION, + CLOSE_SESSION, + GET_KEY_REQUEST, + PROVIDE_KEY_RESPONSE, + REMOVE_KEYS, + RESTORE_KEYS, + QUERY_KEY_STATUS, + GET_PROVISION_REQUEST, + PROVIDE_PROVISION_RESPONSE, + GET_SECURE_STOPS, + RELEASE_SECURE_STOPS, + GET_PROPERTY_STRING, + GET_PROPERTY_BYTE_ARRAY, + SET_PROPERTY_STRING, + SET_PROPERTY_BYTE_ARRAY, + SET_CIPHER_ALGORITHM, + SET_MAC_ALGORITHM, + ENCRYPT, + DECRYPT, + SIGN, + VERIFY, + SET_LISTENER +}; + +struct BpDrm : public BpInterface<IDrm> { + BpDrm(const sp<IBinder> &impl) + : BpInterface<IDrm>(impl) { + } + + virtual status_t initCheck() const { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + remote()->transact(INIT_CHECK, data, &reply); + + return reply.readInt32(); + } + + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) { + Parcel 
data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + data.write(uuid, 16); + remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply); + + return reply.readInt32() != 0; + } + + virtual status_t createPlugin(const uint8_t uuid[16]) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + data.write(uuid, 16); + + remote()->transact(CREATE_PLUGIN, data, &reply); + + return reply.readInt32(); + } + + virtual status_t destroyPlugin() { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + remote()->transact(DESTROY_PLUGIN, data, &reply); + + return reply.readInt32(); + } + + virtual status_t openSession(Vector<uint8_t> &sessionId) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + remote()->transact(OPEN_SESSION, data, &reply); + readVector(reply, sessionId); + + return reply.readInt32(); + } + + virtual status_t closeSession(Vector<uint8_t> const &sessionId) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + remote()->transact(CLOSE_SESSION, data, &reply); + + return reply.readInt32(); + } + + virtual status_t + getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, DrmPlugin::KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + writeVector(data, initData); + data.writeString8(mimeType); + data.writeInt32((uint32_t)keyType); + + data.writeInt32(optionalParameters.size()); + for (size_t i = 0; i < optionalParameters.size(); ++i) { + data.writeString8(optionalParameters.keyAt(i)); + data.writeString8(optionalParameters.valueAt(i)); + } + remote()->transact(GET_KEY_REQUEST, data, &reply); + + readVector(reply, request); + defaultUrl = 
reply.readString8(); + + return reply.readInt32(); + } + + virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + writeVector(data, sessionId); + writeVector(data, response); + remote()->transact(PROVIDE_KEY_RESPONSE, data, &reply); + readVector(reply, keySetId); + + return reply.readInt32(); + } + + virtual status_t removeKeys(Vector<uint8_t> const &keySetId) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, keySetId); + remote()->transact(REMOVE_KEYS, data, &reply); + + return reply.readInt32(); + } + + virtual status_t restoreKeys(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keySetId) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + writeVector(data, keySetId); + remote()->transact(RESTORE_KEYS, data, &reply); + + return reply.readInt32(); + } + + virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + remote()->transact(QUERY_KEY_STATUS, data, &reply); + + infoMap.clear(); + size_t count = reply.readInt32(); + for (size_t i = 0; i < count; i++) { + String8 key = reply.readString8(); + String8 value = reply.readString8(); + infoMap.add(key, value); + } + return reply.readInt32(); + } + + virtual status_t getProvisionRequest(Vector<uint8_t> &request, + String8 &defaultUrl) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + remote()->transact(GET_PROVISION_REQUEST, data, &reply); + + readVector(reply, request); + defaultUrl = reply.readString8(); + + return reply.readInt32(); + } + + virtual status_t provideProvisionResponse(Vector<uint8_t> const 
&response) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, response); + remote()->transact(PROVIDE_PROVISION_RESPONSE, data, &reply); + + return reply.readInt32(); + } + + virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + remote()->transact(GET_SECURE_STOPS, data, &reply); + + secureStops.clear(); + uint32_t count = reply.readInt32(); + for (size_t i = 0; i < count; i++) { + Vector<uint8_t> secureStop; + readVector(reply, secureStop); + secureStops.push_back(secureStop); + } + return reply.readInt32(); + } + + virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, ssRelease); + remote()->transact(RELEASE_SECURE_STOPS, data, &reply); + + return reply.readInt32(); + } + + virtual status_t getPropertyString(String8 const &name, String8 &value) const { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + data.writeString8(name); + remote()->transact(GET_PROPERTY_STRING, data, &reply); + + value = reply.readString8(); + return reply.readInt32(); + } + + virtual status_t getPropertyByteArray(String8 const &name, Vector<uint8_t> &value) const { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + data.writeString8(name); + remote()->transact(GET_PROPERTY_BYTE_ARRAY, data, &reply); + + readVector(reply, value); + return reply.readInt32(); + } + + virtual status_t setPropertyString(String8 const &name, String8 const &value) const { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + data.writeString8(name); + data.writeString8(value); + remote()->transact(SET_PROPERTY_STRING, data, &reply); + + return reply.readInt32(); + } + + virtual status_t setPropertyByteArray(String8 const &name, + 
Vector<uint8_t> const &value) const { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + data.writeString8(name); + writeVector(data, value); + remote()->transact(SET_PROPERTY_BYTE_ARRAY, data, &reply); + + return reply.readInt32(); + } + + + virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + data.writeString8(algorithm); + remote()->transact(SET_CIPHER_ALGORITHM, data, &reply); + return reply.readInt32(); + } + + virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + data.writeString8(algorithm); + remote()->transact(SET_MAC_ALGORITHM, data, &reply); + return reply.readInt32(); + } + + virtual status_t encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + writeVector(data, keyId); + writeVector(data, input); + writeVector(data, iv); + + remote()->transact(ENCRYPT, data, &reply); + readVector(reply, output); + + return reply.readInt32(); + } + + virtual status_t decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + writeVector(data, keyId); + writeVector(data, input); + writeVector(data, iv); + + remote()->transact(DECRYPT, data, &reply); + readVector(reply, output); + + return reply.readInt32(); + } + + virtual status_t sign(Vector<uint8_t> const &sessionId, + 
Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + writeVector(data, keyId); + writeVector(data, message); + + remote()->transact(SIGN, data, &reply); + readVector(reply, signature); + + return reply.readInt32(); + } + + virtual status_t verify(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + + writeVector(data, sessionId); + writeVector(data, keyId); + writeVector(data, message); + writeVector(data, signature); + + remote()->transact(VERIFY, data, &reply); + match = (bool)reply.readInt32(); + return reply.readInt32(); + } + + virtual status_t setListener(const sp<IDrmClient>& listener) { + Parcel data, reply; + data.writeInterfaceToken(IDrm::getInterfaceDescriptor()); + data.writeStrongBinder(listener->asBinder()); + remote()->transact(SET_LISTENER, data, &reply); + return reply.readInt32(); + } + +private: + void readVector(Parcel &reply, Vector<uint8_t> &vector) const { + uint32_t size = reply.readInt32(); + vector.insertAt((size_t)0, size); + reply.read(vector.editArray(), size); + } + + void writeVector(Parcel &data, Vector<uint8_t> const &vector) const { + data.writeInt32(vector.size()); + data.write(vector.array(), vector.size()); + } + + DISALLOW_EVIL_CONSTRUCTORS(BpDrm); +}; + +IMPLEMENT_META_INTERFACE(Drm, "android.drm.IDrm"); + +//////////////////////////////////////////////////////////////////////////////// + +void BnDrm::readVector(const Parcel &data, Vector<uint8_t> &vector) const { + uint32_t size = data.readInt32(); + vector.insertAt((size_t)0, size); + data.read(vector.editArray(), size); +} + +void BnDrm::writeVector(Parcel *reply, Vector<uint8_t> const &vector) const { + 
reply->writeInt32(vector.size()); + reply->write(vector.array(), vector.size()); +} + +status_t BnDrm::onTransact( + uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) { + switch (code) { + case INIT_CHECK: + { + CHECK_INTERFACE(IDrm, data, reply); + reply->writeInt32(initCheck()); + return OK; + } + + case IS_CRYPTO_SUPPORTED: + { + CHECK_INTERFACE(IDrm, data, reply); + uint8_t uuid[16]; + data.read(uuid, sizeof(uuid)); + reply->writeInt32(isCryptoSchemeSupported(uuid)); + return OK; + } + + case CREATE_PLUGIN: + { + CHECK_INTERFACE(IDrm, data, reply); + uint8_t uuid[16]; + data.read(uuid, sizeof(uuid)); + reply->writeInt32(createPlugin(uuid)); + return OK; + } + + case DESTROY_PLUGIN: + { + CHECK_INTERFACE(IDrm, data, reply); + reply->writeInt32(destroyPlugin()); + return OK; + } + + case OPEN_SESSION: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId; + status_t result = openSession(sessionId); + writeVector(reply, sessionId); + reply->writeInt32(result); + return OK; + } + + case CLOSE_SESSION: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId; + readVector(data, sessionId); + reply->writeInt32(closeSession(sessionId)); + return OK; + } + + case GET_KEY_REQUEST: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId, initData; + + readVector(data, sessionId); + readVector(data, initData); + String8 mimeType = data.readString8(); + DrmPlugin::KeyType keyType = (DrmPlugin::KeyType)data.readInt32(); + + KeyedVector<String8, String8> optionalParameters; + uint32_t count = data.readInt32(); + for (size_t i = 0; i < count; ++i) { + String8 key, value; + key = data.readString8(); + value = data.readString8(); + optionalParameters.add(key, value); + } + + Vector<uint8_t> request; + String8 defaultUrl; + + status_t result = getKeyRequest(sessionId, initData, + mimeType, keyType, + optionalParameters, + request, defaultUrl); + writeVector(reply, request); + reply->writeString8(defaultUrl); + 
reply->writeInt32(result); + return OK; + } + + case PROVIDE_KEY_RESPONSE: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId, response, keySetId; + readVector(data, sessionId); + readVector(data, response); + uint32_t result = provideKeyResponse(sessionId, response, keySetId); + writeVector(reply, keySetId); + reply->writeInt32(result); + return OK; + } + + case REMOVE_KEYS: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> keySetId; + readVector(data, keySetId); + reply->writeInt32(removeKeys(keySetId)); + return OK; + } + + case RESTORE_KEYS: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId, keySetId; + readVector(data, sessionId); + readVector(data, keySetId); + reply->writeInt32(restoreKeys(sessionId, keySetId)); + return OK; + } + + case QUERY_KEY_STATUS: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId; + readVector(data, sessionId); + KeyedVector<String8, String8> infoMap; + status_t result = queryKeyStatus(sessionId, infoMap); + size_t count = infoMap.size(); + reply->writeInt32(count); + for (size_t i = 0; i < count; ++i) { + reply->writeString8(infoMap.keyAt(i)); + reply->writeString8(infoMap.valueAt(i)); + } + reply->writeInt32(result); + return OK; + } + + case GET_PROVISION_REQUEST: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> request; + String8 defaultUrl; + status_t result = getProvisionRequest(request, defaultUrl); + writeVector(reply, request); + reply->writeString8(defaultUrl); + reply->writeInt32(result); + return OK; + } + + case PROVIDE_PROVISION_RESPONSE: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> response; + readVector(data, response); + reply->writeInt32(provideProvisionResponse(response)); + return OK; + } + + case GET_SECURE_STOPS: + { + CHECK_INTERFACE(IDrm, data, reply); + List<Vector<uint8_t> > secureStops; + status_t result = getSecureStops(secureStops); + size_t count = secureStops.size(); + reply->writeInt32(count); + 
List<Vector<uint8_t> >::iterator iter = secureStops.begin(); + while(iter != secureStops.end()) { + size_t size = iter->size(); + reply->writeInt32(size); + reply->write(iter->array(), iter->size()); + iter++; + } + reply->writeInt32(result); + return OK; + } + + case RELEASE_SECURE_STOPS: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> ssRelease; + readVector(data, ssRelease); + reply->writeInt32(releaseSecureStops(ssRelease)); + return OK; + } + + case GET_PROPERTY_STRING: + { + CHECK_INTERFACE(IDrm, data, reply); + String8 name = data.readString8(); + String8 value; + status_t result = getPropertyString(name, value); + reply->writeString8(value); + reply->writeInt32(result); + return OK; + } + + case GET_PROPERTY_BYTE_ARRAY: + { + CHECK_INTERFACE(IDrm, data, reply); + String8 name = data.readString8(); + Vector<uint8_t> value; + status_t result = getPropertyByteArray(name, value); + writeVector(reply, value); + reply->writeInt32(result); + return OK; + } + + case SET_PROPERTY_STRING: + { + CHECK_INTERFACE(IDrm, data, reply); + String8 name = data.readString8(); + String8 value = data.readString8(); + reply->writeInt32(setPropertyString(name, value)); + return OK; + } + + case SET_PROPERTY_BYTE_ARRAY: + { + CHECK_INTERFACE(IDrm, data, reply); + String8 name = data.readString8(); + Vector<uint8_t> value; + readVector(data, value); + reply->writeInt32(setPropertyByteArray(name, value)); + return OK; + } + + case SET_CIPHER_ALGORITHM: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId; + readVector(data, sessionId); + String8 algorithm = data.readString8(); + reply->writeInt32(setCipherAlgorithm(sessionId, algorithm)); + return OK; + } + + case SET_MAC_ALGORITHM: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId; + readVector(data, sessionId); + String8 algorithm = data.readString8(); + reply->writeInt32(setMacAlgorithm(sessionId, algorithm)); + return OK; + } + + case ENCRYPT: + { + CHECK_INTERFACE(IDrm, data, 
reply); + Vector<uint8_t> sessionId, keyId, input, iv, output; + readVector(data, sessionId); + readVector(data, keyId); + readVector(data, input); + readVector(data, iv); + uint32_t result = encrypt(sessionId, keyId, input, iv, output); + writeVector(reply, output); + reply->writeInt32(result); + return OK; + } + + case DECRYPT: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId, keyId, input, iv, output; + readVector(data, sessionId); + readVector(data, keyId); + readVector(data, input); + readVector(data, iv); + uint32_t result = decrypt(sessionId, keyId, input, iv, output); + writeVector(reply, output); + reply->writeInt32(result); + return OK; + } + + case SIGN: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId, keyId, message, signature; + readVector(data, sessionId); + readVector(data, keyId); + readVector(data, message); + uint32_t result = sign(sessionId, keyId, message, signature); + writeVector(reply, signature); + reply->writeInt32(result); + return OK; + } + + case VERIFY: + { + CHECK_INTERFACE(IDrm, data, reply); + Vector<uint8_t> sessionId, keyId, message, signature; + readVector(data, sessionId); + readVector(data, keyId); + readVector(data, message); + readVector(data, signature); + bool match; + uint32_t result = verify(sessionId, keyId, message, signature, match); + reply->writeInt32(match); + reply->writeInt32(result); + return OK; + } + + case SET_LISTENER: { + CHECK_INTERFACE(IDrm, data, reply); + sp<IDrmClient> listener = + interface_cast<IDrmClient>(data.readStrongBinder()); + reply->writeInt32(setListener(listener)); + return NO_ERROR; + } break; + + default: + return BBinder::onTransact(code, data, reply, flags); + } +} + +} // namespace android + diff --git a/media/libmedia/IDrmClient.cpp b/media/libmedia/IDrmClient.cpp new file mode 100644 index 0000000..f50715e --- /dev/null +++ b/media/libmedia/IDrmClient.cpp @@ -0,0 +1,81 @@ +/* +** +** Copyright 2013, The Android Open Source Project +** +** 
Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "IDrmClient" +#include <utils/Log.h> + +#include <utils/RefBase.h> +#include <binder/IInterface.h> +#include <binder/Parcel.h> + +#include <media/IMediaPlayerClient.h> +#include <media/IDrmClient.h> + +namespace android { + +enum { + NOTIFY = IBinder::FIRST_CALL_TRANSACTION, +}; + +class BpDrmClient: public BpInterface<IDrmClient> +{ +public: + BpDrmClient(const sp<IBinder>& impl) + : BpInterface<IDrmClient>(impl) + { + } + + virtual void notify(DrmPlugin::EventType eventType, int extra, const Parcel *obj) + { + Parcel data, reply; + data.writeInterfaceToken(IDrmClient::getInterfaceDescriptor()); + data.writeInt32((int)eventType); + data.writeInt32(extra); + if (obj && obj->dataSize() > 0) { + data.appendFrom(const_cast<Parcel *>(obj), 0, obj->dataSize()); + } + remote()->transact(NOTIFY, data, &reply, IBinder::FLAG_ONEWAY); + } +}; + +IMPLEMENT_META_INTERFACE(DrmClient, "android.media.IDrmClient"); + +// ---------------------------------------------------------------------- + +status_t BnDrmClient::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + switch (code) { + case NOTIFY: { + CHECK_INTERFACE(IDrmClient, data, reply); + int eventType = data.readInt32(); + int extra = data.readInt32(); + Parcel obj; + if (data.dataAvail() > 0) { + obj.appendFrom(const_cast<Parcel *>(&data), data.dataPosition(), data.dataAvail()); + } + + 
notify((DrmPlugin::EventType)eventType, extra, &obj); + return NO_ERROR; + } break; + default: + return BBinder::onTransact(code, data, reply, flags); + } +} + +}; // namespace android diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp index 493f5a4..f13addc 100644 --- a/media/libmedia/IHDCP.cpp +++ b/media/libmedia/IHDCP.cpp @@ -31,6 +31,7 @@ enum { HDCP_INIT_ASYNC, HDCP_SHUTDOWN_ASYNC, HDCP_ENCRYPT, + HDCP_DECRYPT, }; struct BpHDCPObserver : public BpInterface<IHDCPObserver> { @@ -106,6 +107,29 @@ struct BpHDCP : public BpInterface<IHDCP> { return err; } + + virtual status_t decrypt( + const void *inData, size_t size, + uint32_t streamCTR, uint64_t inputCTR, + void *outData) { + Parcel data, reply; + data.writeInterfaceToken(IHDCP::getInterfaceDescriptor()); + data.writeInt32(size); + data.write(inData, size); + data.writeInt32(streamCTR); + data.writeInt64(inputCTR); + remote()->transact(HDCP_DECRYPT, data, &reply); + + status_t err = reply.readInt32(); + + if (err != OK) { + return err; + } + + reply.read(outData, size); + + return err; + } }; IMPLEMENT_META_INTERFACE(HDCP, "android.hardware.IHDCP"); @@ -198,6 +222,31 @@ status_t BnHDCP::onTransact( return OK; } + case HDCP_DECRYPT: + { + size_t size = data.readInt32(); + + void *inData = malloc(2 * size); + void *outData = (uint8_t *)inData + size; + + data.read(inData, size); + + uint32_t streamCTR = data.readInt32(); + uint64_t inputCTR = data.readInt64(); + status_t err = decrypt(inData, size, streamCTR, inputCTR, outData); + + reply->writeInt32(err); + + if (err == OK) { + reply->write(outData, size); + } + + free(inData); + inData = outData = NULL; + + return OK; + } + default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmedia/IMediaLogService.cpp b/media/libmedia/IMediaLogService.cpp new file mode 100644 index 0000000..33239a7 --- /dev/null +++ b/media/libmedia/IMediaLogService.cpp @@ -0,0 +1,94 @@ +/* +** +** Copyright 2007, The Android Open Source Project 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#define LOG_TAG "IMediaLogService" +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <stdint.h> +#include <sys/types.h> +#include <binder/Parcel.h> +#include <media/IMediaLogService.h> + +namespace android { + +enum { + REGISTER_WRITER = IBinder::FIRST_CALL_TRANSACTION, + UNREGISTER_WRITER, +}; + +class BpMediaLogService : public BpInterface<IMediaLogService> +{ +public: + BpMediaLogService(const sp<IBinder>& impl) + : BpInterface<IMediaLogService>(impl) + { + } + + virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) { + Parcel data, reply; + data.writeInterfaceToken(IMediaLogService::getInterfaceDescriptor()); + data.writeStrongBinder(shared->asBinder()); + data.writeInt32((int32_t) size); + data.writeCString(name); + status_t status = remote()->transact(REGISTER_WRITER, data, &reply); + // FIXME ignores status + } + + virtual void unregisterWriter(const sp<IMemory>& shared) { + Parcel data, reply; + data.writeInterfaceToken(IMediaLogService::getInterfaceDescriptor()); + data.writeStrongBinder(shared->asBinder()); + status_t status = remote()->transact(UNREGISTER_WRITER, data, &reply); + // FIXME ignores status + } + +}; + +IMPLEMENT_META_INTERFACE(MediaLogService, "android.media.IMediaLogService"); + +// ---------------------------------------------------------------------- + +status_t BnMediaLogService::onTransact( + uint32_t code, const Parcel& data, 
Parcel* reply, uint32_t flags) +{ + switch (code) { + + case REGISTER_WRITER: { + CHECK_INTERFACE(IMediaLogService, data, reply); + sp<IMemory> shared = interface_cast<IMemory>(data.readStrongBinder()); + size_t size = (size_t) data.readInt32(); + const char *name = data.readCString(); + registerWriter(shared, size, name); + return NO_ERROR; + } + + case UNREGISTER_WRITER: { + CHECK_INTERFACE(IMediaLogService, data, reply); + sp<IMemory> shared = interface_cast<IMemory>(data.readStrongBinder()); + unregisterWriter(shared); + return NO_ERROR; + } + + default: + return BBinder::onTransact(code, data, reply, flags); + } +} + +// ---------------------------------------------------------------------------- + +}; // namespace android diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp index cb07766..e79bcd2 100644 --- a/media/libmedia/IMediaPlayer.cpp +++ b/media/libmedia/IMediaPlayer.cpp @@ -24,7 +24,7 @@ #include <media/IMediaPlayer.h> #include <media/IStreamSource.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <utils/String8.h> namespace android { @@ -113,12 +113,12 @@ public: return reply.readInt32(); } - // pass the buffered ISurfaceTexture to the media player service - status_t setVideoSurfaceTexture(const sp<ISurfaceTexture>& surfaceTexture) + // pass the buffered IGraphicBufferProducer to the media player service + status_t setVideoSurfaceTexture(const sp<IGraphicBufferProducer>& bufferProducer) { Parcel data, reply; data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor()); - sp<IBinder> b(surfaceTexture->asBinder()); + sp<IBinder> b(bufferProducer->asBinder()); data.writeStrongBinder(b); remote()->transact(SET_VIDEO_SURFACETEXTURE, data, &reply); return reply.readInt32(); @@ -383,9 +383,9 @@ status_t BnMediaPlayer::onTransact( } case SET_VIDEO_SURFACETEXTURE: { CHECK_INTERFACE(IMediaPlayer, data, reply); - sp<ISurfaceTexture> surfaceTexture = - 
interface_cast<ISurfaceTexture>(data.readStrongBinder()); - reply->writeInt32(setVideoSurfaceTexture(surfaceTexture)); + sp<IGraphicBufferProducer> bufferProducer = + interface_cast<IGraphicBufferProducer>(data.readStrongBinder()); + reply->writeInt32(setVideoSurfaceTexture(bufferProducer)); return NO_ERROR; } break; case PREPARE_ASYNC: { diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp index c0a0260..74f574d 100644 --- a/media/libmedia/IMediaPlayerService.cpp +++ b/media/libmedia/IMediaPlayerService.cpp @@ -21,6 +21,7 @@ #include <binder/Parcel.h> #include <binder/IMemory.h> #include <media/ICrypto.h> +#include <media/IDrm.h> #include <media/IHDCP.h> #include <media/IMediaPlayerService.h> #include <media/IMediaRecorder.h> @@ -42,10 +43,12 @@ enum { CREATE_METADATA_RETRIEVER, GET_OMX, MAKE_CRYPTO, + MAKE_DRM, MAKE_HDCP, ADD_BATTERY_DATA, PULL_BATTERY_DATA, LISTEN_FOR_REMOTE_DISPLAY, + UPDATE_PROXY_CONFIG, }; class BpMediaPlayerService: public BpInterface<IMediaPlayerService> @@ -56,20 +59,18 @@ public: { } - virtual sp<IMediaMetadataRetriever> createMetadataRetriever(pid_t pid) + virtual sp<IMediaMetadataRetriever> createMetadataRetriever() { Parcel data, reply; data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); - data.writeInt32(pid); remote()->transact(CREATE_METADATA_RETRIEVER, data, &reply); return interface_cast<IMediaMetadataRetriever>(reply.readStrongBinder()); } virtual sp<IMediaPlayer> create( - pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId) { + const sp<IMediaPlayerClient>& client, int audioSessionId) { Parcel data, reply; data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); - data.writeInt32(pid); data.writeStrongBinder(client->asBinder()); data.writeInt32(audioSessionId); @@ -77,11 +78,10 @@ public: return interface_cast<IMediaPlayer>(reply.readStrongBinder()); } - virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid) + virtual 
sp<IMediaRecorder> createMediaRecorder() { Parcel data, reply; data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); - data.writeInt32(pid); remote()->transact(CREATE_MEDIA_RECORDER, data, &reply); return interface_cast<IMediaRecorder>(reply.readStrongBinder()); } @@ -126,9 +126,17 @@ public: return interface_cast<ICrypto>(reply.readStrongBinder()); } - virtual sp<IHDCP> makeHDCP() { + virtual sp<IDrm> makeDrm() { Parcel data, reply; data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); + remote()->transact(MAKE_DRM, data, &reply); + return interface_cast<IDrm>(reply.readStrongBinder()); + } + + virtual sp<IHDCP> makeHDCP(bool createEncryptionModule) { + Parcel data, reply; + data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); + data.writeInt32(createEncryptionModule); remote()->transact(MAKE_HDCP, data, &reply); return interface_cast<IHDCP>(reply.readStrongBinder()); } @@ -156,6 +164,25 @@ public: remote()->transact(LISTEN_FOR_REMOTE_DISPLAY, data, &reply); return interface_cast<IRemoteDisplay>(reply.readStrongBinder()); } + + virtual status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + Parcel data, reply; + + data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); + if (host == NULL) { + data.writeInt32(0); + } else { + data.writeInt32(1); + data.writeCString(host); + data.writeInt32(port); + data.writeCString(exclusionList); + } + + remote()->transact(UPDATE_PROXY_CONFIG, data, &reply); + + return reply.readInt32(); + } }; IMPLEMENT_META_INTERFACE(MediaPlayerService, "android.media.IMediaPlayerService"); @@ -168,11 +195,10 @@ status_t BnMediaPlayerService::onTransact( switch (code) { case CREATE: { CHECK_INTERFACE(IMediaPlayerService, data, reply); - pid_t pid = data.readInt32(); sp<IMediaPlayerClient> client = interface_cast<IMediaPlayerClient>(data.readStrongBinder()); int audioSessionId = data.readInt32(); - sp<IMediaPlayer> player = create(pid, 
client, audioSessionId); + sp<IMediaPlayer> player = create(client, audioSessionId); reply->writeStrongBinder(player->asBinder()); return NO_ERROR; } break; @@ -206,15 +232,13 @@ status_t BnMediaPlayerService::onTransact( } break; case CREATE_MEDIA_RECORDER: { CHECK_INTERFACE(IMediaPlayerService, data, reply); - pid_t pid = data.readInt32(); - sp<IMediaRecorder> recorder = createMediaRecorder(pid); + sp<IMediaRecorder> recorder = createMediaRecorder(); reply->writeStrongBinder(recorder->asBinder()); return NO_ERROR; } break; case CREATE_METADATA_RETRIEVER: { CHECK_INTERFACE(IMediaPlayerService, data, reply); - pid_t pid = data.readInt32(); - sp<IMediaMetadataRetriever> retriever = createMetadataRetriever(pid); + sp<IMediaMetadataRetriever> retriever = createMetadataRetriever(); reply->writeStrongBinder(retriever->asBinder()); return NO_ERROR; } break; @@ -230,9 +254,16 @@ status_t BnMediaPlayerService::onTransact( reply->writeStrongBinder(crypto->asBinder()); return NO_ERROR; } break; + case MAKE_DRM: { + CHECK_INTERFACE(IMediaPlayerService, data, reply); + sp<IDrm> drm = makeDrm(); + reply->writeStrongBinder(drm->asBinder()); + return NO_ERROR; + } break; case MAKE_HDCP: { CHECK_INTERFACE(IMediaPlayerService, data, reply); - sp<IHDCP> hdcp = makeHDCP(); + bool createEncryptionModule = data.readInt32(); + sp<IHDCP> hdcp = makeHDCP(createEncryptionModule); reply->writeStrongBinder(hdcp->asBinder()); return NO_ERROR; } break; @@ -256,6 +287,24 @@ status_t BnMediaPlayerService::onTransact( reply->writeStrongBinder(display->asBinder()); return NO_ERROR; } break; + case UPDATE_PROXY_CONFIG: + { + CHECK_INTERFACE(IMediaPlayerService, data, reply); + + const char *host = NULL; + int32_t port = 0; + const char *exclusionList = NULL; + + if (data.readInt32()) { + host = data.readCString(); + port = data.readInt32(); + exclusionList = data.readCString(); + } + + reply->writeInt32(updateProxyConfig(host, port, exclusionList)); + + return OK; + } default: return 
BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp index a710fd7..8e58162 100644 --- a/media/libmedia/IMediaRecorder.cpp +++ b/media/libmedia/IMediaRecorder.cpp @@ -23,7 +23,7 @@ #include <media/IMediaRecorderClient.h> #include <media/IMediaRecorder.h> #include <gui/Surface.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <unistd.h> @@ -51,7 +51,8 @@ enum { SET_PARAMETERS, SET_PREVIEW_SURFACE, SET_CAMERA, - SET_LISTENER + SET_LISTENER, + SET_CLIENT_NAME }; class BpMediaRecorder: public BpInterface<IMediaRecorder> @@ -73,7 +74,7 @@ public: return reply.readInt32(); } - sp<ISurfaceTexture> querySurfaceMediaSource() + sp<IGraphicBufferProducer> querySurfaceMediaSource() { ALOGV("Query SurfaceMediaSource"); Parcel data, reply; @@ -83,15 +84,15 @@ public: if (returnedNull) { return NULL; } - return interface_cast<ISurfaceTexture>(reply.readStrongBinder()); + return interface_cast<IGraphicBufferProducer>(reply.readStrongBinder()); } - status_t setPreviewSurface(const sp<Surface>& surface) + status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) { ALOGV("setPreviewSurface(%p)", surface.get()); Parcel data, reply; data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor()); - Surface::writeToParcel(surface, &data); + data.writeStrongBinder(surface->asBinder()); remote()->transact(SET_PREVIEW_SURFACE, data, &reply); return reply.readInt32(); } @@ -217,6 +218,16 @@ public: return reply.readInt32(); } + status_t setClientName(const String16& clientName) + { + ALOGV("setClientName(%s)", String8(clientName).string()); + Parcel data, reply; + data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor()); + data.writeString16(clientName); + remote()->transact(SET_CLIENT_NAME, data, &reply); + return reply.readInt32(); + } + status_t prepare() { ALOGV("prepare"); @@ -423,10 +434,16 @@ status_t BnMediaRecorder::onTransact( 
reply->writeInt32(setListener(listener)); return NO_ERROR; } break; + case SET_CLIENT_NAME: { + ALOGV("SET_CLIENT_NAME"); + CHECK_INTERFACE(IMediaRecorder, data, reply); + reply->writeInt32(setClientName(data.readString16())); + return NO_ERROR; + } case SET_PREVIEW_SURFACE: { ALOGV("SET_PREVIEW_SURFACE"); CHECK_INTERFACE(IMediaRecorder, data, reply); - sp<Surface> surface = Surface::readFromParcel(data); + sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(data.readStrongBinder()); reply->writeInt32(setPreviewSurface(surface)); return NO_ERROR; } break; @@ -444,7 +461,7 @@ status_t BnMediaRecorder::onTransact( CHECK_INTERFACE(IMediaRecorder, data, reply); // call the mediaserver side to create // a surfacemediasource - sp<ISurfaceTexture> surfaceMediaSource = querySurfaceMediaSource(); + sp<IGraphicBufferProducer> surfaceMediaSource = querySurfaceMediaSource(); // The mediaserver might have failed to create a source int returnedNull= (surfaceMediaSource == NULL) ? 
1 : 0 ; reply->writeInt32(returnedNull); diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp index 48e427a..d6cd43a 100644 --- a/media/libmedia/IOMX.cpp +++ b/media/libmedia/IOMX.cpp @@ -40,6 +40,8 @@ enum { ENABLE_GRAPHIC_BUFFERS, USE_BUFFER, USE_GRAPHIC_BUFFER, + CREATE_INPUT_SURFACE, + SIGNAL_END_OF_INPUT_STREAM, STORE_META_DATA_IN_BUFFERS, ALLOC_BUFFER, ALLOC_BUFFER_WITH_BACKUP, @@ -280,6 +282,45 @@ public: return err; } + virtual status_t createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer) { + Parcel data, reply; + status_t err; + data.writeInterfaceToken(IOMX::getInterfaceDescriptor()); + data.writeIntPtr((intptr_t)node); + data.writeInt32(port_index); + err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply); + if (err != OK) { + ALOGW("binder transaction failed: %d", err); + return err; + } + + err = reply.readInt32(); + if (err != OK) { + return err; + } + + *bufferProducer = IGraphicBufferProducer::asInterface( + reply.readStrongBinder()); + + return err; + } + + virtual status_t signalEndOfInputStream(node_id node) { + Parcel data, reply; + status_t err; + data.writeInterfaceToken(IOMX::getInterfaceDescriptor()); + data.writeIntPtr((intptr_t)node); + err = remote()->transact(SIGNAL_END_OF_INPUT_STREAM, data, &reply); + if (err != OK) { + ALOGW("binder transaction failed: %d", err); + return err; + } + + return reply.readInt32(); + } + virtual status_t storeMetaDataInBuffers( node_id node, OMX_U32 port_index, OMX_BOOL enable) { Parcel data, reply; @@ -404,7 +445,7 @@ IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX"); //////////////////////////////////////////////////////////////////////////////// -#define CHECK_INTERFACE(interface, data, reply) \ +#define CHECK_OMX_INTERFACE(interface, data, reply) \ do { if (!data.enforceInterface(interface::getInterfaceDescriptor())) { \ ALOGW("Call incorrectly routed to " #interface); \ return PERMISSION_DENIED; \ @@ -415,7 +456,7 @@ status_t 
BnOMX::onTransact( switch (code) { case LIVES_LOCALLY: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void *)data.readIntPtr(); pid_t pid = (pid_t)data.readInt32(); reply->writeInt32(livesLocally(node, pid)); @@ -425,7 +466,7 @@ status_t BnOMX::onTransact( case LIST_NODES: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); List<ComponentInfo> list; listNodes(&list); @@ -448,7 +489,7 @@ status_t BnOMX::onTransact( case ALLOCATE_NODE: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); const char *name = data.readCString(); @@ -468,7 +509,7 @@ status_t BnOMX::onTransact( case FREE_NODE: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); @@ -479,7 +520,7 @@ status_t BnOMX::onTransact( case SEND_COMMAND: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); @@ -497,7 +538,7 @@ status_t BnOMX::onTransact( case GET_CONFIG: case SET_CONFIG: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_INDEXTYPE index = static_cast<OMX_INDEXTYPE>(data.readInt32()); @@ -539,7 +580,7 @@ status_t BnOMX::onTransact( case GET_STATE: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_STATETYPE state = OMX_StateInvalid; @@ -553,7 +594,7 @@ status_t BnOMX::onTransact( case ENABLE_GRAPHIC_BUFFERS: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -567,7 +608,7 @@ status_t BnOMX::onTransact( case GET_GRAPHIC_BUFFER_USAGE: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ 
-582,7 +623,7 @@ status_t BnOMX::onTransact( case USE_BUFFER: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -602,7 +643,7 @@ status_t BnOMX::onTransact( case USE_GRAPHIC_BUFFER: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -621,9 +662,41 @@ status_t BnOMX::onTransact( return NO_ERROR; } + case CREATE_INPUT_SURFACE: + { + CHECK_OMX_INTERFACE(IOMX, data, reply); + + node_id node = (void*)data.readIntPtr(); + OMX_U32 port_index = data.readInt32(); + + sp<IGraphicBufferProducer> bufferProducer; + status_t err = createInputSurface(node, port_index, + &bufferProducer); + + reply->writeInt32(err); + + if (err == OK) { + reply->writeStrongBinder(bufferProducer->asBinder()); + } + + return NO_ERROR; + } + + case SIGNAL_END_OF_INPUT_STREAM: + { + CHECK_OMX_INTERFACE(IOMX, data, reply); + + node_id node = (void*)data.readIntPtr(); + + status_t err = signalEndOfInputStream(node); + reply->writeInt32(err); + + return NO_ERROR; + } + case STORE_META_DATA_IN_BUFFERS: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -637,7 +710,7 @@ status_t BnOMX::onTransact( case ALLOC_BUFFER: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -659,7 +732,7 @@ status_t BnOMX::onTransact( case ALLOC_BUFFER_WITH_BACKUP: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -681,7 +754,7 @@ status_t BnOMX::onTransact( case FREE_BUFFER: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node 
= (void*)data.readIntPtr(); OMX_U32 port_index = data.readInt32(); @@ -693,7 +766,7 @@ status_t BnOMX::onTransact( case FILL_BUFFER: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); buffer_id buffer = (void*)data.readIntPtr(); @@ -704,7 +777,7 @@ status_t BnOMX::onTransact( case EMPTY_BUFFER: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); buffer_id buffer = (void*)data.readIntPtr(); @@ -723,7 +796,7 @@ status_t BnOMX::onTransact( case GET_EXTENSION_INDEX: { - CHECK_INTERFACE(IOMX, data, reply); + CHECK_OMX_INTERFACE(IOMX, data, reply); node_id node = (void*)data.readIntPtr(); const char *parameter_name = data.readCString(); @@ -769,7 +842,7 @@ status_t BnOMXObserver::onTransact( switch (code) { case OBSERVER_ON_MSG: { - CHECK_INTERFACE(IOMXObserver, data, reply); + CHECK_OMX_INTERFACE(IOMXObserver, data, reply); omx_message msg; data.read(&msg, sizeof(msg)); diff --git a/media/libmedia/IRemoteDisplayClient.cpp b/media/libmedia/IRemoteDisplayClient.cpp index 4a1b570..5c494b3 100644 --- a/media/libmedia/IRemoteDisplayClient.cpp +++ b/media/libmedia/IRemoteDisplayClient.cpp @@ -18,7 +18,7 @@ #include <sys/types.h> #include <media/IRemoteDisplayClient.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <utils/String8.h> namespace android { @@ -37,12 +37,12 @@ public: { } - void onDisplayConnected(const sp<ISurfaceTexture>& surfaceTexture, + void onDisplayConnected(const sp<IGraphicBufferProducer>& bufferProducer, uint32_t width, uint32_t height, uint32_t flags) { Parcel data, reply; data.writeInterfaceToken(IRemoteDisplayClient::getInterfaceDescriptor()); - data.writeStrongBinder(surfaceTexture->asBinder()); + data.writeStrongBinder(bufferProducer->asBinder()); data.writeInt32(width); data.writeInt32(height); data.writeInt32(flags); @@ -75,8 +75,8 @@ status_t 
BnRemoteDisplayClient::onTransact( switch (code) { case ON_DISPLAY_CONNECTED: { CHECK_INTERFACE(IRemoteDisplayClient, data, reply); - sp<ISurfaceTexture> surfaceTexture( - interface_cast<ISurfaceTexture>(data.readStrongBinder())); + sp<IGraphicBufferProducer> surfaceTexture( + interface_cast<IGraphicBufferProducer>(data.readStrongBinder())); uint32_t width = data.readInt32(); uint32_t height = data.readInt32(); uint32_t flags = data.readInt32(); diff --git a/media/libmedia/SingleStateQueue.cpp b/media/libmedia/SingleStateQueue.cpp new file mode 100644 index 0000000..3503baa --- /dev/null +++ b/media/libmedia/SingleStateQueue.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <new> +#include <cutils/atomic.h> +#include <cutils/atomic-inline.h> // for android_memory_barrier() +#include <media/SingleStateQueue.h> + +namespace android { + +template<typename T> SingleStateQueue<T>::Mutator::Mutator(Shared *shared) + : mSequence(0), mShared((Shared *) shared) +{ + // exactly one of Mutator and Observer must initialize, currently it is Observer + //shared->init(); +} + +template<typename T> int32_t SingleStateQueue<T>::Mutator::push(const T& value) +{ + Shared *shared = mShared; + int32_t sequence = mSequence; + sequence++; + android_atomic_acquire_store(sequence, &shared->mSequence); + shared->mValue = value; + sequence++; + android_atomic_release_store(sequence, &shared->mSequence); + mSequence = sequence; + // consider signalling a futex here, if we know that observer is waiting + return sequence; +} + +template<typename T> bool SingleStateQueue<T>::Mutator::ack() +{ + return mShared->mAck - mSequence == 0; +} + +template<typename T> bool SingleStateQueue<T>::Mutator::ack(int32_t sequence) +{ + // this relies on 2's complement rollover to detect an ancient sequence number + return mShared->mAck - sequence >= 0; +} + +template<typename T> SingleStateQueue<T>::Observer::Observer(Shared *shared) + : mSequence(0), mSeed(1), mShared((Shared *) shared) +{ + // exactly one of Mutator and Observer must initialize, currently it is Observer + shared->init(); +} + +template<typename T> bool SingleStateQueue<T>::Observer::poll(T& value) +{ + Shared *shared = mShared; + int32_t before = shared->mSequence; + if (before == mSequence) { + return false; + } + for (int tries = 0; ; ) { + const int MAX_TRIES = 5; + if (before & 1) { + if (++tries >= MAX_TRIES) { + return false; + } + before = shared->mSequence; + } else { + android_memory_barrier(); + T temp = shared->mValue; + int32_t after = android_atomic_release_load(&shared->mSequence); + if (after == before) { + value = temp; + shared->mAck = before; + mSequence = before; + return 
true; + } + if (++tries >= MAX_TRIES) { + return false; + } + before = after; + } + } +} + +#if 0 +template<typename T> SingleStateQueue<T>::SingleStateQueue(void /*Shared*/ *shared) +{ + ((Shared *) shared)->init(); +} +#endif + +} // namespace android + +// hack for gcc +#ifdef SINGLE_STATE_QUEUE_INSTANTIATIONS +#include SINGLE_STATE_QUEUE_INSTANTIATIONS +#endif diff --git a/media/libstagefright/wifi-display/TimeSeries.h b/media/libmedia/SingleStateQueueInstantiations.cpp index c818d51..2afebe9 100644 --- a/media/libstagefright/wifi-display/TimeSeries.h +++ b/media/libmedia/SingleStateQueueInstantiations.cpp @@ -1,11 +1,11 @@ /* - * Copyright 2012, The Android Open Source Project + * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,33 +14,13 @@ * limitations under the License. 
*/ -#ifndef TIME_SERIES_H_ +#include <media/SingleStateQueue.h> +#include <private/media/StaticAudioTrackState.h> -#define TIME_SERIES_H_ - -#include <sys/types.h> +// FIXME hack for gcc namespace android { -struct TimeSeries { - TimeSeries(); - - void add(double val); - - double mean() const; - double sdev() const; - -private: - enum { - kHistorySize = 20 - }; - double mValues[kHistorySize]; - - size_t mCount; - double mSum; -}; - -} // namespace android - -#endif // TIME_SERIES_H_ +template class SingleStateQueue<StaticAudioTrackState>; // typedef StaticAudioTrackSingleStateQueue +} diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp index abc8899..ee70ef7 100644 --- a/media/libmedia/SoundPool.cpp +++ b/media/libmedia/SoundPool.cpp @@ -489,7 +489,7 @@ Sample::~Sample() ::close(mFd); } mData.clear(); - delete mUrl; + free(mUrl); } status_t Sample::doLoad() @@ -568,8 +568,8 @@ void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftV } // initialize track - int afFrameCount; - int afSampleRate; + size_t afFrameCount; + uint32_t afSampleRate; audio_stream_type_t streamType = mSoundPool->streamType(); if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) { afFrameCount = kDefaultFrameCount; diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp index 253602d..f55b697 100644 --- a/media/libmedia/ToneGenerator.cpp +++ b/media/libmedia/ToneGenerator.cpp @@ -803,6 +803,7 @@ ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume); mState = TONE_IDLE; + mpAudioTrack = NULL; if (AudioSystem::getOutputSamplingRate(&mSamplingRate, streamType) != NO_ERROR) { ALOGE("Unable to marshal AudioFlinger"); @@ -811,7 +812,6 @@ ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool mThreadCanCallJava = threadCanCallJava; mStreamType = streamType; mVolume = volume; - 
mpAudioTrack = NULL; mpToneDesc = NULL; mpNewToneDesc = NULL; // Generate tone by chunks of 20 ms to keep cadencing precision @@ -885,6 +885,11 @@ bool ToneGenerator::startTone(tone_type toneType, int durationMs) { if ((toneType < 0) || (toneType >= NUM_TONES)) return lResult; + toneType = getToneForRegion(toneType); + if (toneType == TONE_CDMA_SIGNAL_OFF) { + return true; + } + if (mState == TONE_IDLE) { ALOGV("startTone: try to re-init AudioTrack"); if (!initAudioTrack()) { @@ -897,7 +902,6 @@ bool ToneGenerator::startTone(tone_type toneType, int durationMs) { mLock.lock(); // Get descriptor for requested tone - toneType = getToneForRegion(toneType); mpNewToneDesc = &sToneDescriptors[toneType]; mDurationMs = durationMs; @@ -918,6 +922,9 @@ bool ToneGenerator::startTone(tone_type toneType, int durationMs) { ALOGV("Immediate start, time %d", (unsigned int)(systemTime()/1000000)); lResult = true; mState = TONE_STARTING; + if (clock_gettime(CLOCK_MONOTONIC, &mStartTime) != 0) { + mStartTime.tv_sec = 0; + } mLock.unlock(); mpAudioTrack->start(); mLock.lock(); @@ -936,6 +943,7 @@ bool ToneGenerator::startTone(tone_type toneType, int durationMs) { } else { ALOGV("Delayed start"); mState = TONE_RESTARTING; + mStartTime.tv_sec = 0; lStatus = mWaitCbkCond.waitRelative(mLock, seconds(3)); if (lStatus == NO_ERROR) { if (mState != TONE_IDLE) { @@ -972,21 +980,50 @@ void ToneGenerator::stopTone() { ALOGV("stopTone"); mLock.lock(); - if (mState == TONE_PLAYING || mState == TONE_STARTING || mState == TONE_RESTARTING) { - mState = TONE_STOPPING; + if (mState != TONE_IDLE && mState != TONE_INIT) { + if (mState == TONE_PLAYING || mState == TONE_STARTING || mState == TONE_RESTARTING) { + struct timespec stopTime; + // If the start time is valid, make sure that the number of audio samples produced + // corresponds at least to the time between the start and stop commands. + // This is needed in case of cold start of the output stream. 
+ if ((mStartTime.tv_sec != 0) && (clock_gettime(CLOCK_MONOTONIC, &stopTime) == 0)) { + time_t sec = stopTime.tv_sec - mStartTime.tv_sec; + long nsec = stopTime.tv_nsec - mStartTime.tv_nsec; + long durationMs; + if (nsec < 0) { + --sec; + nsec += 1000000000; + } + + if ((sec + 1) > ((long)(INT_MAX / mSamplingRate))) { + mMaxSmp = sec * mSamplingRate; + } else { + // mSamplingRate is always > 1000 + sec = sec * 1000 + nsec / 1000000; // duration in milliseconds + mMaxSmp = (unsigned int)(((int64_t)sec * mSamplingRate) / 1000); + } + ALOGV("stopTone() forcing mMaxSmp to %d, total for far %d", mMaxSmp, mTotalSmp); + } else { + mState = TONE_STOPPING; + } + } ALOGV("waiting cond"); status_t lStatus = mWaitCbkCond.waitRelative(mLock, seconds(3)); if (lStatus == NO_ERROR) { + // If the tone was restarted exit now before calling clearWaveGens(); + if (mState != TONE_INIT) { + mLock.unlock(); + return; + } ALOGV("track stop complete, time %d", (unsigned int)(systemTime()/1000000)); } else { ALOGE("--- Stop timed out"); mState = TONE_IDLE; mpAudioTrack->stop(); } + clearWaveGens(); } - clearWaveGens(); - mLock.unlock(); } @@ -1036,7 +1073,7 @@ bool ToneGenerator::initAudioTrack() { goto initAudioTrack_exit; } - mpAudioTrack->setVolume(mVolume, mVolume); + mpAudioTrack->setVolume(mVolume); mState = TONE_INIT; @@ -1254,6 +1291,9 @@ audioCallback_EndLoop: ALOGV("Cbk restarting track"); if (lpToneGen->prepareWave()) { lpToneGen->mState = TONE_STARTING; + if (clock_gettime(CLOCK_MONOTONIC, &lpToneGen->mStartTime) != 0) { + lpToneGen->mStartTime.tv_sec = 0; + } // must reload lpToneDesc as prepareWave() may change mpToneDesc lpToneDesc = lpToneGen->mpToneDesc; } else { @@ -1295,7 +1335,7 @@ audioCallback_EndLoop: } if (lSignal) - lpToneGen->mWaitCbkCond.signal(); + lpToneGen->mWaitCbkCond.broadcast(); lpToneGen->mLock.unlock(); } } diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp index 8196e10..5b4071b 100644 --- a/media/libmedia/Visualizer.cpp +++ 
b/media/libmedia/Visualizer.cpp @@ -88,7 +88,8 @@ status_t Visualizer::setEnabled(bool enabled) return status; } -status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate) +status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, + uint32_t rate) { if (rate > CAPTURE_RATE_MAX) { return BAD_VALUE; @@ -334,7 +335,8 @@ void Visualizer::controlStatusChanged(bool controlGranted) { //------------------------------------------------------------------------- -Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava) +Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate, + bool bCanCallJava) : Thread(bCanCallJava), mReceiver(receiver) { mSleepTimeUs = 1000000000 / captureRate; diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp index b0241aa..110b94c 100644 --- a/media/libmedia/mediametadataretriever.cpp +++ b/media/libmedia/mediametadataretriever.cpp @@ -64,7 +64,7 @@ MediaMetadataRetriever::MediaMetadataRetriever() ALOGE("failed to obtain MediaMetadataRetrieverService"); return; } - sp<IMediaMetadataRetriever> retriever(service->createMetadataRetriever(getpid())); + sp<IMediaMetadataRetriever> retriever(service->createMetadataRetriever()); if (retriever == 0) { ALOGE("failed to create IMediaMetadataRetriever object from server"); } diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp index 4ab98e3..963b04f 100644 --- a/media/libmedia/mediaplayer.cpp +++ b/media/libmedia/mediaplayer.cpp @@ -27,7 +27,7 @@ #include <binder/IServiceManager.h> #include <binder/IPCThreadState.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <media/mediaplayer.h> #include <media/AudioSystem.h> @@ -143,7 +143,7 @@ status_t MediaPlayer::setDataSource( if (url != NULL) { const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) 
{ - sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId)); + sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); if ((NO_ERROR != doSetRetransmitEndpoint(player)) || (NO_ERROR != player->setDataSource(url, headers))) { player.clear(); @@ -160,7 +160,7 @@ status_t MediaPlayer::setDataSource(int fd, int64_t offset, int64_t length) status_t err = UNKNOWN_ERROR; const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { - sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId)); + sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); if ((NO_ERROR != doSetRetransmitEndpoint(player)) || (NO_ERROR != player->setDataSource(fd, offset, length))) { player.clear(); @@ -176,7 +176,7 @@ status_t MediaPlayer::setDataSource(const sp<IStreamSource> &source) status_t err = UNKNOWN_ERROR; const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { - sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId)); + sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); if ((NO_ERROR != doSetRetransmitEndpoint(player)) || (NO_ERROR != player->setDataSource(source))) { player.clear(); @@ -221,12 +221,12 @@ status_t MediaPlayer::getMetadata(bool update_only, bool apply_filter, Parcel *m } status_t MediaPlayer::setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) + const sp<IGraphicBufferProducer>& bufferProducer) { ALOGV("setVideoSurfaceTexture"); Mutex::Autolock _l(mLock); if (mPlayer == 0) return NO_INIT; - return mPlayer->setVideoSurfaceTexture(surfaceTexture); + return mPlayer->setVideoSurfaceTexture(bufferProducer); } // must call with lock held @@ -398,6 +398,13 @@ status_t MediaPlayer::getDuration_l(int *msec) if (mPlayer != 0 && isValidState) { int durationMs; status_t ret = mPlayer->getDuration(&durationMs); + + if (ret != OK) { + // Do not enter error state just because no duration was available. 
+ durationMs = -1; + ret = OK; + } + if (msec) { *msec = durationMs; } @@ -568,8 +575,8 @@ status_t MediaPlayer::setAudioSessionId(int sessionId) return BAD_VALUE; } if (sessionId != mAudioSessionId) { - AudioSystem::releaseAudioSessionId(mAudioSessionId); AudioSystem::acquireAudioSessionId(sessionId); + AudioSystem::releaseAudioSessionId(mAudioSessionId); mAudioSessionId = sessionId; } return NO_ERROR; @@ -807,4 +814,15 @@ status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) { return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer); } +status_t MediaPlayer::updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + const sp<IMediaPlayerService>& service = getMediaPlayerService(); + + if (service != NULL) { + return service->updateProxyConfig(host, port, exclusionList); + } + + return INVALID_OPERATION; +} + }; // namespace android diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp index 9541015..3710e46 100644 --- a/media/libmedia/mediarecorder.cpp +++ b/media/libmedia/mediarecorder.cpp @@ -24,7 +24,7 @@ #include <media/IMediaPlayerService.h> #include <media/IMediaRecorder.h> #include <media/mediaplayer.h> // for MEDIA_ERROR_SERVER_DIED -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> namespace android { @@ -49,7 +49,7 @@ status_t MediaRecorder::setCamera(const sp<ICamera>& camera, const sp<ICameraRec return ret; } -status_t MediaRecorder::setPreviewSurface(const sp<Surface>& surface) +status_t MediaRecorder::setPreviewSurface(const sp<IGraphicBufferProducer>& surface) { ALOGV("setPreviewSurface(%p)", surface.get()); if (mMediaRecorder == NULL) { @@ -348,9 +348,9 @@ status_t MediaRecorder::setVideoSize(int width, int height) } // Query a SurfaceMediaSurface through the Mediaserver, over the -// binder interface. This is used by the Filter Framework (MeidaEncoder) -// to get an <ISurfaceTexture> object to hook up to ANativeWindow. 
-sp<ISurfaceTexture> MediaRecorder:: +// binder interface. This is used by the Filter Framework (MediaEncoder) +// to get an <IGraphicBufferProducer> object to hook up to ANativeWindow. +sp<IGraphicBufferProducer> MediaRecorder:: querySurfaceMediaSourceFromMediaServer() { Mutex::Autolock _l(mLock); @@ -620,7 +620,7 @@ MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL) const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != NULL) { - mMediaRecorder = service->createMediaRecorder(getpid()); + mMediaRecorder = service->createMediaRecorder(); } if (mMediaRecorder != NULL) { mCurrentState = MEDIA_RECORDER_IDLE; @@ -656,6 +656,27 @@ status_t MediaRecorder::setListener(const sp<MediaRecorderListener>& listener) return NO_ERROR; } +status_t MediaRecorder::setClientName(const String16& clientName) +{ + ALOGV("setClientName"); + if (mMediaRecorder == NULL) { + ALOGE("media recorder is not initialized yet"); + return INVALID_OPERATION; + } + bool isInvalidState = (mCurrentState & + (MEDIA_RECORDER_PREPARED | + MEDIA_RECORDER_RECORDING | + MEDIA_RECORDER_ERROR)); + if (isInvalidState) { + ALOGE("setClientName is called in an invalid state: %d", mCurrentState); + return INVALID_OPERATION; + } + + mMediaRecorder->setClientName(clientName); + + return NO_ERROR; +} + void MediaRecorder::notify(int msg, int ext1, int ext2) { ALOGV("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2); diff --git a/media/libmedia_native/Android.mk b/media/libmedia_native/Android.mk deleted file mode 100644 index 065a90f..0000000 --- a/media/libmedia_native/Android.mk +++ /dev/null @@ -1,11 +0,0 @@ -LOCAL_PATH := $(call my-dir) - -include $(CLEAR_VARS) - -LOCAL_SRC_FILES := - -LOCAL_MODULE:= libmedia_native - -LOCAL_MODULE_TAGS := optional - -include $(BUILD_SHARED_LIBRARY) diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk index 5b5ed71..d87bc7f 100644 --- a/media/libmediaplayerservice/Android.mk +++ 
b/media/libmediaplayerservice/Android.mk @@ -9,6 +9,7 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ ActivityManager.cpp \ Crypto.cpp \ + Drm.cpp \ HDCP.cpp \ MediaPlayerFactory.cpp \ MediaPlayerService.cpp \ @@ -17,6 +18,7 @@ LOCAL_SRC_FILES:= \ MidiFile.cpp \ MidiMetadataRetriever.cpp \ RemoteDisplay.cpp \ + SharedLibrary.cpp \ StagefrightPlayer.cpp \ StagefrightRecorder.cpp \ TestPlayerStub.cpp \ @@ -25,10 +27,10 @@ LOCAL_SHARED_LIBRARIES := \ libbinder \ libcamera_client \ libcutils \ + liblog \ libdl \ libgui \ libmedia \ - libmedia_native \ libsonivox \ libstagefright \ libstagefright_foundation \ diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp index 0e8f913..ae4d845 100644 --- a/media/libmediaplayerservice/Crypto.cpp +++ b/media/libmediaplayerservice/Crypto.cpp @@ -17,6 +17,8 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "Crypto" #include <utils/Log.h> +#include <dirent.h> +#include <dlfcn.h> #include "Crypto.h" @@ -26,87 +28,176 @@ #include <media/stagefright/foundation/hexdump.h> #include <media/stagefright/MediaErrors.h> -#include <dlfcn.h> - namespace android { +KeyedVector<Vector<uint8_t>, String8> Crypto::mUUIDToLibraryPathMap; +KeyedVector<String8, wp<SharedLibrary> > Crypto::mLibraryPathToOpenLibraryMap; +Mutex Crypto::mMapLock; + +static bool operator<(const Vector<uint8_t> &lhs, const Vector<uint8_t> &rhs) { + if (lhs.size() < rhs.size()) { + return true; + } else if (lhs.size() > rhs.size()) { + return false; + } + + return memcmp((void *)lhs.array(), (void *)rhs.array(), rhs.size()) < 0; +} + Crypto::Crypto() : mInitCheck(NO_INIT), - mLibHandle(NULL), mFactory(NULL), mPlugin(NULL) { - mInitCheck = init(); } Crypto::~Crypto() { delete mPlugin; mPlugin = NULL; + closeFactory(); +} +void Crypto::closeFactory() { delete mFactory; mFactory = NULL; - - if (mLibHandle != NULL) { - dlclose(mLibHandle); - mLibHandle = NULL; - } + mLibrary.clear(); } status_t Crypto::initCheck() const { return mInitCheck; } -status_t 
Crypto::init() { - mLibHandle = dlopen("libdrmdecrypt.so", RTLD_NOW); +/* + * Search the plugins directory for a plugin that supports the scheme + * specified by uuid + * + * If found: + * mLibrary holds a strong pointer to the dlopen'd library + * mFactory is set to the library's factory method + * mInitCheck is set to OK + * + * If not found: + * mLibrary is cleared and mFactory are set to NULL + * mInitCheck is set to an error (!OK) + */ +void Crypto::findFactoryForScheme(const uint8_t uuid[16]) { - if (mLibHandle == NULL) { - ALOGE("Unable to locate libdrmdecrypt.so"); + closeFactory(); - return ERROR_UNSUPPORTED; + // lock static maps + Mutex::Autolock autoLock(mMapLock); + + // first check cache + Vector<uint8_t> uuidVector; + uuidVector.appendArray(uuid, sizeof(uuid)); + ssize_t index = mUUIDToLibraryPathMap.indexOfKey(uuidVector); + if (index >= 0) { + if (loadLibraryForScheme(mUUIDToLibraryPathMap[index], uuid)) { + mInitCheck = OK; + return; + } else { + ALOGE("Failed to load from cached library path!"); + mInitCheck = ERROR_UNSUPPORTED; + return; + } } - typedef CryptoFactory *(*CreateCryptoFactoryFunc)(); - CreateCryptoFactoryFunc createCryptoFactory = - (CreateCryptoFactoryFunc)dlsym(mLibHandle, "createCryptoFactory"); + // no luck, have to search + String8 dirPath("/vendor/lib/mediadrm"); + String8 pluginPath; - if (createCryptoFactory == NULL - || ((mFactory = createCryptoFactory()) == NULL)) { - if (createCryptoFactory == NULL) { - ALOGE("Unable to find symbol 'createCryptoFactory'."); - } else { - ALOGE("createCryptoFactory() failed."); + DIR* pDir = opendir(dirPath.string()); + if (pDir) { + struct dirent* pEntry; + while ((pEntry = readdir(pDir))) { + + pluginPath = dirPath + "/" + pEntry->d_name; + + if (pluginPath.getPathExtension() == ".so") { + + if (loadLibraryForScheme(pluginPath, uuid)) { + mUUIDToLibraryPathMap.add(uuidVector, pluginPath); + mInitCheck = OK; + closedir(pDir); + return; + } + } } - dlclose(mLibHandle); - mLibHandle = NULL; 
+ closedir(pDir); + } - return ERROR_UNSUPPORTED; + // try the legacy libdrmdecrypt.so + pluginPath = "libdrmdecrypt.so"; + if (loadLibraryForScheme(pluginPath, uuid)) { + mUUIDToLibraryPathMap.add(uuidVector, pluginPath); + mInitCheck = OK; + return; } - return OK; + ALOGE("Failed to find crypto plugin"); + mInitCheck = ERROR_UNSUPPORTED; } -bool Crypto::isCryptoSchemeSupported(const uint8_t uuid[16]) const { - Mutex::Autolock autoLock(mLock); +bool Crypto::loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]) { - if (mInitCheck != OK) { + // get strong pointer to open shared library + ssize_t index = mLibraryPathToOpenLibraryMap.indexOfKey(path); + if (index >= 0) { + mLibrary = mLibraryPathToOpenLibraryMap[index].promote(); + } else { + index = mLibraryPathToOpenLibraryMap.add(path, NULL); + } + + if (!mLibrary.get()) { + mLibrary = new SharedLibrary(path); + if (!*mLibrary) { + return false; + } + + mLibraryPathToOpenLibraryMap.replaceValueAt(index, mLibrary); + } + + typedef CryptoFactory *(*CreateCryptoFactoryFunc)(); + + CreateCryptoFactoryFunc createCryptoFactory = + (CreateCryptoFactoryFunc)mLibrary->lookup("createCryptoFactory"); + + if (createCryptoFactory == NULL || + (mFactory = createCryptoFactory()) == NULL || + !mFactory->isCryptoSchemeSupported(uuid)) { + closeFactory(); return false; } + return true; +} - return mFactory->isCryptoSchemeSupported(uuid); +bool Crypto::isCryptoSchemeSupported(const uint8_t uuid[16]) { + Mutex::Autolock autoLock(mLock); + + if (mFactory && mFactory->isCryptoSchemeSupported(uuid)) { + return true; + } + + findFactoryForScheme(uuid); + return (mInitCheck == OK); } status_t Crypto::createPlugin( const uint8_t uuid[16], const void *data, size_t size) { Mutex::Autolock autoLock(mLock); - if (mInitCheck != OK) { - return mInitCheck; - } - if (mPlugin != NULL) { return -EINVAL; } + if (!mFactory || !mFactory->isCryptoSchemeSupported(uuid)) { + findFactoryForScheme(uuid); + } + + if (mInitCheck != OK) { + return 
mInitCheck; + } + return mFactory->createPlugin(uuid, data, size, &mPlugin); } diff --git a/media/libmediaplayerservice/Crypto.h b/media/libmediaplayerservice/Crypto.h index d066774..c44ae34 100644 --- a/media/libmediaplayerservice/Crypto.h +++ b/media/libmediaplayerservice/Crypto.h @@ -20,6 +20,9 @@ #include <media/ICrypto.h> #include <utils/threads.h> +#include <utils/KeyedVector.h> + +#include "SharedLibrary.h" namespace android { @@ -32,7 +35,7 @@ struct Crypto : public BnCrypto { virtual status_t initCheck() const; - virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const; + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]); virtual status_t createPlugin( const uint8_t uuid[16], const void *data, size_t size); @@ -56,11 +59,17 @@ private: mutable Mutex mLock; status_t mInitCheck; - void *mLibHandle; + sp<SharedLibrary> mLibrary; CryptoFactory *mFactory; CryptoPlugin *mPlugin; - status_t init(); + static KeyedVector<Vector<uint8_t>, String8> mUUIDToLibraryPathMap; + static KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap; + static Mutex mMapLock; + + void findFactoryForScheme(const uint8_t uuid[16]); + bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]); + void closeFactory(); DISALLOW_EVIL_CONSTRUCTORS(Crypto); }; diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp new file mode 100644 index 0000000..1e6cd94 --- /dev/null +++ b/media/libmediaplayerservice/Drm.cpp @@ -0,0 +1,579 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "Drm" +#include <utils/Log.h> + +#include <dirent.h> +#include <dlfcn.h> + +#include "Drm.h" + +#include <media/drm/DrmAPI.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AString.h> +#include <media/stagefright/foundation/hexdump.h> +#include <media/stagefright/MediaErrors.h> + +namespace android { + +KeyedVector<Vector<uint8_t>, String8> Drm::mUUIDToLibraryPathMap; +KeyedVector<String8, wp<SharedLibrary> > Drm::mLibraryPathToOpenLibraryMap; +Mutex Drm::mMapLock; + +static bool operator<(const Vector<uint8_t> &lhs, const Vector<uint8_t> &rhs) { + if (lhs.size() < rhs.size()) { + return true; + } else if (lhs.size() > rhs.size()) { + return false; + } + + return memcmp((void *)lhs.array(), (void *)rhs.array(), rhs.size()) < 0; +} + +Drm::Drm() + : mInitCheck(NO_INIT), + mListener(NULL), + mFactory(NULL), + mPlugin(NULL) { +} + +Drm::~Drm() { + delete mPlugin; + mPlugin = NULL; + closeFactory(); +} + +void Drm::closeFactory() { + delete mFactory; + mFactory = NULL; + mLibrary.clear(); +} + +status_t Drm::initCheck() const { + return mInitCheck; +} + +status_t Drm::setListener(const sp<IDrmClient>& listener) +{ + Mutex::Autolock lock(mEventLock); + mListener = listener; + return NO_ERROR; +} + +void Drm::sendEvent(DrmPlugin::EventType eventType, int extra, + Vector<uint8_t> const *sessionId, + Vector<uint8_t> const *data) +{ + mEventLock.lock(); + sp<IDrmClient> listener = mListener; + mEventLock.unlock(); + + if (listener != NULL) { + Parcel obj; + if 
(sessionId && sessionId->size()) { + obj.writeInt32(sessionId->size()); + obj.write(sessionId->array(), sessionId->size()); + } else { + obj.writeInt32(0); + } + + if (data && data->size()) { + obj.writeInt32(data->size()); + obj.write(data->array(), data->size()); + } else { + obj.writeInt32(0); + } + + Mutex::Autolock lock(mNotifyLock); + listener->notify(eventType, extra, &obj); + } +} + +/* + * Search the plugins directory for a plugin that supports the scheme + * specified by uuid + * + * If found: + * mLibrary holds a strong pointer to the dlopen'd library + * mFactory is set to the library's factory method + * mInitCheck is set to OK + * + * If not found: + * mLibrary is cleared and mFactory are set to NULL + * mInitCheck is set to an error (!OK) + */ +void Drm::findFactoryForScheme(const uint8_t uuid[16]) { + + closeFactory(); + + // lock static maps + Mutex::Autolock autoLock(mMapLock); + + // first check cache + Vector<uint8_t> uuidVector; + uuidVector.appendArray(uuid, sizeof(uuid)); + ssize_t index = mUUIDToLibraryPathMap.indexOfKey(uuidVector); + if (index >= 0) { + if (loadLibraryForScheme(mUUIDToLibraryPathMap[index], uuid)) { + mInitCheck = OK; + return; + } else { + ALOGE("Failed to load from cached library path!"); + mInitCheck = ERROR_UNSUPPORTED; + return; + } + } + + // no luck, have to search + String8 dirPath("/vendor/lib/mediadrm"); + DIR* pDir = opendir(dirPath.string()); + + if (pDir == NULL) { + mInitCheck = ERROR_UNSUPPORTED; + ALOGE("Failed to open plugin directory %s", dirPath.string()); + return; + } + + + struct dirent* pEntry; + while ((pEntry = readdir(pDir))) { + + String8 pluginPath = dirPath + "/" + pEntry->d_name; + + if (pluginPath.getPathExtension() == ".so") { + + if (loadLibraryForScheme(pluginPath, uuid)) { + mUUIDToLibraryPathMap.add(uuidVector, pluginPath); + mInitCheck = OK; + closedir(pDir); + return; + } + } + } + + closedir(pDir); + + ALOGE("Failed to find drm plugin"); + mInitCheck = ERROR_UNSUPPORTED; +} + +bool 
Drm::loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]) { + + // get strong pointer to open shared library + ssize_t index = mLibraryPathToOpenLibraryMap.indexOfKey(path); + if (index >= 0) { + mLibrary = mLibraryPathToOpenLibraryMap[index].promote(); + } else { + index = mLibraryPathToOpenLibraryMap.add(path, NULL); + } + + if (!mLibrary.get()) { + mLibrary = new SharedLibrary(path); + if (!*mLibrary) { + return false; + } + + mLibraryPathToOpenLibraryMap.replaceValueAt(index, mLibrary); + } + + typedef DrmFactory *(*CreateDrmFactoryFunc)(); + + CreateDrmFactoryFunc createDrmFactory = + (CreateDrmFactoryFunc)mLibrary->lookup("createDrmFactory"); + + if (createDrmFactory == NULL || + (mFactory = createDrmFactory()) == NULL || + !mFactory->isCryptoSchemeSupported(uuid)) { + closeFactory(); + return false; + } + return true; +} + +bool Drm::isCryptoSchemeSupported(const uint8_t uuid[16]) { + Mutex::Autolock autoLock(mLock); + + if (mFactory && mFactory->isCryptoSchemeSupported(uuid)) { + return true; + } + + findFactoryForScheme(uuid); + return (mInitCheck == OK); +} + +status_t Drm::createPlugin(const uint8_t uuid[16]) { + Mutex::Autolock autoLock(mLock); + + if (mPlugin != NULL) { + return -EINVAL; + } + + if (!mFactory || !mFactory->isCryptoSchemeSupported(uuid)) { + findFactoryForScheme(uuid); + } + + if (mInitCheck != OK) { + return mInitCheck; + } + + status_t result = mFactory->createDrmPlugin(uuid, &mPlugin); + mPlugin->setListener(this); + return result; +} + +status_t Drm::destroyPlugin() { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + delete mPlugin; + mPlugin = NULL; + + return OK; +} + +status_t Drm::openSession(Vector<uint8_t> &sessionId) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->openSession(sessionId); +} + +status_t 
Drm::closeSession(Vector<uint8_t> const &sessionId) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->closeSession(sessionId); +} + +status_t Drm::getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, DrmPlugin::KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->getKeyRequest(sessionId, initData, mimeType, keyType, + optionalParameters, request, defaultUrl); +} + +status_t Drm::provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->provideKeyResponse(sessionId, response, keySetId); +} + +status_t Drm::removeKeys(Vector<uint8_t> const &keySetId) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->removeKeys(keySetId); +} + +status_t Drm::restoreKeys(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keySetId) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->restoreKeys(sessionId, keySetId); +} + +status_t Drm::queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->queryKeyStatus(sessionId, infoMap); +} + +status_t 
Drm::getProvisionRequest(Vector<uint8_t> &request, String8 &defaultUrl) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->getProvisionRequest(request, defaultUrl); +} + +status_t Drm::provideProvisionResponse(Vector<uint8_t> const &response) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->provideProvisionResponse(response); +} + + +status_t Drm::getSecureStops(List<Vector<uint8_t> > &secureStops) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->getSecureStops(secureStops); +} + +status_t Drm::releaseSecureStops(Vector<uint8_t> const &ssRelease) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->releaseSecureStops(ssRelease); +} + +status_t Drm::getPropertyString(String8 const &name, String8 &value ) const { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->getPropertyString(name, value); +} + +status_t Drm::getPropertyByteArray(String8 const &name, Vector<uint8_t> &value ) const { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->getPropertyByteArray(name, value); +} + +status_t Drm::setPropertyString(String8 const &name, String8 const &value ) const { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->setPropertyString(name, value); +} + +status_t Drm::setPropertyByteArray(String8 const &name, + Vector<uint8_t> const 
&value ) const { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->setPropertyByteArray(name, value); +} + + +status_t Drm::setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->setCipherAlgorithm(sessionId, algorithm); +} + +status_t Drm::setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->setMacAlgorithm(sessionId, algorithm); +} + +status_t Drm::encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->encrypt(sessionId, keyId, input, iv, output); +} + +status_t Drm::decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->decrypt(sessionId, keyId, input, iv, output); +} + +status_t Drm::sign(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->sign(sessionId, keyId, message, signature); +} + +status_t Drm::verify(Vector<uint8_t> const 
&sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match) { + Mutex::Autolock autoLock(mLock); + + if (mInitCheck != OK) { + return mInitCheck; + } + + if (mPlugin == NULL) { + return -EINVAL; + } + + return mPlugin->verify(sessionId, keyId, message, signature, match); +} + +} // namespace android diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h new file mode 100644 index 0000000..3da8ad4 --- /dev/null +++ b/media/libmediaplayerservice/Drm.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef DRM_H_ + +#define DRM_H_ + +#include "SharedLibrary.h" + +#include <media/IDrm.h> +#include <media/IDrmClient.h> +#include <utils/threads.h> + +namespace android { + +struct DrmFactory; +struct DrmPlugin; + +struct Drm : public BnDrm, public DrmPluginListener { + Drm(); + virtual ~Drm(); + + virtual status_t initCheck() const; + + virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]); + + virtual status_t createPlugin(const uint8_t uuid[16]); + + virtual status_t destroyPlugin(); + + virtual status_t openSession(Vector<uint8_t> &sessionId); + + virtual status_t closeSession(Vector<uint8_t> const &sessionId); + + virtual status_t + getKeyRequest(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &initData, + String8 const &mimeType, DrmPlugin::KeyType keyType, + KeyedVector<String8, String8> const &optionalParameters, + Vector<uint8_t> &request, String8 &defaultUrl); + + virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &response, + Vector<uint8_t> &keySetId); + + virtual status_t removeKeys(Vector<uint8_t> const &keySetId); + + virtual status_t restoreKeys(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keySetId); + + virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId, + KeyedVector<String8, String8> &infoMap) const; + + virtual status_t getProvisionRequest(Vector<uint8_t> &request, + String8 &defaulUrl); + + virtual status_t provideProvisionResponse(Vector<uint8_t> const &response); + + virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops); + + virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease); + + virtual status_t getPropertyString(String8 const &name, String8 &value ) const; + virtual status_t getPropertyByteArray(String8 const &name, + Vector<uint8_t> &value ) const; + virtual status_t setPropertyString(String8 const &name, String8 const &value ) const; + virtual status_t setPropertyByteArray(String8 const &name, + 
Vector<uint8_t> const &value ) const; + + virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm); + + virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId, + String8 const &algorithm); + + virtual status_t encrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output); + + virtual status_t decrypt(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &input, + Vector<uint8_t> const &iv, + Vector<uint8_t> &output); + + virtual status_t sign(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> &signature); + + virtual status_t verify(Vector<uint8_t> const &sessionId, + Vector<uint8_t> const &keyId, + Vector<uint8_t> const &message, + Vector<uint8_t> const &signature, + bool &match); + + virtual status_t setListener(const sp<IDrmClient>& listener); + + virtual void sendEvent(DrmPlugin::EventType eventType, int extra, + Vector<uint8_t> const *sessionId, + Vector<uint8_t> const *data); + +private: + mutable Mutex mLock; + + status_t mInitCheck; + + sp<IDrmClient> mListener; + mutable Mutex mEventLock; + mutable Mutex mNotifyLock; + + sp<SharedLibrary> mLibrary; + DrmFactory *mFactory; + DrmPlugin *mPlugin; + + static KeyedVector<Vector<uint8_t>, String8> mUUIDToLibraryPathMap; + static KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap; + static Mutex mMapLock; + + void findFactoryForScheme(const uint8_t uuid[16]); + bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]); + void closeFactory(); + + + DISALLOW_EVIL_CONSTRUCTORS(Drm); +}; + +} // namespace android + +#endif // CRYPTO_H_ diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp index 09b9719..469a02e 100644 --- a/media/libmediaplayerservice/HDCP.cpp +++ 
b/media/libmediaplayerservice/HDCP.cpp @@ -26,8 +26,9 @@ namespace android { -HDCP::HDCP() - : mLibHandle(NULL), +HDCP::HDCP(bool createEncryptionModule) + : mIsEncryptionModule(createEncryptionModule), + mLibHandle(NULL), mHDCPModule(NULL) { mLibHandle = dlopen("libstagefright_hdcp.so", RTLD_NOW); @@ -40,7 +41,10 @@ HDCP::HDCP() void *, HDCPModule::ObserverFunc); CreateHDCPModuleFunc createHDCPModule = - (CreateHDCPModuleFunc)dlsym(mLibHandle, "createHDCPModule"); + mIsEncryptionModule + ? (CreateHDCPModuleFunc)dlsym(mLibHandle, "createHDCPModule") + : (CreateHDCPModuleFunc)dlsym( + mLibHandle, "createHDCPModuleForDecryption"); if (createHDCPModule == NULL) { ALOGE("Unable to find symbol 'createHDCPModule'."); @@ -101,6 +105,8 @@ status_t HDCP::encrypt( uint64_t *outInputCTR, void *outData) { Mutex::Autolock autoLock(mLock); + CHECK(mIsEncryptionModule); + if (mHDCPModule == NULL) { *outInputCTR = 0; @@ -110,6 +116,20 @@ status_t HDCP::encrypt( return mHDCPModule->encrypt(inData, size, streamCTR, outInputCTR, outData); } +status_t HDCP::decrypt( + const void *inData, size_t size, + uint32_t streamCTR, uint64_t outInputCTR, void *outData) { + Mutex::Autolock autoLock(mLock); + + CHECK(!mIsEncryptionModule); + + if (mHDCPModule == NULL) { + return NO_INIT; + } + + return mHDCPModule->decrypt(inData, size, streamCTR, outInputCTR, outData); +} + // static void HDCP::ObserveWrapper(void *me, int msg, int ext1, int ext2) { static_cast<HDCP *>(me)->observe(msg, ext1, ext2); diff --git a/media/libmediaplayerservice/HDCP.h b/media/libmediaplayerservice/HDCP.h index b2fc457..42e6467 100644 --- a/media/libmediaplayerservice/HDCP.h +++ b/media/libmediaplayerservice/HDCP.h @@ -24,7 +24,7 @@ namespace android { struct HDCP : public BnHDCP { - HDCP(); + HDCP(bool createEncryptionModule); virtual ~HDCP(); virtual status_t setObserver(const sp<IHDCPObserver> &observer); @@ -35,9 +35,15 @@ struct HDCP : public BnHDCP { const void *inData, size_t size, uint32_t streamCTR, uint64_t 
*outInputCTR, void *outData); + virtual status_t decrypt( + const void *inData, size_t size, + uint32_t streamCTR, uint64_t outInputCTR, void *outData); + private: Mutex mLock; + bool mIsEncryptionModule; + void *mLibHandle; HDCPModule *mHDCPModule; sp<IHDCPObserver> mObserver; diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp index 3f69c11..90aed39 100644 --- a/media/libmediaplayerservice/MediaPlayerFactory.cpp +++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp @@ -100,7 +100,7 @@ void MediaPlayerFactory::unregisterFactory(player_type type) { } \ \ if (0.0 == bestScore) { \ - bestScore = getDefaultPlayerType(); \ + ret = getDefaultPlayerType(); \ } \ \ return ret; @@ -206,7 +206,8 @@ class NuPlayerFactory : public MediaPlayerFactory::IFactory { return 0.0; if (!strncasecmp("http://", url, 7) - || !strncasecmp("https://", url, 8)) { + || !strncasecmp("https://", url, 8) + || !strncasecmp("file://", url, 7)) { size_t len = strlen(url); if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) { return kOurScore; @@ -215,6 +216,10 @@ class NuPlayerFactory : public MediaPlayerFactory::IFactory { if (strstr(url,"m3u8")) { return kOurScore; } + + if ((len >= 4 && !strcasecmp(".sdp", &url[len - 4])) || strstr(url, ".sdp?")) { + return kOurScore; + } } if (!strncasecmp("rtsp://", url, 7)) { diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp index 193b02c..57ec7ea 100644 --- a/media/libmediaplayerservice/MediaPlayerService.cpp +++ b/media/libmediaplayerservice/MediaPlayerService.cpp @@ -38,7 +38,7 @@ #include <binder/IServiceManager.h> #include <binder/MemoryHeapBase.h> #include <binder/MemoryBase.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <utils/Errors.h> // for status_t #include <utils/String8.h> #include <utils/SystemClock.h> @@ -72,7 +72,9 @@ #include <OMX.h> #include "Crypto.h" +#include "Drm.h" 
#include "HDCP.h" +#include "HTTPBase.h" #include "RemoteDisplay.h" namespace { @@ -224,8 +226,9 @@ MediaPlayerService::~MediaPlayerService() ALOGV("MediaPlayerService destroyed"); } -sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(pid_t pid) +sp<IMediaRecorder> MediaPlayerService::createMediaRecorder() { + pid_t pid = IPCThreadState::self()->getCallingPid(); sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid); wp<MediaRecorderClient> w = recorder; Mutex::Autolock lock(mLock); @@ -241,16 +244,18 @@ void MediaPlayerService::removeMediaRecorderClient(wp<MediaRecorderClient> clien ALOGV("Delete media recorder client"); } -sp<IMediaMetadataRetriever> MediaPlayerService::createMetadataRetriever(pid_t pid) +sp<IMediaMetadataRetriever> MediaPlayerService::createMetadataRetriever() { + pid_t pid = IPCThreadState::self()->getCallingPid(); sp<MetadataRetrieverClient> retriever = new MetadataRetrieverClient(pid); ALOGV("Create new media retriever from pid %d", pid); return retriever; } -sp<IMediaPlayer> MediaPlayerService::create(pid_t pid, const sp<IMediaPlayerClient>& client, +sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client, int audioSessionId) { + pid_t pid = IPCThreadState::self()->getCallingPid(); int32_t connId = android_atomic_inc(&mNextConnId); sp<Client> c = new Client( @@ -282,8 +287,12 @@ sp<ICrypto> MediaPlayerService::makeCrypto() { return new Crypto; } -sp<IHDCP> MediaPlayerService::makeHDCP() { - return new HDCP; +sp<IDrm> MediaPlayerService::makeDrm() { + return new Drm; +} + +sp<IHDCP> MediaPlayerService::makeHDCP(bool createEncryptionModule) { + return new HDCP(createEncryptionModule); } sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay( @@ -295,6 +304,11 @@ sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay( return new RemoteDisplay(client, iface.string()); } +status_t MediaPlayerService::updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + 
return HTTPBase::UpdateProxyConfig(host, port, exclusionList); +} + status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& args) const { const size_t SIZE = 256; @@ -714,21 +728,21 @@ void MediaPlayerService::Client::disconnectNativeWindow() { } status_t MediaPlayerService::Client::setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) + const sp<IGraphicBufferProducer>& bufferProducer) { - ALOGV("[%d] setVideoSurfaceTexture(%p)", mConnId, surfaceTexture.get()); + ALOGV("[%d] setVideoSurfaceTexture(%p)", mConnId, bufferProducer.get()); sp<MediaPlayerBase> p = getPlayer(); if (p == 0) return UNKNOWN_ERROR; - sp<IBinder> binder(surfaceTexture == NULL ? NULL : - surfaceTexture->asBinder()); + sp<IBinder> binder(bufferProducer == NULL ? NULL : + bufferProducer->asBinder()); if (mConnectedWindowBinder == binder) { return OK; } sp<ANativeWindow> anw; - if (surfaceTexture != NULL) { - anw = new SurfaceTextureClient(surfaceTexture); + if (bufferProducer != NULL) { + anw = new Surface(bufferProducer); status_t err = native_window_api_connect(anw.get(), NATIVE_WINDOW_API_MEDIA); @@ -745,10 +759,10 @@ status_t MediaPlayerService::Client::setVideoSurfaceTexture( } } - // Note that we must set the player's new SurfaceTexture before + // Note that we must set the player's new GraphicBufferProducer before // disconnecting the old one. Otherwise queue/dequeue calls could be made // on the disconnected ANW, which may result in errors. 
- status_t err = p->setVideoSurfaceTexture(surfaceTexture); + status_t err = p->setVideoSurfaceTexture(bufferProducer); disconnectNativeWindow(); @@ -1387,8 +1401,8 @@ status_t MediaPlayerService::AudioOutput::open( } ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount, mSessionId); - int afSampleRate; - int afFrameCount; + uint32_t afSampleRate; + size_t afFrameCount; uint32_t frameCount; if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) { diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h index fd648df..b33805d 100644 --- a/media/libmediaplayerservice/MediaPlayerService.h +++ b/media/libmediaplayerservice/MediaPlayerService.h @@ -239,22 +239,26 @@ public: static void instantiate(); // IMediaPlayerService interface - virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid); + virtual sp<IMediaRecorder> createMediaRecorder(); void removeMediaRecorderClient(wp<MediaRecorderClient> client); - virtual sp<IMediaMetadataRetriever> createMetadataRetriever(pid_t pid); + virtual sp<IMediaMetadataRetriever> createMetadataRetriever(); - virtual sp<IMediaPlayer> create(pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId); + virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId); virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); virtual sp<IOMX> getOMX(); virtual sp<ICrypto> makeCrypto(); - virtual sp<IHDCP> makeHDCP(); + virtual sp<IDrm> makeDrm(); + virtual sp<IHDCP> makeHDCP(bool createEncryptionModule); virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client, const String8& iface); virtual status_t dump(int fd, const Vector<String16>& args); + virtual status_t 
updateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + void removeClient(wp<Client> client); // For battery usage tracking purpose @@ -307,7 +311,7 @@ private: // IMediaPlayer interface virtual void disconnect(); virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture); + const sp<IGraphicBufferProducer>& bufferProducer); virtual status_t prepareAsync(); virtual status_t start(); virtual status_t stop(); diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp index eadc8ee..a9820e0 100644 --- a/media/libmediaplayerservice/MediaRecorderClient.cpp +++ b/media/libmediaplayerservice/MediaRecorderClient.cpp @@ -38,7 +38,7 @@ #include "MediaPlayerService.h" #include "StagefrightRecorder.h" -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> namespace android { @@ -56,7 +56,7 @@ static bool checkPermission(const char* permissionString) { } -sp<ISurfaceTexture> MediaRecorderClient::querySurfaceMediaSource() +sp<IGraphicBufferProducer> MediaRecorderClient::querySurfaceMediaSource() { ALOGV("Query SurfaceMediaSource"); Mutex::Autolock lock(mLock); @@ -81,7 +81,7 @@ status_t MediaRecorderClient::setCamera(const sp<ICamera>& camera, return mRecorder->setCamera(camera, proxy); } -status_t MediaRecorderClient::setPreviewSurface(const sp<Surface>& surface) +status_t MediaRecorderClient::setPreviewSurface(const sp<IGraphicBufferProducer>& surface) { ALOGV("setPreviewSurface"); Mutex::Autolock lock(mLock); @@ -99,7 +99,7 @@ status_t MediaRecorderClient::setVideoSource(int vs) return PERMISSION_DENIED; } Mutex::Autolock lock(mLock); - if (mRecorder == NULL) { + if (mRecorder == NULL) { ALOGE("recorder is not initialized"); return NO_INIT; } @@ -325,6 +325,16 @@ status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listen return mRecorder->setListener(listener); } +status_t MediaRecorderClient::setClientName(const String16& 
clientName) { + ALOGV("setClientName(%s)", String8(clientName).string()); + Mutex::Autolock lock(mLock); + if (mRecorder == NULL) { + ALOGE("recorder is not initialized"); + return NO_INIT; + } + return mRecorder->setClientName(clientName); +} + status_t MediaRecorderClient::dump(int fd, const Vector<String16>& args) const { if (mRecorder != NULL) { return mRecorder->dump(fd, args); diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h index c9ccf22..a65ec9f 100644 --- a/media/libmediaplayerservice/MediaRecorderClient.h +++ b/media/libmediaplayerservice/MediaRecorderClient.h @@ -25,14 +25,14 @@ namespace android { class MediaRecorderBase; class MediaPlayerService; class ICameraRecordingProxy; -class ISurfaceTexture; +class IGraphicBufferProducer; class MediaRecorderClient : public BnMediaRecorder { public: virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy); - virtual status_t setPreviewSurface(const sp<Surface>& surface); + virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface); virtual status_t setVideoSource(int vs); virtual status_t setAudioSource(int as); virtual status_t setOutputFormat(int of); @@ -46,6 +46,7 @@ public: virtual status_t setParameters(const String8& params); virtual status_t setListener( const sp<IMediaRecorderClient>& listener); + virtual status_t setClientName(const String16& clientName); virtual status_t prepare(); virtual status_t getMaxAmplitude(int* max); virtual status_t start(); @@ -55,7 +56,7 @@ public: virtual status_t close(); virtual status_t release(); virtual status_t dump(int fd, const Vector<String16>& args) const; - virtual sp<ISurfaceTexture> querySurfaceMediaSource(); + virtual sp<IGraphicBufferProducer> querySurfaceMediaSource(); private: friend class MediaPlayerService; // for accessing private constructor diff --git a/media/libmediaplayerservice/MidiFile.h b/media/libmediaplayerservice/MidiFile.h 
index f6f8f7b..24d59b4 100644 --- a/media/libmediaplayerservice/MidiFile.h +++ b/media/libmediaplayerservice/MidiFile.h @@ -36,7 +36,7 @@ public: virtual status_t setDataSource(int fd, int64_t offset, int64_t length); virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture>& surfaceTexture) + const sp<IGraphicBufferProducer>& bufferProducer) { return UNKNOWN_ERROR; } virtual status_t prepare(); virtual status_t prepareAsync(); diff --git a/media/libmediaplayerservice/SharedLibrary.cpp b/media/libmediaplayerservice/SharedLibrary.cpp new file mode 100644 index 0000000..178e15d --- /dev/null +++ b/media/libmediaplayerservice/SharedLibrary.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "Drm" +#include <utils/Log.h> +#include <media/stagefright/foundation/ADebug.h> + +#include <dlfcn.h> + +#include "SharedLibrary.h" + +namespace android { + + SharedLibrary::SharedLibrary(const String8 &path) { + mLibHandle = dlopen(path.string(), RTLD_NOW); + } + + SharedLibrary::~SharedLibrary() { + if (mLibHandle != NULL) { + dlclose(mLibHandle); + mLibHandle = NULL; + } + } + + bool SharedLibrary::operator!() const { + return mLibHandle == NULL; + } + + void *SharedLibrary::lookup(const char *symbol) const { + if (!mLibHandle) { + return NULL; + } + return dlsym(mLibHandle, symbol); + } +}; diff --git a/media/libmediaplayerservice/SharedLibrary.h b/media/libmediaplayerservice/SharedLibrary.h new file mode 100644 index 0000000..5353642 --- /dev/null +++ b/media/libmediaplayerservice/SharedLibrary.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SHARED_LIBRARY_H_ +#define SHARED_LIBRARY_H_ + +#include <utils/RefBase.h> +#include <utils/String8.h> +#include <media/stagefright/foundation/ABase.h> + +namespace android { + class SharedLibrary : public RefBase { + public: + SharedLibrary(const String8 &path); + ~SharedLibrary(); + + bool operator!() const; + void *lookup(const char *symbol) const; + + private: + void *mLibHandle; + DISALLOW_EVIL_CONSTRUCTORS(SharedLibrary); + }; +}; + +#endif // SHARED_LIBRARY_H_ diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp index 619c149..de61d9b 100644 --- a/media/libmediaplayerservice/StagefrightPlayer.cpp +++ b/media/libmediaplayerservice/StagefrightPlayer.cpp @@ -70,10 +70,10 @@ status_t StagefrightPlayer::setDataSource(const sp<IStreamSource> &source) { } status_t StagefrightPlayer::setVideoSurfaceTexture( - const sp<ISurfaceTexture> &surfaceTexture) { + const sp<IGraphicBufferProducer> &bufferProducer) { ALOGV("setVideoSurfaceTexture"); - return mPlayer->setSurfaceTexture(surfaceTexture); + return mPlayer->setSurfaceTexture(bufferProducer); } status_t StagefrightPlayer::prepare() { diff --git a/media/libmediaplayerservice/StagefrightPlayer.h b/media/libmediaplayerservice/StagefrightPlayer.h index e89e18a..600945e 100644 --- a/media/libmediaplayerservice/StagefrightPlayer.h +++ b/media/libmediaplayerservice/StagefrightPlayer.h @@ -41,7 +41,7 @@ public: virtual status_t setDataSource(const sp<IStreamSource> &source); virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture> &surfaceTexture); + const sp<IGraphicBufferProducer> &bufferProducer); virtual status_t prepare(); virtual status_t prepareAsync(); virtual status_t start(); diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index 57b0ec2..095d5ca 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ 
b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -70,7 +70,8 @@ StagefrightRecorder::StagefrightRecorder() mOutputFd(-1), mAudioSource(AUDIO_SOURCE_CNT), mVideoSource(VIDEO_SOURCE_LIST_END), - mStarted(false), mSurfaceMediaSource(NULL) { + mStarted(false), mSurfaceMediaSource(NULL), + mCaptureTimeLapse(false) { ALOGV("Constructor"); reset(); @@ -89,7 +90,7 @@ status_t StagefrightRecorder::init() { // The client side of mediaserver asks it to creat a SurfaceMediaSource // and return a interface reference. The client side will use that // while encoding GL Frames -sp<ISurfaceTexture> StagefrightRecorder::querySurfaceMediaSource() const { +sp<IGraphicBufferProducer> StagefrightRecorder::querySurfaceMediaSource() const { ALOGV("Get SurfaceMediaSource"); return mSurfaceMediaSource->getBufferQueue(); } @@ -224,7 +225,7 @@ status_t StagefrightRecorder::setCamera(const sp<ICamera> &camera, return OK; } -status_t StagefrightRecorder::setPreviewSurface(const sp<Surface> &surface) { +status_t StagefrightRecorder::setPreviewSurface(const sp<IGraphicBufferProducer> &surface) { ALOGV("setPreviewSurface: %p", surface.get()); mPreviewSurface = surface; @@ -730,6 +731,12 @@ status_t StagefrightRecorder::setListener(const sp<IMediaRecorderClient> &listen return OK; } +status_t StagefrightRecorder::setClientName(const String16& clientName) { + mClientName = clientName; + + return OK; +} + status_t StagefrightRecorder::prepare() { return OK; } @@ -737,6 +744,8 @@ status_t StagefrightRecorder::prepare() { status_t StagefrightRecorder::start() { CHECK_GE(mOutputFd, 0); + // Get UID here for permission checking + mClientUid = IPCThreadState::self()->getCallingUid(); if (mWriter != NULL) { ALOGE("File writer is not avaialble"); return UNKNOWN_ERROR; @@ -1312,13 +1321,14 @@ status_t StagefrightRecorder::setupCameraSource( } mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera( - mCamera, mCameraProxy, mCameraId, + mCamera, mCameraProxy, mCameraId, mClientName, 
mClientUid, videoSize, mFrameRate, mPreviewSurface, mTimeBetweenTimeLapseFrameCaptureUs); *cameraSource = mCameraSourceTimeLapse; } else { *cameraSource = CameraSource::CreateFromCamera( - mCamera, mCameraProxy, mCameraId, videoSize, mFrameRate, + mCamera, mCameraProxy, mCameraId, mClientName, mClientUid, + videoSize, mFrameRate, mPreviewSurface, true /*storeMetaDataInVideoBuffers*/); } mCamera.clear(); diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h index ec5ce7e..c864207 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.h +++ b/media/libmediaplayerservice/StagefrightRecorder.h @@ -35,7 +35,7 @@ struct MediaWriter; class MetaData; struct AudioSource; class MediaProfiles; -class ISurfaceTexture; +class IGraphicBufferProducer; class SurfaceMediaSource; struct StagefrightRecorder : public MediaRecorderBase { @@ -51,11 +51,12 @@ struct StagefrightRecorder : public MediaRecorderBase { virtual status_t setVideoSize(int width, int height); virtual status_t setVideoFrameRate(int frames_per_second); virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy); - virtual status_t setPreviewSurface(const sp<Surface>& surface); + virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface); virtual status_t setOutputFile(const char *path); virtual status_t setOutputFile(int fd, int64_t offset, int64_t length); virtual status_t setParameters(const String8& params); virtual status_t setListener(const sp<IMediaRecorderClient>& listener); + virtual status_t setClientName(const String16& clientName); virtual status_t prepare(); virtual status_t start(); virtual status_t pause(); @@ -65,13 +66,15 @@ struct StagefrightRecorder : public MediaRecorderBase { virtual status_t getMaxAmplitude(int *max); virtual status_t dump(int fd, const Vector<String16>& args) const; // Querying a SurfaceMediaSourcer - virtual sp<ISurfaceTexture> querySurfaceMediaSource() 
const; + virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const; private: sp<ICamera> mCamera; sp<ICameraRecordingProxy> mCameraProxy; - sp<Surface> mPreviewSurface; + sp<IGraphicBufferProducer> mPreviewSurface; sp<IMediaRecorderClient> mListener; + String16 mClientName; + uid_t mClientUid; sp<MediaWriter> mWriter; int mOutputFd; sp<AudioSource> mAudioSourceNode; @@ -116,7 +119,7 @@ private: bool mStarted; // Needed when GLFrames are encoded. - // An <ISurfaceTexture> pointer + // An <IGraphicBufferProducer> pointer // will be sent to the client side using which the // frame buffers will be queued and dequeued sp<SurfaceMediaSource> mSurfaceMediaSource; diff --git a/media/libmediaplayerservice/TestPlayerStub.h b/media/libmediaplayerservice/TestPlayerStub.h index 91ffa7d..a3802eb 100644 --- a/media/libmediaplayerservice/TestPlayerStub.h +++ b/media/libmediaplayerservice/TestPlayerStub.h @@ -76,7 +76,7 @@ class TestPlayerStub : public MediaPlayerInterface { // All the methods below wrap the mPlayer instance. 
virtual status_t setVideoSurfaceTexture( - const android::sp<android::ISurfaceTexture>& st) { + const android::sp<android::IGraphicBufferProducer>& st) { return mPlayer->setVideoSurfaceTexture(st); } virtual status_t prepare() {return mPlayer->prepare();} diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp index f281879..b04e7a6 100644 --- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp +++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp @@ -32,11 +32,13 @@ namespace android { NuPlayer::GenericSource::GenericSource( + const sp<AMessage> ¬ify, const char *url, const KeyedVector<String8, String8> *headers, bool uidValid, uid_t uid) - : mDurationUs(0ll), + : Source(notify), + mDurationUs(0ll), mAudioIsVorbis(false) { DataSource::RegisterDefaultSniffers(); @@ -48,8 +50,10 @@ NuPlayer::GenericSource::GenericSource( } NuPlayer::GenericSource::GenericSource( + const sp<AMessage> ¬ify, int fd, int64_t offset, int64_t length) - : mDurationUs(0ll), + : Source(notify), + mDurationUs(0ll), mAudioIsVorbis(false) { DataSource::RegisterDefaultSniffers(); @@ -102,6 +106,26 @@ void NuPlayer::GenericSource::initFromDataSource( NuPlayer::GenericSource::~GenericSource() { } +void NuPlayer::GenericSource::prepareAsync() { + if (mVideoTrack.mSource != NULL) { + sp<MetaData> meta = mVideoTrack.mSource->getFormat(); + + int32_t width, height; + CHECK(meta->findInt32(kKeyWidth, &width)); + CHECK(meta->findInt32(kKeyHeight, &height)); + + notifyVideoSizeChanged(width, height); + } + + notifyFlagsChanged( + FLAG_CAN_PAUSE + | FLAG_CAN_SEEK_BACKWARD + | FLAG_CAN_SEEK_FORWARD + | FLAG_CAN_SEEK); + + notifyPrepared(); +} + void NuPlayer::GenericSource::start() { ALOGI("start"); @@ -258,8 +282,4 @@ void NuPlayer::GenericSource::readBuffer( } } -uint32_t NuPlayer::GenericSource::flags() const { - return FLAG_SEEKABLE; -} - } // namespace android diff --git 
a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h index e1ce2c1..2da680c 100644 --- a/media/libmediaplayerservice/nuplayer/GenericSource.h +++ b/media/libmediaplayerservice/nuplayer/GenericSource.h @@ -32,12 +32,17 @@ struct MediaSource; struct NuPlayer::GenericSource : public NuPlayer::Source { GenericSource( + const sp<AMessage> ¬ify, const char *url, const KeyedVector<String8, String8> *headers, bool uidValid = false, uid_t uid = 0); - GenericSource(int fd, int64_t offset, int64_t length); + GenericSource( + const sp<AMessage> ¬ify, + int fd, int64_t offset, int64_t length); + + virtual void prepareAsync(); virtual void start(); @@ -48,8 +53,6 @@ struct NuPlayer::GenericSource : public NuPlayer::Source { virtual status_t getDuration(int64_t *durationUs); virtual status_t seekTo(int64_t seekTimeUs); - virtual uint32_t flags() const; - protected: virtual ~GenericSource(); diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp index 5dcca12..655ee55 100644 --- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp +++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp @@ -34,10 +34,12 @@ namespace android { NuPlayer::HTTPLiveSource::HTTPLiveSource( + const sp<AMessage> ¬ify, const char *url, const KeyedVector<String8, String8> *headers, bool uidValid, uid_t uid) - : mURL(url), + : Source(notify), + mURL(url), mUIDValid(uidValid), mUID(uid), mFlags(0), @@ -64,12 +66,15 @@ NuPlayer::HTTPLiveSource::~HTTPLiveSource() { } } -void NuPlayer::HTTPLiveSource::start() { +void NuPlayer::HTTPLiveSource::prepareAsync() { mLiveLooper = new ALooper; mLiveLooper->setName("http live"); mLiveLooper->start(); + sp<AMessage> notify = new AMessage(kWhatSessionNotify, id()); + mLiveSession = new LiveSession( + notify, (mFlags & kFlagIncognito) ? 
LiveSession::kFlagIncognito : 0, mUIDValid, mUID); @@ -81,6 +86,9 @@ void NuPlayer::HTTPLiveSource::start() { mTSParser = new ATSParser; } +void NuPlayer::HTTPLiveSource::start() { +} + sp<MetaData> NuPlayer::HTTPLiveSource::getFormatMeta(bool audio) { ATSParser::SourceType type = audio ? ATSParser::AUDIO : ATSParser::VIDEO; @@ -192,17 +200,58 @@ status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) { return OK; } -uint32_t NuPlayer::HTTPLiveSource::flags() const { - uint32_t flags = 0; - if (mLiveSession->isSeekable()) { - flags |= FLAG_SEEKABLE; - } +void NuPlayer::HTTPLiveSource::onMessageReceived(const sp<AMessage> &msg) { + switch (msg->what()) { + case kWhatSessionNotify: + { + onSessionNotify(msg); + break; + } - if (mLiveSession->hasDynamicDuration()) { - flags |= FLAG_DYNAMIC_DURATION; + default: + Source::onMessageReceived(msg); + break; } +} - return flags; +void NuPlayer::HTTPLiveSource::onSessionNotify(const sp<AMessage> &msg) { + int32_t what; + CHECK(msg->findInt32("what", &what)); + + switch (what) { + case LiveSession::kWhatPrepared: + { + notifyVideoSizeChanged(0, 0); + + uint32_t flags = FLAG_CAN_PAUSE; + if (mLiveSession->isSeekable()) { + flags |= FLAG_CAN_SEEK; + flags |= FLAG_CAN_SEEK_BACKWARD; + flags |= FLAG_CAN_SEEK_FORWARD; + } + + if (mLiveSession->hasDynamicDuration()) { + flags |= FLAG_DYNAMIC_DURATION; + } + + notifyFlagsChanged(flags); + + notifyPrepared(); + break; + } + + case LiveSession::kWhatPreparationFailed: + { + status_t err; + CHECK(msg->findInt32("err", &err)); + + notifyPrepared(err); + break; + } + + default: + TRESPASS(); + } } } // namespace android diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h index 79f4ab8..067d1da 100644 --- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h +++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h @@ -28,11 +28,13 @@ struct LiveSession; struct NuPlayer::HTTPLiveSource : public 
NuPlayer::Source { HTTPLiveSource( + const sp<AMessage> ¬ify, const char *url, const KeyedVector<String8, String8> *headers, bool uidValid = false, uid_t uid = 0); + virtual void prepareAsync(); virtual void start(); virtual status_t feedMoreTSData(); @@ -42,19 +44,23 @@ struct NuPlayer::HTTPLiveSource : public NuPlayer::Source { virtual status_t getDuration(int64_t *durationUs); virtual status_t seekTo(int64_t seekTimeUs); - virtual uint32_t flags() const; - protected: virtual ~HTTPLiveSource(); virtual sp<MetaData> getFormatMeta(bool audio); + virtual void onMessageReceived(const sp<AMessage> &msg); + private: enum Flags { // Don't log any URLs. kFlagIncognito = 1, }; + enum { + kWhatSessionNotify, + }; + AString mURL; KeyedVector<String8, String8> mExtraHeaders; bool mUIDValid; @@ -66,6 +72,8 @@ private: sp<LiveSession> mLiveSession; sp<ATSParser> mTSParser; + void onSessionNotify(const sp<AMessage> &msg); + DISALLOW_EVIL_CONSTRUCTORS(HTTPLiveSource); }; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp index ff27873..b89b1c8 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp @@ -41,7 +41,7 @@ #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include "avc_utils.h" @@ -50,10 +50,69 @@ namespace android { +struct NuPlayer::Action : public RefBase { + Action() {} + + virtual void execute(NuPlayer *player) = 0; + +private: + DISALLOW_EVIL_CONSTRUCTORS(Action); +}; + +struct NuPlayer::SeekAction : public Action { + SeekAction(int64_t seekTimeUs) + : mSeekTimeUs(seekTimeUs) { + } + + virtual void execute(NuPlayer *player) { + player->performSeek(mSeekTimeUs); + } + +private: + int64_t mSeekTimeUs; + + DISALLOW_EVIL_CONSTRUCTORS(SeekAction); +}; + +struct NuPlayer::SetSurfaceAction : public 
Action { + SetSurfaceAction(const sp<NativeWindowWrapper> &wrapper) + : mWrapper(wrapper) { + } + + virtual void execute(NuPlayer *player) { + player->performSetSurface(mWrapper); + } + +private: + sp<NativeWindowWrapper> mWrapper; + + DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction); +}; + +// Use this if there's no state necessary to save in order to execute +// the action. +struct NuPlayer::SimpleAction : public Action { + typedef void (NuPlayer::*ActionFunc)(); + + SimpleAction(ActionFunc func) + : mFunc(func) { + } + + virtual void execute(NuPlayer *player) { + (player->*mFunc)(); + } + +private: + ActionFunc mFunc; + + DISALLOW_EVIL_CONSTRUCTORS(SimpleAction); +}; + //////////////////////////////////////////////////////////////////////////////// NuPlayer::NuPlayer() : mUIDValid(false), + mSourceFlags(0), mVideoIsAVC(false), mAudioEOS(false), mVideoEOS(false), @@ -63,14 +122,13 @@ NuPlayer::NuPlayer() mTimeDiscontinuityPending(false), mFlushingAudio(NONE), mFlushingVideo(NONE), - mResetInProgress(false), - mResetPostponed(false), mSkipRenderingAudioUntilMediaTimeUs(-1ll), mSkipRenderingVideoUntilMediaTimeUs(-1ll), mVideoLateByUs(0ll), mNumFramesTotal(0ll), mNumFramesDropped(0ll), - mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW) { + mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW), + mStarted(false) { } NuPlayer::~NuPlayer() { @@ -85,15 +143,17 @@ void NuPlayer::setDriver(const wp<NuPlayerDriver> &driver) { mDriver = driver; } -void NuPlayer::setDataSource(const sp<IStreamSource> &source) { +void NuPlayer::setDataSourceAsync(const sp<IStreamSource> &source) { sp<AMessage> msg = new AMessage(kWhatSetDataSource, id()); + sp<AMessage> notify = new AMessage(kWhatSourceNotify, id()); + char prop[PROPERTY_VALUE_MAX]; if (property_get("media.stagefright.use-mp4source", prop, NULL) && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) { - msg->setObject("source", new MP4Source(source)); + msg->setObject("source", new MP4Source(notify, source)); 
} else { - msg->setObject("source", new StreamingSource(source)); + msg->setObject("source", new StreamingSource(notify, source)); } msg->post(); @@ -101,7 +161,8 @@ void NuPlayer::setDataSource(const sp<IStreamSource> &source) { static bool IsHTTPLiveURL(const char *url) { if (!strncasecmp("http://", url, 7) - || !strncasecmp("https://", url, 8)) { + || !strncasecmp("https://", url, 8) + || !strncasecmp("file://", url, 7)) { size_t len = strlen(url); if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) { return true; @@ -115,36 +176,58 @@ static bool IsHTTPLiveURL(const char *url) { return false; } -void NuPlayer::setDataSource( +void NuPlayer::setDataSourceAsync( const char *url, const KeyedVector<String8, String8> *headers) { sp<AMessage> msg = new AMessage(kWhatSetDataSource, id()); + size_t len = strlen(url); + + sp<AMessage> notify = new AMessage(kWhatSourceNotify, id()); sp<Source> source; if (IsHTTPLiveURL(url)) { - source = new HTTPLiveSource(url, headers, mUIDValid, mUID); + source = new HTTPLiveSource(notify, url, headers, mUIDValid, mUID); } else if (!strncasecmp(url, "rtsp://", 7)) { - source = new RTSPSource(url, headers, mUIDValid, mUID); + source = new RTSPSource(notify, url, headers, mUIDValid, mUID); + } else if ((!strncasecmp(url, "http://", 7) + || !strncasecmp(url, "https://", 8)) + && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4])) + || strstr(url, ".sdp?"))) { + source = new RTSPSource(notify, url, headers, mUIDValid, mUID, true); } else { - source = new GenericSource(url, headers, mUIDValid, mUID); + source = new GenericSource(notify, url, headers, mUIDValid, mUID); } msg->setObject("source", source); msg->post(); } -void NuPlayer::setDataSource(int fd, int64_t offset, int64_t length) { +void NuPlayer::setDataSourceAsync(int fd, int64_t offset, int64_t length) { sp<AMessage> msg = new AMessage(kWhatSetDataSource, id()); - sp<Source> source = new GenericSource(fd, offset, length); + sp<AMessage> notify = new AMessage(kWhatSourceNotify, 
id()); + + sp<Source> source = new GenericSource(notify, fd, offset, length); msg->setObject("source", source); msg->post(); } -void NuPlayer::setVideoSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) { +void NuPlayer::prepareAsync() { + (new AMessage(kWhatPrepare, id()))->post(); +} + +void NuPlayer::setVideoSurfaceTextureAsync( + const sp<IGraphicBufferProducer> &bufferProducer) { sp<AMessage> msg = new AMessage(kWhatSetVideoNativeWindow, id()); - sp<SurfaceTextureClient> surfaceTextureClient(surfaceTexture != NULL ? - new SurfaceTextureClient(surfaceTexture) : NULL); - msg->setObject("native-window", new NativeWindowWrapper(surfaceTextureClient)); + + if (bufferProducer == NULL) { + msg->setObject("native-window", NULL); + } else { + msg->setObject( + "native-window", + new NativeWindowWrapper( + new Surface(bufferProducer))); + } + msg->post(); } @@ -208,6 +291,20 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { CHECK(msg->findObject("source", &obj)); mSource = static_cast<Source *>(obj.get()); + + looper()->registerHandler(mSource); + + CHECK(mDriver != NULL); + sp<NuPlayerDriver> driver = mDriver.promote(); + if (driver != NULL) { + driver->notifySetDataSourceCompleted(OK); + } + break; + } + + case kWhatPrepare: + { + mSource->prepareAsync(); break; } @@ -237,13 +334,24 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { { ALOGV("kWhatSetVideoNativeWindow"); + mDeferredActions.push_back( + new SimpleAction(&NuPlayer::performDecoderShutdown)); + sp<RefBase> obj; CHECK(msg->findObject("native-window", &obj)); - mNativeWindow = static_cast<NativeWindowWrapper *>(obj.get()); + mDeferredActions.push_back( + new SetSurfaceAction( + static_cast<NativeWindowWrapper *>(obj.get()))); + + if (obj != NULL) { + // If there is a new surface texture, instantiate decoders + // again if possible. 
+ mDeferredActions.push_back( + new SimpleAction(&NuPlayer::performScanSources)); + } - // XXX - ignore error from setVideoScalingMode for now - setVideoScalingMode(mVideoScalingMode); + processDeferredActions(); break; } @@ -270,12 +378,20 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { mVideoLateByUs = 0; mNumFramesTotal = 0; mNumFramesDropped = 0; + mStarted = true; mSource->start(); + uint32_t flags = 0; + + if (mSource->isRealTime()) { + flags |= Renderer::FLAG_REAL_TIME; + } + mRenderer = new Renderer( mAudioSink, - new AMessage(kWhatRendererNotify, id())); + new AMessage(kWhatRendererNotify, id()), + flags); looper()->registerHandler(mRenderer); @@ -312,9 +428,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { && (mAudioDecoder != NULL || mVideoDecoder != NULL)) { // This is the first time we've found anything playable. - uint32_t flags = mSource->flags(); - - if (flags & Source::FLAG_DYNAMIC_DURATION) { + if (mSourceFlags & Source::FLAG_DYNAMIC_DURATION) { schedulePollDuration(); } } @@ -407,7 +521,8 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { } else if (what == ACodec::kWhatOutputFormatChanged) { if (audio) { int32_t numChannels; - CHECK(codecRequest->findInt32("channel-count", &numChannels)); + CHECK(codecRequest->findInt32( + "channel-count", &numChannels)); int32_t sampleRate; CHECK(codecRequest->findInt32("sample-rate", &sampleRate)); @@ -419,13 +534,15 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { audio_output_flags_t flags; int64_t durationUs; - // FIXME: we should handle the case where the video decoder is created after - // we receive the format change indication. Current code will just make that - // we select deep buffer with video which should not be a problem as it should + // FIXME: we should handle the case where the video decoder + // is created after we receive the format change indication. 
+ // Current code will just make that we select deep buffer + // with video which should not be a problem as it should // not prevent from keeping A/V sync. if (mVideoDecoder == NULL && mSource->getDuration(&durationUs) == OK && - durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) { + durationUs + > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) { flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER; } else { flags = AUDIO_OUTPUT_FLAG_NONE; @@ -461,17 +578,35 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { "crop", &cropLeft, &cropTop, &cropRight, &cropBottom)); + int32_t displayWidth = cropRight - cropLeft + 1; + int32_t displayHeight = cropBottom - cropTop + 1; + ALOGV("Video output format changed to %d x %d " "(crop: %d x %d @ (%d, %d))", width, height, - (cropRight - cropLeft + 1), - (cropBottom - cropTop + 1), + displayWidth, + displayHeight, cropLeft, cropTop); + sp<AMessage> videoInputFormat = + mSource->getFormat(false /* audio */); + + // Take into account sample aspect ratio if necessary: + int32_t sarWidth, sarHeight; + if (videoInputFormat->findInt32("sar-width", &sarWidth) + && videoInputFormat->findInt32( + "sar-height", &sarHeight)) { + ALOGV("Sample aspect ratio %d : %d", + sarWidth, sarHeight); + + displayWidth = (displayWidth * sarWidth) / sarHeight; + + ALOGV("display dimensions %d x %d", + displayWidth, displayHeight); + } + notifyListener( - MEDIA_SET_VIDEO_SIZE, - cropRight - cropLeft + 1, - cropBottom - cropTop + 1); + MEDIA_SET_VIDEO_SIZE, displayWidth, displayHeight); } } else if (what == ACodec::kWhatShutdownCompleted) { ALOGV("%s shutdown completed", audio ? 
"audio" : "video"); @@ -495,8 +630,15 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { mRenderer->queueEOS(audio, UNKNOWN_ERROR); } else if (what == ACodec::kWhatDrainThisBuffer) { renderBuffer(audio, codecRequest); - } else { - ALOGV("Unhandled codec notification %d.", what); + } else if (what != ACodec::kWhatComponentAllocated + && what != ACodec::kWhatComponentConfigured + && what != ACodec::kWhatBuffersAllocated) { + ALOGV("Unhandled codec notification %d '%c%c%c%c'.", + what, + what >> 24, + (what >> 16) & 0xff, + (what >> 8) & 0xff, + what & 0xff); } break; @@ -550,8 +692,6 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { } } } else if (what == Renderer::kWhatFlushComplete) { - CHECK_EQ(what, (int32_t)Renderer::kWhatFlushComplete); - int32_t audio; CHECK(msg->findInt32("audio", &audio)); @@ -571,47 +711,13 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { { ALOGV("kWhatReset"); - cancelPollDuration(); - - if (mRenderer != NULL) { - // There's an edge case where the renderer owns all output - // buffers and is paused, therefore the decoder will not read - // more input data and will never encounter the matching - // discontinuity. To avoid this, we resume the renderer. - - if (mFlushingAudio == AWAITING_DISCONTINUITY - || mFlushingVideo == AWAITING_DISCONTINUITY) { - mRenderer->resume(); - } - } + mDeferredActions.push_back( + new SimpleAction(&NuPlayer::performDecoderShutdown)); - if (mFlushingAudio != NONE || mFlushingVideo != NONE) { - // We're currently flushing, postpone the reset until that's - // completed. 
- - ALOGV("postponing reset mFlushingAudio=%d, mFlushingVideo=%d", - mFlushingAudio, mFlushingVideo); - - mResetPostponed = true; - break; - } - - if (mAudioDecoder == NULL && mVideoDecoder == NULL) { - finishReset(); - break; - } + mDeferredActions.push_back( + new SimpleAction(&NuPlayer::performReset)); - mTimeDiscontinuityPending = true; - - if (mAudioDecoder != NULL) { - flushDecoder(true /* audio */, true /* needShutdown */); - } - - if (mVideoDecoder != NULL) { - flushDecoder(false /* audio */, true /* needShutdown */); - } - - mResetInProgress = true; + processDeferredActions(); break; } @@ -620,24 +726,21 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { int64_t seekTimeUs; CHECK(msg->findInt64("seekTimeUs", &seekTimeUs)); - ALOGV("kWhatSeek seekTimeUs=%lld us (%.2f secs)", - seekTimeUs, seekTimeUs / 1E6); + ALOGV("kWhatSeek seekTimeUs=%lld us", seekTimeUs); - mSource->seekTo(seekTimeUs); + mDeferredActions.push_back( + new SimpleAction(&NuPlayer::performDecoderFlush)); - if (mDriver != NULL) { - sp<NuPlayerDriver> driver = mDriver.promote(); - if (driver != NULL) { - driver->notifySeekComplete(); - } - } + mDeferredActions.push_back(new SeekAction(seekTimeUs)); + processDeferredActions(); break; } case kWhatPause: { CHECK(mRenderer != NULL); + mSource->pause(); mRenderer->pause(); break; } @@ -645,10 +748,17 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { case kWhatResume: { CHECK(mRenderer != NULL); + mSource->resume(); mRenderer->resume(); break; } + case kWhatSourceNotify: + { + onSourceNotify(msg); + break; + } + default: TRESPASS(); break; @@ -682,39 +792,7 @@ void NuPlayer::finishFlushIfPossible() { mFlushingAudio = NONE; mFlushingVideo = NONE; - if (mResetInProgress) { - ALOGV("reset completed"); - - mResetInProgress = false; - finishReset(); - } else if (mResetPostponed) { - (new AMessage(kWhatReset, id()))->post(); - mResetPostponed = false; - } else if (mAudioDecoder == NULL || mVideoDecoder == NULL) { - 
postScanSources(); - } -} - -void NuPlayer::finishReset() { - CHECK(mAudioDecoder == NULL); - CHECK(mVideoDecoder == NULL); - - ++mScanSourcesGeneration; - mScanSourcesPending = false; - - mRenderer.clear(); - - if (mSource != NULL) { - mSource->stop(); - mSource.clear(); - } - - if (mDriver != NULL) { - sp<NuPlayerDriver> driver = mDriver.promote(); - if (driver != NULL) { - driver->notifyResetComplete(); - } - } + processDeferredActions(); } void NuPlayer::postScanSources() { @@ -756,14 +834,6 @@ status_t NuPlayer::instantiateDecoder(bool audio, sp<Decoder> *decoder) { (*decoder)->configure(format); - int64_t durationUs; - if (mDriver != NULL && mSource->getDuration(&durationUs) == OK) { - sp<NuPlayerDriver> driver = mDriver.promote(); - if (driver != NULL) { - driver->notifyDuration(durationUs); - } - } - return OK; } @@ -833,6 +903,14 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { mTimeDiscontinuityPending || timeChange; if (formatChange || timeChange) { + if (mFlushingAudio == NONE && mFlushingVideo == NONE) { + // And we'll resume scanning sources once we're done + // flushing. + mDeferredActions.push_front( + new SimpleAction( + &NuPlayer::performScanSources)); + } + flushDecoder(audio, formatChange); } else { // This stream is unaffected by the discontinuity @@ -1002,8 +1080,7 @@ sp<AMessage> NuPlayer::Source::getFormat(bool audio) { status_t NuPlayer::setVideoScalingMode(int32_t mode) { mVideoScalingMode = mode; - if (mNativeWindow != NULL - && mNativeWindow->getNativeWindow() != NULL) { + if (mNativeWindow != NULL) { status_t ret = native_window_set_scaling_mode( mNativeWindow->getNativeWindow().get(), mVideoScalingMode); if (ret != OK) { @@ -1025,4 +1102,257 @@ void NuPlayer::cancelPollDuration() { ++mPollDurationGeneration; } +void NuPlayer::processDeferredActions() { + while (!mDeferredActions.empty()) { + // We won't execute any deferred actions until we're no longer in + // an intermediate state, i.e. 
one or more decoders are currently + // flushing or shutting down. + + if (mRenderer != NULL) { + // There's an edge case where the renderer owns all output + // buffers and is paused, therefore the decoder will not read + // more input data and will never encounter the matching + // discontinuity. To avoid this, we resume the renderer. + + if (mFlushingAudio == AWAITING_DISCONTINUITY + || mFlushingVideo == AWAITING_DISCONTINUITY) { + mRenderer->resume(); + } + } + + if (mFlushingAudio != NONE || mFlushingVideo != NONE) { + // We're currently flushing, postpone the reset until that's + // completed. + + ALOGV("postponing action mFlushingAudio=%d, mFlushingVideo=%d", + mFlushingAudio, mFlushingVideo); + + break; + } + + sp<Action> action = *mDeferredActions.begin(); + mDeferredActions.erase(mDeferredActions.begin()); + + action->execute(this); + } +} + +void NuPlayer::performSeek(int64_t seekTimeUs) { + ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)", + seekTimeUs, + seekTimeUs / 1E6); + + mSource->seekTo(seekTimeUs); + + if (mDriver != NULL) { + sp<NuPlayerDriver> driver = mDriver.promote(); + if (driver != NULL) { + driver->notifyPosition(seekTimeUs); + driver->notifySeekComplete(); + } + } + + // everything's flushed, continue playback. 
+} + +void NuPlayer::performDecoderFlush() { + ALOGV("performDecoderFlush"); + + if (mAudioDecoder == NULL && mVideoDecoder == NULL) { + return; + } + + mTimeDiscontinuityPending = true; + + if (mAudioDecoder != NULL) { + flushDecoder(true /* audio */, false /* needShutdown */); + } + + if (mVideoDecoder != NULL) { + flushDecoder(false /* audio */, false /* needShutdown */); + } +} + +void NuPlayer::performDecoderShutdown() { + ALOGV("performDecoderShutdown"); + + if (mAudioDecoder == NULL && mVideoDecoder == NULL) { + return; + } + + mTimeDiscontinuityPending = true; + + if (mAudioDecoder != NULL) { + flushDecoder(true /* audio */, true /* needShutdown */); + } + + if (mVideoDecoder != NULL) { + flushDecoder(false /* audio */, true /* needShutdown */); + } +} + +void NuPlayer::performReset() { + ALOGV("performReset"); + + CHECK(mAudioDecoder == NULL); + CHECK(mVideoDecoder == NULL); + + cancelPollDuration(); + + ++mScanSourcesGeneration; + mScanSourcesPending = false; + + mRenderer.clear(); + + if (mSource != NULL) { + mSource->stop(); + + looper()->unregisterHandler(mSource->id()); + + mSource.clear(); + } + + if (mDriver != NULL) { + sp<NuPlayerDriver> driver = mDriver.promote(); + if (driver != NULL) { + driver->notifyResetComplete(); + } + } + + mStarted = false; +} + +void NuPlayer::performScanSources() { + ALOGV("performScanSources"); + + if (!mStarted) { + return; + } + + if (mAudioDecoder == NULL || mVideoDecoder == NULL) { + postScanSources(); + } +} + +void NuPlayer::performSetSurface(const sp<NativeWindowWrapper> &wrapper) { + ALOGV("performSetSurface"); + + mNativeWindow = wrapper; + + // XXX - ignore error from setVideoScalingMode for now + setVideoScalingMode(mVideoScalingMode); + + if (mDriver != NULL) { + sp<NuPlayerDriver> driver = mDriver.promote(); + if (driver != NULL) { + driver->notifySetSurfaceComplete(); + } + } +} + +void NuPlayer::onSourceNotify(const sp<AMessage> &msg) { + int32_t what; + CHECK(msg->findInt32("what", &what)); + + switch 
(what) { + case Source::kWhatPrepared: + { + if (mSource == NULL) { + // This is a stale notification from a source that was + // asynchronously preparing when the client called reset(). + // We handled the reset, the source is gone. + break; + } + + int32_t err; + CHECK(msg->findInt32("err", &err)); + + sp<NuPlayerDriver> driver = mDriver.promote(); + if (driver != NULL) { + driver->notifyPrepareCompleted(err); + } + + int64_t durationUs; + if (mDriver != NULL && mSource->getDuration(&durationUs) == OK) { + sp<NuPlayerDriver> driver = mDriver.promote(); + if (driver != NULL) { + driver->notifyDuration(durationUs); + } + } + break; + } + + case Source::kWhatFlagsChanged: + { + uint32_t flags; + CHECK(msg->findInt32("flags", (int32_t *)&flags)); + + if ((mSourceFlags & Source::FLAG_DYNAMIC_DURATION) + && (!(flags & Source::FLAG_DYNAMIC_DURATION))) { + cancelPollDuration(); + } else if (!(mSourceFlags & Source::FLAG_DYNAMIC_DURATION) + && (flags & Source::FLAG_DYNAMIC_DURATION) + && (mAudioDecoder != NULL || mVideoDecoder != NULL)) { + schedulePollDuration(); + } + + mSourceFlags = flags; + break; + } + + case Source::kWhatVideoSizeChanged: + { + int32_t width, height; + CHECK(msg->findInt32("width", &width)); + CHECK(msg->findInt32("height", &height)); + + notifyListener(MEDIA_SET_VIDEO_SIZE, width, height); + break; + } + + case Source::kWhatBufferingStart: + { + notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_START, 0); + break; + } + + case Source::kWhatBufferingEnd: + { + notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_END, 0); + break; + } + + default: + TRESPASS(); + } +} + +//////////////////////////////////////////////////////////////////////////////// + +void NuPlayer::Source::notifyFlagsChanged(uint32_t flags) { + sp<AMessage> notify = dupNotify(); + notify->setInt32("what", kWhatFlagsChanged); + notify->setInt32("flags", flags); + notify->post(); +} + +void NuPlayer::Source::notifyVideoSizeChanged(int32_t width, int32_t height) { + sp<AMessage> notify = 
dupNotify(); + notify->setInt32("what", kWhatVideoSizeChanged); + notify->setInt32("width", width); + notify->setInt32("height", height); + notify->post(); +} + +void NuPlayer::Source::notifyPrepared(status_t err) { + sp<AMessage> notify = dupNotify(); + notify->setInt32("what", kWhatPrepared); + notify->setInt32("err", err); + notify->post(); +} + +void NuPlayer::Source::onMessageReceived(const sp<AMessage> &msg) { + TRESPASS(); +} + } // namespace android diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h index 31efb2e..50d0462 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h @@ -35,14 +35,18 @@ struct NuPlayer : public AHandler { void setDriver(const wp<NuPlayerDriver> &driver); - void setDataSource(const sp<IStreamSource> &source); + void setDataSourceAsync(const sp<IStreamSource> &source); - void setDataSource( + void setDataSourceAsync( const char *url, const KeyedVector<String8, String8> *headers); - void setDataSource(int fd, int64_t offset, int64_t length); + void setDataSourceAsync(int fd, int64_t offset, int64_t length); + + void prepareAsync(); + + void setVideoSurfaceTextureAsync( + const sp<IGraphicBufferProducer> &bufferProducer); - void setVideoSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture); void setAudioSink(const sp<MediaPlayerBase::AudioSink> &sink); void start(); @@ -73,9 +77,14 @@ private: struct Renderer; struct RTSPSource; struct StreamingSource; + struct Action; + struct SeekAction; + struct SetSurfaceAction; + struct SimpleAction; enum { kWhatSetDataSource = '=DaS', + kWhatPrepare = 'prep', kWhatSetVideoNativeWindow = '=NaW', kWhatSetAudioSink = '=AuS', kWhatMoreDataQueued = 'more', @@ -89,12 +98,14 @@ private: kWhatPause = 'paus', kWhatResume = 'rsme', kWhatPollDuration = 'polD', + kWhatSourceNotify = 'srcN', }; wp<NuPlayerDriver> mDriver; bool mUIDValid; uid_t mUID; sp<Source> mSource; + uint32_t 
mSourceFlags; sp<NativeWindowWrapper> mNativeWindow; sp<MediaPlayerBase::AudioSink> mAudioSink; sp<Decoder> mVideoDecoder; @@ -102,6 +113,8 @@ private: sp<Decoder> mAudioDecoder; sp<Renderer> mRenderer; + List<sp<Action> > mDeferredActions; + bool mAudioEOS; bool mVideoEOS; @@ -126,8 +139,6 @@ private: FlushStatus mFlushingAudio; FlushStatus mFlushingVideo; - bool mResetInProgress; - bool mResetPostponed; int64_t mSkipRenderingAudioUntilMediaTimeUs; int64_t mSkipRenderingVideoUntilMediaTimeUs; @@ -137,6 +148,8 @@ private: int32_t mVideoScalingMode; + bool mStarted; + status_t instantiateDecoder(bool audio, sp<Decoder> *decoder); status_t feedDecoderInputData(bool audio, const sp<AMessage> &msg); @@ -150,12 +163,22 @@ private: static bool IsFlushingState(FlushStatus state, bool *needShutdown = NULL); - void finishReset(); void postScanSources(); void schedulePollDuration(); void cancelPollDuration(); + void processDeferredActions(); + + void performSeek(int64_t seekTimeUs); + void performDecoderFlush(); + void performDecoderShutdown(); + void performReset(); + void performScanSources(); + void performSetSurface(const sp<NativeWindowWrapper> &wrapper); + + void onSourceNotify(const sp<AMessage> &msg); + DISALLOW_EVIL_CONSTRUCTORS(NuPlayer); }; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp index d03601f..68b9623 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp @@ -21,20 +21,25 @@ #include "NuPlayerDriver.h" #include "NuPlayer.h" +#include "NuPlayerSource.h" #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/ALooper.h> +#include <media/stagefright/MetaData.h> namespace android { NuPlayerDriver::NuPlayerDriver() - : mResetInProgress(false), + : mState(STATE_IDLE), + mIsAsyncPrepare(false), + mAsyncResult(UNKNOWN_ERROR), + mSetSurfaceInProgress(false), mDurationUs(-1), 
mPositionUs(-1), mNumFramesTotal(0), mNumFramesDropped(0), mLooper(new ALooper), - mState(UNINITIALIZED), + mPlayerFlags(0), mAtEOS(false), mStartupSeekTimeUs(-1) { mLooper->setName("NuPlayerDriver Looper"); @@ -66,60 +71,143 @@ status_t NuPlayerDriver::setUID(uid_t uid) { status_t NuPlayerDriver::setDataSource( const char *url, const KeyedVector<String8, String8> *headers) { - CHECK_EQ((int)mState, (int)UNINITIALIZED); + Mutex::Autolock autoLock(mLock); - mPlayer->setDataSource(url, headers); + if (mState != STATE_IDLE) { + return INVALID_OPERATION; + } - mState = STOPPED; + mState = STATE_SET_DATASOURCE_PENDING; - return OK; + mPlayer->setDataSourceAsync(url, headers); + + while (mState == STATE_SET_DATASOURCE_PENDING) { + mCondition.wait(mLock); + } + + return mAsyncResult; } status_t NuPlayerDriver::setDataSource(int fd, int64_t offset, int64_t length) { - CHECK_EQ((int)mState, (int)UNINITIALIZED); + Mutex::Autolock autoLock(mLock); + + if (mState != STATE_IDLE) { + return INVALID_OPERATION; + } - mPlayer->setDataSource(fd, offset, length); + mState = STATE_SET_DATASOURCE_PENDING; - mState = STOPPED; + mPlayer->setDataSourceAsync(fd, offset, length); - return OK; + while (mState == STATE_SET_DATASOURCE_PENDING) { + mCondition.wait(mLock); + } + + return mAsyncResult; } status_t NuPlayerDriver::setDataSource(const sp<IStreamSource> &source) { - CHECK_EQ((int)mState, (int)UNINITIALIZED); + Mutex::Autolock autoLock(mLock); + + if (mState != STATE_IDLE) { + return INVALID_OPERATION; + } - mPlayer->setDataSource(source); + mState = STATE_SET_DATASOURCE_PENDING; - mState = STOPPED; + mPlayer->setDataSourceAsync(source); - return OK; + while (mState == STATE_SET_DATASOURCE_PENDING) { + mCondition.wait(mLock); + } + + return mAsyncResult; } status_t NuPlayerDriver::setVideoSurfaceTexture( - const sp<ISurfaceTexture> &surfaceTexture) { - mPlayer->setVideoSurfaceTexture(surfaceTexture); + const sp<IGraphicBufferProducer> &bufferProducer) { + Mutex::Autolock 
autoLock(mLock); + + if (mSetSurfaceInProgress) { + return INVALID_OPERATION; + } + + switch (mState) { + case STATE_SET_DATASOURCE_PENDING: + case STATE_RESET_IN_PROGRESS: + return INVALID_OPERATION; + + default: + break; + } + + mSetSurfaceInProgress = true; + + mPlayer->setVideoSurfaceTextureAsync(bufferProducer); + + while (mSetSurfaceInProgress) { + mCondition.wait(mLock); + } return OK; } status_t NuPlayerDriver::prepare() { - sendEvent(MEDIA_SET_VIDEO_SIZE, 0, 0); - return OK; + Mutex::Autolock autoLock(mLock); + return prepare_l(); } -status_t NuPlayerDriver::prepareAsync() { - status_t err = prepare(); +status_t NuPlayerDriver::prepare_l() { + switch (mState) { + case STATE_UNPREPARED: + mState = STATE_PREPARING; + + // Make sure we're not posting any notifications, success or + // failure information is only communicated through our result + // code. + mIsAsyncPrepare = false; + mPlayer->prepareAsync(); + while (mState == STATE_PREPARING) { + mCondition.wait(mLock); + } + return (mState == STATE_PREPARED) ? 
OK : UNKNOWN_ERROR; + default: + return INVALID_OPERATION; + }; +} - notifyListener(MEDIA_PREPARED); +status_t NuPlayerDriver::prepareAsync() { + Mutex::Autolock autoLock(mLock); - return err; + switch (mState) { + case STATE_UNPREPARED: + mState = STATE_PREPARING; + mIsAsyncPrepare = true; + mPlayer->prepareAsync(); + return OK; + default: + return INVALID_OPERATION; + }; } status_t NuPlayerDriver::start() { + Mutex::Autolock autoLock(mLock); + switch (mState) { - case UNINITIALIZED: - return INVALID_OPERATION; - case STOPPED: + case STATE_UNPREPARED: + { + status_t err = prepare_l(); + + if (err != OK) { + return err; + } + + CHECK_EQ(mState, STATE_PREPARED); + + // fall through + } + + case STATE_PREPARED: { mAtEOS = false; mPlayer->start(); @@ -133,21 +221,23 @@ status_t NuPlayerDriver::start() { mStartupSeekTimeUs = -1; } - break; } - case PLAYING: - return OK; - default: - { - CHECK_EQ((int)mState, (int)PAUSED); + case STATE_RUNNING: + break; + + case STATE_PAUSED: + { mPlayer->resume(); break; } + + default: + return INVALID_OPERATION; } - mState = PLAYING; + mState = STATE_RUNNING; return OK; } @@ -157,43 +247,44 @@ status_t NuPlayerDriver::stop() { } status_t NuPlayerDriver::pause() { + Mutex::Autolock autoLock(mLock); + switch (mState) { - case UNINITIALIZED: - return INVALID_OPERATION; - case STOPPED: + case STATE_PAUSED: + case STATE_PREPARED: return OK; - case PLAYING: + + case STATE_RUNNING: mPlayer->pause(); break; + default: - { - CHECK_EQ((int)mState, (int)PAUSED); - return OK; - } + return INVALID_OPERATION; } - mState = PAUSED; + mState = STATE_PAUSED; return OK; } bool NuPlayerDriver::isPlaying() { - return mState == PLAYING && !mAtEOS; + return mState == STATE_RUNNING && !mAtEOS; } status_t NuPlayerDriver::seekTo(int msec) { + Mutex::Autolock autoLock(mLock); + int64_t seekTimeUs = msec * 1000ll; switch (mState) { - case UNINITIALIZED: - return INVALID_OPERATION; - case STOPPED: + case STATE_PREPARED: { mStartupSeekTimeUs = seekTimeUs; break; } 
- case PLAYING: - case PAUSED: + + case STATE_RUNNING: + case STATE_PAUSED: { mAtEOS = false; mPlayer->seekToAsync(seekTimeUs); @@ -201,8 +292,7 @@ status_t NuPlayerDriver::seekTo(int msec) { } default: - TRESPASS(); - break; + return INVALID_OPERATION; } return OK; @@ -224,27 +314,46 @@ status_t NuPlayerDriver::getDuration(int *msec) { Mutex::Autolock autoLock(mLock); if (mDurationUs < 0) { - *msec = 0; - } else { - *msec = (mDurationUs + 500ll) / 1000; + return UNKNOWN_ERROR; } + *msec = (mDurationUs + 500ll) / 1000; + return OK; } status_t NuPlayerDriver::reset() { Mutex::Autolock autoLock(mLock); - mResetInProgress = true; + switch (mState) { + case STATE_IDLE: + return OK; + + case STATE_SET_DATASOURCE_PENDING: + case STATE_RESET_IN_PROGRESS: + return INVALID_OPERATION; + + case STATE_PREPARING: + { + CHECK(mIsAsyncPrepare); + + notifyListener(MEDIA_PREPARED); + break; + } + + default: + break; + } + + mState = STATE_RESET_IN_PROGRESS; mPlayer->resetAsync(); - while (mResetInProgress) { + while (mState == STATE_RESET_IN_PROGRESS) { mCondition.wait(mLock); } mDurationUs = -1; mPositionUs = -1; - mState = UNINITIALIZED; mStartupSeekTimeUs = -1; return OK; @@ -277,6 +386,7 @@ status_t NuPlayerDriver::invoke(const Parcel &request, Parcel *reply) { int mode = request.readInt32(); return mPlayer->setVideoScalingMode(mode); } + default: { return INVALID_OPERATION; @@ -298,13 +408,45 @@ status_t NuPlayerDriver::getParameter(int key, Parcel *reply) { status_t NuPlayerDriver::getMetadata( const media::Metadata::Filter& ids, Parcel *records) { - return INVALID_OPERATION; + Mutex::Autolock autoLock(mLock); + + using media::Metadata; + + Metadata meta(records); + + meta.appendBool( + Metadata::kPauseAvailable, + mPlayerFlags & NuPlayer::Source::FLAG_CAN_PAUSE); + + meta.appendBool( + Metadata::kSeekBackwardAvailable, + mPlayerFlags & NuPlayer::Source::FLAG_CAN_SEEK_BACKWARD); + + meta.appendBool( + Metadata::kSeekForwardAvailable, + mPlayerFlags & 
NuPlayer::Source::FLAG_CAN_SEEK_FORWARD); + + meta.appendBool( + Metadata::kSeekAvailable, + mPlayerFlags & NuPlayer::Source::FLAG_CAN_SEEK); + + return OK; } void NuPlayerDriver::notifyResetComplete() { Mutex::Autolock autoLock(mLock); - CHECK(mResetInProgress); - mResetInProgress = false; + + CHECK_EQ(mState, STATE_RESET_IN_PROGRESS); + mState = STATE_IDLE; + mCondition.broadcast(); +} + +void NuPlayerDriver::notifySetSurfaceComplete() { + Mutex::Autolock autoLock(mLock); + + CHECK(mSetSurfaceInProgress); + mSetSurfaceInProgress = false; + mCondition.broadcast(); } @@ -356,4 +498,50 @@ void NuPlayerDriver::notifyListener(int msg, int ext1, int ext2) { sendEvent(msg, ext1, ext2); } +void NuPlayerDriver::notifySetDataSourceCompleted(status_t err) { + Mutex::Autolock autoLock(mLock); + + CHECK_EQ(mState, STATE_SET_DATASOURCE_PENDING); + + mAsyncResult = err; + mState = (err == OK) ? STATE_UNPREPARED : STATE_IDLE; + mCondition.broadcast(); +} + +void NuPlayerDriver::notifyPrepareCompleted(status_t err) { + Mutex::Autolock autoLock(mLock); + + if (mState != STATE_PREPARING) { + // We were preparing asynchronously when the client called + // reset(), we sent a premature "prepared" notification and + // then initiated the reset. This notification is stale. 
+ CHECK(mState == STATE_RESET_IN_PROGRESS || mState == STATE_IDLE); + return; + } + + CHECK_EQ(mState, STATE_PREPARING); + + mAsyncResult = err; + + if (err == OK) { + if (mIsAsyncPrepare) { + notifyListener(MEDIA_PREPARED); + } + mState = STATE_PREPARED; + } else { + if (mIsAsyncPrepare) { + notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err); + } + mState = STATE_UNPREPARED; + } + + mCondition.broadcast(); +} + +void NuPlayerDriver::notifyFlagsChanged(uint32_t flags) { + Mutex::Autolock autoLock(mLock); + + mPlayerFlags = flags; +} + } // namespace android diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h index 4a0026c..5df0cfb 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h @@ -38,7 +38,7 @@ struct NuPlayerDriver : public MediaPlayerInterface { virtual status_t setDataSource(const sp<IStreamSource> &source); virtual status_t setVideoSurfaceTexture( - const sp<ISurfaceTexture> &surfaceTexture); + const sp<IGraphicBufferProducer> &bufferProducer); virtual status_t prepare(); virtual status_t prepareAsync(); virtual status_t start(); @@ -61,23 +61,43 @@ struct NuPlayerDriver : public MediaPlayerInterface { virtual status_t dump(int fd, const Vector<String16> &args) const; + void notifySetDataSourceCompleted(status_t err); + void notifyPrepareCompleted(status_t err); void notifyResetComplete(); + void notifySetSurfaceComplete(); void notifyDuration(int64_t durationUs); void notifyPosition(int64_t positionUs); void notifySeekComplete(); void notifyFrameStats(int64_t numFramesTotal, int64_t numFramesDropped); void notifyListener(int msg, int ext1 = 0, int ext2 = 0); + void notifyFlagsChanged(uint32_t flags); protected: virtual ~NuPlayerDriver(); private: + enum State { + STATE_IDLE, + STATE_SET_DATASOURCE_PENDING, + STATE_UNPREPARED, + STATE_PREPARING, + STATE_PREPARED, + STATE_RUNNING, + STATE_PAUSED, + 
STATE_RESET_IN_PROGRESS, + }; + mutable Mutex mLock; Condition mCondition; + State mState; + + bool mIsAsyncPrepare; + status_t mAsyncResult; + // The following are protected through "mLock" // >>> - bool mResetInProgress; + bool mSetSurfaceInProgress; int64_t mDurationUs; int64_t mPositionUs; int64_t mNumFramesTotal; @@ -86,19 +106,14 @@ private: sp<ALooper> mLooper; sp<NuPlayer> mPlayer; + uint32_t mPlayerFlags; - enum State { - UNINITIALIZED, - STOPPED, - PLAYING, - PAUSED - }; - - State mState; bool mAtEOS; int64_t mStartupSeekTimeUs; + status_t prepare_l(); + DISALLOW_EVIL_CONSTRUCTORS(NuPlayerDriver); }; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp index 8a75f83..404b56f 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp @@ -31,9 +31,11 @@ const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; NuPlayer::Renderer::Renderer( const sp<MediaPlayerBase::AudioSink> &sink, - const sp<AMessage> ¬ify) + const sp<AMessage> ¬ify, + uint32_t flags) : mAudioSink(sink), mNotify(notify), + mFlags(flags), mNumFramesWritten(0), mDrainAudioQueuePending(false), mDrainVideoQueuePending(false), @@ -323,6 +325,11 @@ void NuPlayer::Renderer::postDrainVideoQueue() { if (entry.mBuffer == NULL) { // EOS doesn't carry a timestamp. 
delayUs = 0; + } else if (mFlags & FLAG_REAL_TIME) { + int64_t mediaTimeUs; + CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); + + delayUs = mediaTimeUs - ALooper::GetNowUs(); } else { int64_t mediaTimeUs; CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); @@ -368,12 +375,17 @@ void NuPlayer::Renderer::onDrainVideoQueue() { return; } - int64_t mediaTimeUs; - CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); + int64_t realTimeUs; + if (mFlags & FLAG_REAL_TIME) { + CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); + } else { + int64_t mediaTimeUs; + CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); + + realTimeUs = mediaTimeUs - mAnchorTimeMediaUs + mAnchorTimeRealUs; + } - int64_t realTimeUs = mediaTimeUs - mAnchorTimeMediaUs + mAnchorTimeRealUs; mVideoLateByUs = ALooper::GetNowUs() - realTimeUs; - bool tooLate = (mVideoLateByUs > 40000); if (tooLate) { @@ -512,9 +524,15 @@ void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) { entry.mFinalResult = finalResult; if (audio) { + if (mAudioQueue.empty() && mSyncQueues) { + syncQueuesDone(); + } mAudioQueue.push_back(entry); postDrainAudioQueue(); } else { + if (mVideoQueue.empty() && mSyncQueues) { + syncQueuesDone(); + } mVideoQueue.push_back(entry); postDrainVideoQueue(); } diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h index e4368c7..c9796e2 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h @@ -25,8 +25,12 @@ namespace android { struct ABuffer; struct NuPlayer::Renderer : public AHandler { + enum Flags { + FLAG_REAL_TIME = 1, + }; Renderer(const sp<MediaPlayerBase::AudioSink> &sink, - const sp<AMessage> ¬ify); + const sp<AMessage> ¬ify, + uint32_t flags = 0); void queueBuffer( bool audio, @@ -79,6 +83,7 @@ private: sp<MediaPlayerBase::AudioSink> mAudioSink; sp<AMessage> 
mNotify; + uint32_t mFlags; List<QueueEntry> mAudioQueue; List<QueueEntry> mVideoQueue; uint32_t mNumFramesWritten; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h index a635340..1cbf575 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h @@ -20,20 +20,42 @@ #include "NuPlayer.h" +#include <media/stagefright/foundation/AMessage.h> + namespace android { struct ABuffer; +struct MetaData; -struct NuPlayer::Source : public RefBase { +struct NuPlayer::Source : public AHandler { enum Flags { - FLAG_SEEKABLE = 1, - FLAG_DYNAMIC_DURATION = 2, + FLAG_CAN_PAUSE = 1, + FLAG_CAN_SEEK_BACKWARD = 2, // the "10 sec back button" + FLAG_CAN_SEEK_FORWARD = 4, // the "10 sec forward button" + FLAG_CAN_SEEK = 8, // the "seek bar" + FLAG_DYNAMIC_DURATION = 16, + }; + + enum { + kWhatPrepared, + kWhatFlagsChanged, + kWhatVideoSizeChanged, + kWhatBufferingStart, + kWhatBufferingEnd, }; - Source() {} + // The provided message is used to notify the player about various + // events. + Source(const sp<AMessage> &notify) + : mNotify(notify) { + } + + virtual void prepareAsync() = 0; virtual void start() = 0; virtual void stop() {} + virtual void pause() {} + virtual void resume() {} // Returns OK iff more data was available, // an error or ERROR_END_OF_STREAM if not. 
@@ -52,14 +74,26 @@ struct NuPlayer::Source : public RefBase { return INVALID_OPERATION; } - virtual uint32_t flags() const = 0; + virtual bool isRealTime() const { + return false; + } protected: virtual ~Source() {} + virtual void onMessageReceived(const sp<AMessage> &msg); + virtual sp<MetaData> getFormatMeta(bool audio) { return NULL; } + sp<AMessage> dupNotify() const { return mNotify->dup(); } + + void notifyFlagsChanged(uint32_t flags); + void notifyVideoSizeChanged(int32_t width, int32_t height); + void notifyPrepared(status_t err = OK); + private: + sp<AMessage> mNotify; + DISALLOW_EVIL_CONSTRUCTORS(Source); }; diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp index cf455bd..50ebf9c 100644 --- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp +++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp @@ -22,26 +22,35 @@ #include "AnotherPacketSource.h" #include "MyHandler.h" +#include "SDPLoader.h" #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MetaData.h> namespace android { +const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs + NuPlayer::RTSPSource::RTSPSource( + const sp<AMessage> ¬ify, const char *url, const KeyedVector<String8, String8> *headers, bool uidValid, - uid_t uid) - : mURL(url), + uid_t uid, + bool isSDP) + : Source(notify), + mURL(url), mUIDValid(uidValid), mUID(uid), mFlags(0), + mIsSDP(isSDP), mState(DISCONNECTED), mFinalResult(OK), mDisconnectReplyID(0), - mStartingUp(true), - mSeekGeneration(0) { + mBuffering(true), + mSeekGeneration(0), + mEOSTimeoutAudio(0), + mEOSTimeoutVideo(0) { if (headers) { mExtraHeaders = *headers; @@ -62,7 +71,7 @@ NuPlayer::RTSPSource::~RTSPSource() { } } -void NuPlayer::RTSPSource::start() { +void NuPlayer::RTSPSource::prepareAsync() { if (mLooper == NULL) { mLooper = new ALooper; mLooper->setName("rtsp"); @@ -73,25 +82,64 @@ void NuPlayer::RTSPSource::start() { } CHECK(mHandler == NULL); + CHECK(mSDPLoader == 
NULL); sp<AMessage> notify = new AMessage(kWhatNotify, mReflector->id()); - mHandler = new MyHandler(mURL.c_str(), notify, mUIDValid, mUID); - mLooper->registerHandler(mHandler); - CHECK_EQ(mState, (int)DISCONNECTED); mState = CONNECTING; - mHandler->connect(); + if (mIsSDP) { + mSDPLoader = new SDPLoader(notify, + (mFlags & kFlagIncognito) ? SDPLoader::kFlagIncognito : 0, + mUIDValid, mUID); + + mSDPLoader->load( + mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders); + } else { + mHandler = new MyHandler(mURL.c_str(), notify, mUIDValid, mUID); + mLooper->registerHandler(mHandler); + + mHandler->connect(); + } + + sp<AMessage> notifyStart = dupNotify(); + notifyStart->setInt32("what", kWhatBufferingStart); + notifyStart->post(); +} + +void NuPlayer::RTSPSource::start() { } void NuPlayer::RTSPSource::stop() { + if (mLooper == NULL) { + return; + } sp<AMessage> msg = new AMessage(kWhatDisconnect, mReflector->id()); sp<AMessage> dummy; msg->postAndAwaitResponse(&dummy); } +void NuPlayer::RTSPSource::pause() { + int64_t mediaDurationUs = 0; + getDuration(&mediaDurationUs); + for (size_t index = 0; index < mTracks.size(); index++) { + TrackInfo *info = &mTracks.editItemAt(index); + sp<AnotherPacketSource> source = info->mSource; + + // Check if EOS or ERROR is received + if (source != NULL && source->isFinished(mediaDurationUs)) { + return; + } + } + mHandler->pause(); +} + +void NuPlayer::RTSPSource::resume() { + mHandler->resume(); +} + status_t NuPlayer::RTSPSource::feedMoreTSData() { return mFinalResult; } @@ -112,6 +160,13 @@ bool NuPlayer::RTSPSource::haveSufficientDataOnAllTracks() { static const int64_t kMinDurationUs = 2000000ll; + int64_t mediaDurationUs = 0; + getDuration(&mediaDurationUs); + if ((mAudioTrack != NULL && mAudioTrack->isFinished(mediaDurationUs)) + || (mVideoTrack != NULL && mVideoTrack->isFinished(mediaDurationUs))) { + return true; + } + status_t err; int64_t durationUs; if (mAudioTrack != NULL @@ -137,12 +192,16 @@ bool 
NuPlayer::RTSPSource::haveSufficientDataOnAllTracks() { status_t NuPlayer::RTSPSource::dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) { - if (mStartingUp) { + if (mBuffering) { if (!haveSufficientDataOnAllTracks()) { return -EWOULDBLOCK; } - mStartingUp = false; + mBuffering = false; + + sp<AMessage> notify = dupNotify(); + notify->setInt32("what", kWhatBufferingEnd); + notify->post(); } sp<AnotherPacketSource> source = getSource(audio); @@ -153,9 +212,51 @@ status_t NuPlayer::RTSPSource::dequeueAccessUnit( status_t finalResult; if (!source->hasBufferAvailable(&finalResult)) { - return finalResult == OK ? -EWOULDBLOCK : finalResult; + if (finalResult == OK) { + int64_t mediaDurationUs = 0; + getDuration(&mediaDurationUs); + sp<AnotherPacketSource> otherSource = getSource(!audio); + status_t otherFinalResult; + + // If other source already signaled EOS, this source should also signal EOS + if (otherSource != NULL && + !otherSource->hasBufferAvailable(&otherFinalResult) && + otherFinalResult == ERROR_END_OF_STREAM) { + source->signalEOS(ERROR_END_OF_STREAM); + return ERROR_END_OF_STREAM; + } + + // If this source has detected near end, give it some time to retrieve more + // data before signaling EOS + if (source->isFinished(mediaDurationUs)) { + int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo; + if (eosTimeout == 0) { + setEOSTimeout(audio, ALooper::GetNowUs()); + } else if ((ALooper::GetNowUs() - eosTimeout) > kNearEOSTimeoutUs) { + setEOSTimeout(audio, 0); + source->signalEOS(ERROR_END_OF_STREAM); + return ERROR_END_OF_STREAM; + } + return -EWOULDBLOCK; + } + + if (!(otherSource != NULL && otherSource->isFinished(mediaDurationUs))) { + // We should not enter buffering mode + // if any of the sources already have detected EOS. 
+ mBuffering = true; + + sp<AMessage> notify = dupNotify(); + notify->setInt32("what", kWhatBufferingStart); + notify->post(); + } + + return -EWOULDBLOCK; + } + return finalResult; } + setEOSTimeout(audio, 0); + return source->dequeueAccessUnit(accessUnit); } @@ -170,6 +271,14 @@ sp<AnotherPacketSource> NuPlayer::RTSPSource::getSource(bool audio) { return audio ? mAudioTrack : mVideoTrack; } +void NuPlayer::RTSPSource::setEOSTimeout(bool audio, int64_t timeout) { + if (audio) { + mEOSTimeoutAudio = timeout; + } else { + mEOSTimeoutVideo = timeout; + } +} + status_t NuPlayer::RTSPSource::getDuration(int64_t *durationUs) { *durationUs = 0ll; @@ -210,10 +319,6 @@ void NuPlayer::RTSPSource::performSeek(int64_t seekTimeUs) { mHandler->seek(seekTimeUs); } -uint32_t NuPlayer::RTSPSource::flags() const { - return FLAG_SEEKABLE; -} - void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) { if (msg->what() == kWhatDisconnect) { uint32_t replyID; @@ -245,17 +350,35 @@ void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) { switch (what) { case MyHandler::kWhatConnected: + { onConnected(); + + notifyVideoSizeChanged(0, 0); + + uint32_t flags = 0; + + if (mHandler->isSeekable()) { + flags = FLAG_CAN_PAUSE | FLAG_CAN_SEEK; + + // Seeking 10secs forward or backward is a very expensive + // operation for rtsp, so let's not enable that. + // The user can always use the seek bar. 
+ } + + notifyFlagsChanged(flags); + notifyPrepared(); break; + } case MyHandler::kWhatDisconnected: + { onDisconnected(msg); break; + } case MyHandler::kWhatSeekDone: { mState = CONNECTED; - mStartingUp = true; break; } @@ -405,6 +528,12 @@ void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) { break; } + case SDPLoader::kWhatSDPLoaded: + { + onSDPLoaded(msg); + break; + } + default: TRESPASS(); } @@ -458,6 +587,52 @@ void NuPlayer::RTSPSource::onConnected() { mState = CONNECTED; } +void NuPlayer::RTSPSource::onSDPLoaded(const sp<AMessage> &msg) { + status_t err; + CHECK(msg->findInt32("result", &err)); + + mSDPLoader.clear(); + + if (mDisconnectReplyID != 0) { + err = UNKNOWN_ERROR; + } + + if (err == OK) { + sp<ASessionDescription> desc; + sp<RefBase> obj; + CHECK(msg->findObject("description", &obj)); + desc = static_cast<ASessionDescription *>(obj.get()); + + AString rtspUri; + if (!desc->findAttribute(0, "a=control", &rtspUri)) { + ALOGE("Unable to find url in SDP"); + err = UNKNOWN_ERROR; + } else { + sp<AMessage> notify = new AMessage(kWhatNotify, mReflector->id()); + + mHandler = new MyHandler(rtspUri.c_str(), notify, mUIDValid, mUID); + mLooper->registerHandler(mHandler); + + mHandler->loadSDP(desc); + } + } + + if (err != OK) { + if (mState == CONNECTING) { + // We're still in the preparation phase, signal that it + // failed. + notifyPrepared(err); + } + + mState = DISCONNECTED; + mFinalResult = err; + + if (mDisconnectReplyID != 0) { + finishDisconnectIfPossible(); + } + } +} + void NuPlayer::RTSPSource::onDisconnected(const sp<AMessage> &msg) { status_t err; CHECK(msg->findInt32("result", &err)); @@ -466,6 +641,12 @@ void NuPlayer::RTSPSource::onDisconnected(const sp<AMessage> &msg) { mLooper->unregisterHandler(mHandler->id()); mHandler.clear(); + if (mState == CONNECTING) { + // We're still in the preparation phase, signal that it + // failed. 
+ notifyPrepared(err); + } + mState = DISCONNECTED; mFinalResult = err; @@ -476,7 +657,11 @@ void NuPlayer::RTSPSource::onDisconnected(const sp<AMessage> &msg) { void NuPlayer::RTSPSource::finishDisconnectIfPossible() { if (mState != DISCONNECTED) { - mHandler->disconnect(); + if (mHandler != NULL) { + mHandler->disconnect(); + } else if (mSDPLoader != NULL) { + mSDPLoader->cancel(); + } return; } diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h index 779d791..8cf34a0 100644 --- a/media/libmediaplayerservice/nuplayer/RTSPSource.h +++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h @@ -29,16 +29,22 @@ namespace android { struct ALooper; struct AnotherPacketSource; struct MyHandler; +struct SDPLoader; struct NuPlayer::RTSPSource : public NuPlayer::Source { RTSPSource( + const sp<AMessage> ¬ify, const char *url, const KeyedVector<String8, String8> *headers, bool uidValid = false, - uid_t uid = 0); + uid_t uid = 0, + bool isSDP = false); + virtual void prepareAsync(); virtual void start(); virtual void stop(); + virtual void pause(); + virtual void resume(); virtual status_t feedMoreTSData(); @@ -47,8 +53,6 @@ struct NuPlayer::RTSPSource : public NuPlayer::Source { virtual status_t getDuration(int64_t *durationUs); virtual status_t seekTo(int64_t seekTimeUs); - virtual uint32_t flags() const; - void onMessageReceived(const sp<AMessage> &msg); protected: @@ -89,14 +93,16 @@ private: bool mUIDValid; uid_t mUID; uint32_t mFlags; + bool mIsSDP; State mState; status_t mFinalResult; uint32_t mDisconnectReplyID; - bool mStartingUp; + bool mBuffering; sp<ALooper> mLooper; sp<AHandlerReflector<RTSPSource> > mReflector; sp<MyHandler> mHandler; + sp<SDPLoader> mSDPLoader; Vector<TrackInfo> mTracks; sp<AnotherPacketSource> mAudioTrack; @@ -106,9 +112,13 @@ private: int32_t mSeekGeneration; + int64_t mEOSTimeoutAudio; + int64_t mEOSTimeoutVideo; + sp<AnotherPacketSource> getSource(bool audio); void onConnected(); 
+ void onSDPLoaded(const sp<AMessage> &msg); void onDisconnected(const sp<AMessage> &msg); void finishDisconnectIfPossible(); @@ -116,6 +126,8 @@ private: bool haveSufficientDataOnAllTracks(); + void setEOSTimeout(bool audio, int64_t timeout); + DISALLOW_EVIL_CONSTRUCTORS(RTSPSource); }; diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp index 7159404..28f0d50 100644 --- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp +++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp @@ -32,14 +32,23 @@ namespace android { -NuPlayer::StreamingSource::StreamingSource(const sp<IStreamSource> &source) - : mSource(source), +NuPlayer::StreamingSource::StreamingSource( + const sp<AMessage> ¬ify, + const sp<IStreamSource> &source) + : Source(notify), + mSource(source), mFinalResult(OK) { } NuPlayer::StreamingSource::~StreamingSource() { } +void NuPlayer::StreamingSource::prepareAsync() { + notifyVideoSizeChanged(0, 0); + notifyFlagsChanged(0); + notifyPrepared(); +} + void NuPlayer::StreamingSource::start() { mStreamListener = new NuPlayerStreamListener(mSource, 0); @@ -173,8 +182,8 @@ status_t NuPlayer::StreamingSource::dequeueAccessUnit( return err; } -uint32_t NuPlayer::StreamingSource::flags() const { - return 0; +bool NuPlayer::StreamingSource::isRealTime() const { + return mSource->flags() & IStreamSource::kFlagIsRealTimeData; } } // namespace android diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h index a27b58a..412b6c4 100644 --- a/media/libmediaplayerservice/nuplayer/StreamingSource.h +++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h @@ -27,15 +27,18 @@ struct ABuffer; struct ATSParser; struct NuPlayer::StreamingSource : public NuPlayer::Source { - StreamingSource(const sp<IStreamSource> &source); + StreamingSource( + const sp<AMessage> ¬ify, + const sp<IStreamSource> &source); + virtual void 
prepareAsync(); virtual void start(); virtual status_t feedMoreTSData(); virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit); - virtual uint32_t flags() const; + virtual bool isRealTime() const; protected: virtual ~StreamingSource(); diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp index a62d5a2..d31d947 100644 --- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp +++ b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp @@ -104,8 +104,10 @@ private: DISALLOW_EVIL_CONSTRUCTORS(StreamSource); }; -MP4Source::MP4Source(const sp<IStreamSource> &source) - : mSource(source), +MP4Source::MP4Source( + const sp<AMessage> ¬ify, const sp<IStreamSource> &source) + : Source(notify), + mSource(source), mLooper(new ALooper), mParser(new FragmentedMP4Parser), mEOS(false) { @@ -115,6 +117,12 @@ MP4Source::MP4Source(const sp<IStreamSource> &source) MP4Source::~MP4Source() { } +void MP4Source::prepareAsync() { + notifyVideoSizeChanged(0, 0); + notifyFlagsChanged(0); + notifyPrepared(); +} + void MP4Source::start() { mLooper->start(false /* runOnCallingThread */); mParser->start(new StreamSource(mSource)); @@ -133,8 +141,4 @@ status_t MP4Source::dequeueAccessUnit( return mParser->dequeueAccessUnit(audio, accessUnit); } -uint32_t MP4Source::flags() const { - return 0; -} - } // namespace android diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h index abca236..a6ef622 100644 --- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h +++ b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h @@ -24,8 +24,9 @@ namespace android { struct FragmentedMP4Parser; struct MP4Source : public NuPlayer::Source { - MP4Source(const sp<IStreamSource> &source); + MP4Source(const sp<AMessage> ¬ify, const sp<IStreamSource> &source); + virtual void prepareAsync(); virtual void start(); virtual status_t feedMoreTSData(); @@ -35,8 
+36,6 @@ struct MP4Source : public NuPlayer::Source { virtual status_t dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit); - virtual uint32_t flags() const; - protected: virtual ~MP4Source(); diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk index 757272f..5d00d15 100644 --- a/media/libnbaio/Android.mk +++ b/media/libnbaio/Android.mk @@ -14,6 +14,8 @@ LOCAL_SRC_FILES := \ roundup.c \ SourceAudioBufferProvider.cpp +LOCAL_SRC_FILES += NBLog.cpp + # libsndfile license is incompatible; uncomment to use for local debug only #LOCAL_SRC_FILES += LibsndfileSink.cpp LibsndfileSource.cpp #LOCAL_C_INCLUDES += path/to/libsndfile/src @@ -25,8 +27,10 @@ LOCAL_SRC_FILES := \ LOCAL_MODULE := libnbaio LOCAL_SHARED_LIBRARIES := \ + libbinder \ libcommon_time_client \ libcutils \ - libutils + libutils \ + liblog include $(BUILD_SHARED_LIBRARY) diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp index 00d2017..e0d2c21 100644 --- a/media/libnbaio/NBAIO.cpp +++ b/media/libnbaio/NBAIO.cpp @@ -24,44 +24,55 @@ namespace android { size_t Format_frameSize(NBAIO_Format format) { - switch (format) { - case Format_SR44_1_C2_I16: - case Format_SR48_C2_I16: - return 2 * sizeof(short); - case Format_SR44_1_C1_I16: - case Format_SR48_C1_I16: - return 1 * sizeof(short); - case Format_Invalid: - default: - return 0; - } + return Format_channelCount(format) * sizeof(short); } size_t Format_frameBitShift(NBAIO_Format format) { - switch (format) { - case Format_SR44_1_C2_I16: - case Format_SR48_C2_I16: - return 2; // 1 << 2 == 2 * sizeof(short) - case Format_SR44_1_C1_I16: - case Format_SR48_C1_I16: - return 1; // 1 << 1 == 1 * sizeof(short) - case Format_Invalid: - default: - return 0; - } + // sizeof(short) == 2, so frame size == 1 << channels + return Format_channelCount(format); } +enum { + Format_SR_8000, + Format_SR_11025, + Format_SR_16000, + Format_SR_22050, + Format_SR_24000, + Format_SR_32000, + Format_SR_44100, + Format_SR_48000, + Format_SR_Mask = 7 +}; + 
+enum { + Format_C_1 = 0x08, + Format_C_2 = 0x10, + Format_C_Mask = 0x18 +}; + unsigned Format_sampleRate(NBAIO_Format format) { - switch (format) { - case Format_SR44_1_C1_I16: - case Format_SR44_1_C2_I16: + if (format == Format_Invalid) { + return 0; + } + switch (format & Format_SR_Mask) { + case Format_SR_8000: + return 8000; + case Format_SR_11025: + return 11025; + case Format_SR_16000: + return 16000; + case Format_SR_22050: + return 22050; + case Format_SR_24000: + return 24000; + case Format_SR_32000: + return 32000; + case Format_SR_44100: return 44100; - case Format_SR48_C1_I16: - case Format_SR48_C2_I16: + case Format_SR_48000: return 48000; - case Format_Invalid: default: return 0; } @@ -69,14 +80,14 @@ unsigned Format_sampleRate(NBAIO_Format format) unsigned Format_channelCount(NBAIO_Format format) { - switch (format) { - case Format_SR44_1_C1_I16: - case Format_SR48_C1_I16: + if (format == Format_Invalid) { + return 0; + } + switch (format & Format_C_Mask) { + case Format_C_1: return 1; - case Format_SR44_1_C2_I16: - case Format_SR48_C2_I16: + case Format_C_2: return 2; - case Format_Invalid: default: return 0; } @@ -84,11 +95,46 @@ unsigned Format_channelCount(NBAIO_Format format) NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount) { - if (sampleRate == 44100 && channelCount == 2) return Format_SR44_1_C2_I16; - if (sampleRate == 48000 && channelCount == 2) return Format_SR48_C2_I16; - if (sampleRate == 44100 && channelCount == 1) return Format_SR44_1_C1_I16; - if (sampleRate == 48000 && channelCount == 1) return Format_SR48_C1_I16; - return Format_Invalid; + NBAIO_Format format; + switch (sampleRate) { + case 8000: + format = Format_SR_8000; + break; + case 11025: + format = Format_SR_11025; + break; + case 16000: + format = Format_SR_16000; + break; + case 22050: + format = Format_SR_22050; + break; + case 24000: + format = Format_SR_24000; + break; + case 32000: + format = Format_SR_32000; + break; + case 44100: + format = 
Format_SR_44100; + break; + case 48000: + format = Format_SR_48000; + break; + default: + return Format_Invalid; + } + switch (channelCount) { + case 1: + format |= Format_C_1; + break; + case 2: + format |= Format_C_2; + break; + default: + return Format_Invalid; + } + return format; } // This is a default implementation; it is expected that subclasses will optimize this. diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp new file mode 100644 index 0000000..045bf64 --- /dev/null +++ b/media/libnbaio/NBLog.cpp @@ -0,0 +1,447 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "NBLog" +//#define LOG_NDEBUG 0 + +#include <stdarg.h> +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <time.h> +#include <new> +#include <cutils/atomic.h> +#include <media/nbaio/NBLog.h> +#include <utils/Log.h> + +namespace android { + +int NBLog::Entry::readAt(size_t offset) const +{ + // FIXME This is too slow, despite the name it is used during writing + if (offset == 0) + return mEvent; + else if (offset == 1) + return mLength; + else if (offset < (size_t) (mLength + 2)) + return ((char *) mData)[offset - 2]; + else if (offset == (size_t) (mLength + 2)) + return mLength; + else + return 0; +} + +// --------------------------------------------------------------------------- + +#if 0 // FIXME see note in NBLog.h +NBLog::Timeline::Timeline(size_t size, void *shared) + : mSize(roundup(size)), mOwn(shared == NULL), + mShared((Shared *) (mOwn ? new char[sharedSize(size)] : shared)) +{ + new (mShared) Shared; +} + +NBLog::Timeline::~Timeline() +{ + mShared->~Shared(); + if (mOwn) { + delete[] (char *) mShared; + } +} +#endif + +/*static*/ +size_t NBLog::Timeline::sharedSize(size_t size) +{ + return sizeof(Shared) + roundup(size); +} + +// --------------------------------------------------------------------------- + +NBLog::Writer::Writer() + : mSize(0), mShared(NULL), mRear(0), mEnabled(false) +{ +} + +NBLog::Writer::Writer(size_t size, void *shared) + : mSize(roundup(size)), mShared((Shared *) shared), mRear(0), mEnabled(mShared != NULL) +{ +} + +NBLog::Writer::Writer(size_t size, const sp<IMemory>& iMemory) + : mSize(roundup(size)), mShared(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL), + mIMemory(iMemory), mRear(0), mEnabled(mShared != NULL) +{ +} + +void NBLog::Writer::log(const char *string) +{ + if (!mEnabled) { + return; + } + size_t length = strlen(string); + if (length > 255) { + length = 255; + } + log(EVENT_STRING, string, length); +} + +void NBLog::Writer::logf(const char *fmt, ...) 
+{ + if (!mEnabled) { + return; + } + va_list ap; + va_start(ap, fmt); + Writer::logvf(fmt, ap); // the Writer:: is needed to avoid virtual dispatch for LockedWriter + va_end(ap); +} + +void NBLog::Writer::logvf(const char *fmt, va_list ap) +{ + if (!mEnabled) { + return; + } + char buffer[256]; + int length = vsnprintf(buffer, sizeof(buffer), fmt, ap); + if (length >= (int) sizeof(buffer)) { + length = sizeof(buffer) - 1; + // NUL termination is not required + // buffer[length] = '\0'; + } + if (length >= 0) { + log(EVENT_STRING, buffer, length); + } +} + +void NBLog::Writer::logTimestamp() +{ + if (!mEnabled) { + return; + } + struct timespec ts; + if (!clock_gettime(CLOCK_MONOTONIC, &ts)) { + log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec)); + } +} + +void NBLog::Writer::logTimestamp(const struct timespec& ts) +{ + if (!mEnabled) { + return; + } + log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec)); +} + +void NBLog::Writer::log(Event event, const void *data, size_t length) +{ + if (!mEnabled) { + return; + } + if (data == NULL || length > 255) { + return; + } + switch (event) { + case EVENT_STRING: + case EVENT_TIMESTAMP: + break; + case EVENT_RESERVED: + default: + return; + } + Entry entry(event, data, length); + log(&entry, true /*trusted*/); +} + +void NBLog::Writer::log(const NBLog::Entry *entry, bool trusted) +{ + if (!mEnabled) { + return; + } + if (!trusted) { + log(entry->mEvent, entry->mData, entry->mLength); + return; + } + size_t rear = mRear & (mSize - 1); + size_t written = mSize - rear; // written = number of bytes that have been written so far + size_t need = entry->mLength + 3; // mEvent, mLength, data[length], mLength + // need = number of bytes remaining to write + if (written > need) { + written = need; + } + size_t i; + // FIXME optimize this using memcpy for the data part of the Entry. + // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy. 
+ for (i = 0; i < written; ++i) { + mShared->mBuffer[rear + i] = entry->readAt(i); + } + if (rear + written == mSize && (need -= written) > 0) { + for (i = 0; i < need; ++i) { + mShared->mBuffer[i] = entry->readAt(written + i); + } + written += need; + } + android_atomic_release_store(mRear += written, &mShared->mRear); +} + +bool NBLog::Writer::isEnabled() const +{ + return mEnabled; +} + +bool NBLog::Writer::setEnabled(bool enabled) +{ + bool old = mEnabled; + mEnabled = enabled && mShared != NULL; + return old; +} + +// --------------------------------------------------------------------------- + +NBLog::LockedWriter::LockedWriter() + : Writer() +{ +} + +NBLog::LockedWriter::LockedWriter(size_t size, void *shared) + : Writer(size, shared) +{ +} + +void NBLog::LockedWriter::log(const char *string) +{ + Mutex::Autolock _l(mLock); + Writer::log(string); +} + +void NBLog::LockedWriter::logf(const char *fmt, ...) +{ + // FIXME should not take the lock until after formatting is done + Mutex::Autolock _l(mLock); + va_list ap; + va_start(ap, fmt); + Writer::logvf(fmt, ap); + va_end(ap); +} + +void NBLog::LockedWriter::logvf(const char *fmt, va_list ap) +{ + // FIXME should not take the lock until after formatting is done + Mutex::Autolock _l(mLock); + Writer::logvf(fmt, ap); +} + +void NBLog::LockedWriter::logTimestamp() +{ + // FIXME should not take the lock until after the clock_gettime() syscall + Mutex::Autolock _l(mLock); + Writer::logTimestamp(); +} + +void NBLog::LockedWriter::logTimestamp(const struct timespec& ts) +{ + Mutex::Autolock _l(mLock); + Writer::logTimestamp(ts); +} + +bool NBLog::LockedWriter::isEnabled() const +{ + Mutex::Autolock _l(mLock); + return Writer::isEnabled(); +} + +bool NBLog::LockedWriter::setEnabled(bool enabled) +{ + Mutex::Autolock _l(mLock); + return Writer::setEnabled(enabled); +} + +// --------------------------------------------------------------------------- + +NBLog::Reader::Reader(size_t size, const void *shared) + : 
mSize(roundup(size)), mShared((const Shared *) shared), mFront(0) +{ +} + +NBLog::Reader::Reader(size_t size, const sp<IMemory>& iMemory) + : mSize(roundup(size)), mShared(iMemory != 0 ? (const Shared *) iMemory->pointer() : NULL), + mIMemory(iMemory), mFront(0) +{ +} + +void NBLog::Reader::dump(int fd, size_t indent) +{ + int32_t rear = android_atomic_acquire_load(&mShared->mRear); + size_t avail = rear - mFront; + if (avail == 0) { + return; + } + size_t lost = 0; + if (avail > mSize) { + lost = avail - mSize; + mFront += lost; + avail = mSize; + } + size_t remaining = avail; // remaining = number of bytes left to read + size_t front = mFront & (mSize - 1); + size_t read = mSize - front; // read = number of bytes that have been read so far + if (read > remaining) { + read = remaining; + } + // make a copy to avoid race condition with writer + uint8_t *copy = new uint8_t[avail]; + // copy first part of circular buffer up until the wraparound point + memcpy(copy, &mShared->mBuffer[front], read); + if (front + read == mSize) { + if ((remaining -= read) > 0) { + // copy second part of circular buffer starting at beginning + memcpy(©[read], mShared->mBuffer, remaining); + read += remaining; + // remaining = 0 but not necessary + } + } + mFront += read; + size_t i = avail; + Event event; + size_t length; + struct timespec ts; + time_t maxSec = -1; + while (i >= 3) { + length = copy[i - 1]; + if (length + 3 > i || copy[i - length - 2] != length) { + break; + } + event = (Event) copy[i - length - 3]; + if (event == EVENT_TIMESTAMP) { + if (length != sizeof(struct timespec)) { + // corrupt + break; + } + memcpy(&ts, ©[i - length - 1], sizeof(struct timespec)); + if (ts.tv_sec > maxSec) { + maxSec = ts.tv_sec; + } + } + i -= length + 3; + } + if (i > 0) { + lost += i; + if (fd >= 0) { + fdprintf(fd, "%*swarning: lost %u bytes worth of events\n", indent, "", lost); + } else { + ALOGI("%*swarning: lost %u bytes worth of events\n", indent, "", lost); + } + } + size_t width = 
1; + while (maxSec >= 10) { + ++width; + maxSec /= 10; + } + char prefix[32]; + if (maxSec >= 0) { + snprintf(prefix, sizeof(prefix), "[%*s] ", width + 4, ""); + } else { + prefix[0] = '\0'; + } + while (i < avail) { + event = (Event) copy[i]; + length = copy[i + 1]; + const void *data = ©[i + 2]; + size_t advance = length + 3; + switch (event) { + case EVENT_STRING: + if (fd >= 0) { + fdprintf(fd, "%*s%s%.*s\n", indent, "", prefix, length, (const char *) data); + } else { + ALOGI("%*s%s%.*s", indent, "", prefix, length, (const char *) data); + } break; + case EVENT_TIMESTAMP: { + // already checked that length == sizeof(struct timespec); + memcpy(&ts, data, sizeof(struct timespec)); + long prevNsec = ts.tv_nsec; + long deltaMin = LONG_MAX; + long deltaMax = -1; + long deltaTotal = 0; + size_t j = i; + for (;;) { + j += sizeof(struct timespec) + 3; + if (j >= avail || (Event) copy[j] != EVENT_TIMESTAMP) { + break; + } + struct timespec tsNext; + memcpy(&tsNext, ©[j + 2], sizeof(struct timespec)); + if (tsNext.tv_sec != ts.tv_sec) { + break; + } + long delta = tsNext.tv_nsec - prevNsec; + if (delta < 0) { + break; + } + if (delta < deltaMin) { + deltaMin = delta; + } + if (delta > deltaMax) { + deltaMax = delta; + } + deltaTotal += delta; + prevNsec = tsNext.tv_nsec; + } + size_t n = (j - i) / (sizeof(struct timespec) + 3); + if (n >= kSquashTimestamp) { + if (fd >= 0) { + fdprintf(fd, "%*s[%d.%03d to .%.03d by .%.03d to .%.03d]\n", indent, "", + (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000), + (int) ((ts.tv_nsec + deltaTotal) / 1000000), + (int) (deltaMin / 1000000), (int) (deltaMax / 1000000)); + } else { + ALOGI("%*s[%d.%03d to .%.03d by .%.03d to .%.03d]\n", indent, "", + (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000), + (int) ((ts.tv_nsec + deltaTotal) / 1000000), + (int) (deltaMin / 1000000), (int) (deltaMax / 1000000)); + } + i = j; + advance = 0; + break; + } + if (fd >= 0) { + fdprintf(fd, "%*s[%d.%03d]\n", indent, "", (int) ts.tv_sec, + (int) (ts.tv_nsec / 
1000000)); + } else { + ALOGI("%*s[%d.%03d]", indent, "", (int) ts.tv_sec, + (int) (ts.tv_nsec / 1000000)); + } + } break; + case EVENT_RESERVED: + default: + if (fd >= 0) { + fdprintf(fd, "%*s%swarning: unknown event %d\n", indent, "", prefix, event); + } else { + ALOGI("%*s%swarning: unknown event %d", indent, "", prefix, event); + } + break; + } + i += advance; + } + // FIXME it would be more efficient to put a char mCopy[256] as a member variable of the dumper + delete[] copy; +} + +bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const +{ + return iMemory.get() == mIMemory.get(); +} + +} // namespace android diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp index a01d03f..64e3885 100644 --- a/media/libstagefright/ACodec.cpp +++ b/media/libstagefright/ACodec.cpp @@ -26,6 +26,7 @@ #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/BufferProducerWrapper.h> #include <media/stagefright/MediaCodecList.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/NativeWindowWrapper.h> @@ -165,6 +166,24 @@ private: //////////////////////////////////////////////////////////////////////////////// +struct ACodec::DeathNotifier : public IBinder::DeathRecipient { + DeathNotifier(const sp<AMessage> ¬ify) + : mNotify(notify) { + } + + virtual void binderDied(const wp<IBinder> &) { + mNotify->post(); + } + +protected: + virtual ~DeathNotifier() {} + +private: + sp<AMessage> mNotify; + + DISALLOW_EVIL_CONSTRUCTORS(DeathNotifier); +}; + struct ACodec::UninitializedState : public ACodec::BaseState { UninitializedState(ACodec *codec); @@ -176,6 +195,8 @@ private: void onSetup(const sp<AMessage> &msg); bool onAllocateComponent(const sp<AMessage> &msg); + sp<DeathNotifier> mDeathNotifier; + DISALLOW_EVIL_CONSTRUCTORS(UninitializedState); }; @@ -192,6 +213,7 @@ private: friend struct ACodec::UninitializedState; bool onConfigureComponent(const sp<AMessage> 
&msg); + void onCreateInputSurface(const sp<AMessage> &msg); void onStart(); void onShutdown(bool keepComponentAllocated); @@ -374,6 +396,12 @@ void ACodec::initiateSetup(const sp<AMessage> &msg) { msg->post(); } +void ACodec::signalSetParameters(const sp<AMessage> ¶ms) { + sp<AMessage> msg = new AMessage(kWhatSetParameters, id()); + msg->setMessage("params", params); + msg->post(); +} + void ACodec::initiateAllocateComponent(const sp<AMessage> &msg) { msg->setWhat(kWhatAllocateComponent); msg->setTarget(id()); @@ -386,6 +414,14 @@ void ACodec::initiateConfigureComponent(const sp<AMessage> &msg) { msg->post(); } +void ACodec::initiateCreateInputSurface() { + (new AMessage(kWhatCreateInputSurface, id()))->post(); +} + +void ACodec::signalEndOfInputStream() { + (new AMessage(kWhatSignalEndOfInputStream, id()))->post(); +} + void ACodec::initiateStart() { (new AMessage(kWhatStart, id()))->post(); } @@ -612,7 +648,7 @@ status_t ACodec::allocateOutputBuffersFromNativeWindow() { sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false)); BufferInfo info; info.mStatus = BufferInfo::OWNED_BY_US; - info.mData = new ABuffer(0); + info.mData = new ABuffer(NULL /* data */, def.nBufferSize /* capacity */); info.mGraphicBuffer = graphicBuffer; mBuffers[kPortIndexOutput].push(info); @@ -712,12 +748,10 @@ status_t ACodec::freeOutputBuffersNotOwnedByComponent() { BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i); - if (info->mStatus != - BufferInfo::OWNED_BY_COMPONENT) { - // We shouldn't have sent out any buffers to the client at this - // point. - CHECK_NE((int)info->mStatus, (int)BufferInfo::OWNED_BY_DOWNSTREAM); - + // At this time some buffers may still be with the component + // or being drained. 
+ if (info->mStatus != BufferInfo::OWNED_BY_COMPONENT && + info->mStatus != BufferInfo::OWNED_BY_DOWNSTREAM) { CHECK_EQ((status_t)OK, freeBuffer(kPortIndexOutput, i)); } } @@ -803,6 +837,8 @@ status_t ACodec::setComponentRole( "audio_decoder.raw", "audio_encoder.raw" }, { MEDIA_MIMETYPE_AUDIO_FLAC, "audio_decoder.flac", "audio_encoder.flac" }, + { MEDIA_MIMETYPE_AUDIO_MSGSM, + "audio_decoder.gsm", "audio_encoder.gsm" }, }; static const size_t kNumMimeToRole = @@ -922,6 +958,19 @@ status_t ACodec::configureCodec( err = setupVideoDecoder(mime, width, height); } } + } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) { + int32_t numChannels, sampleRate; + if (!msg->findInt32("channel-count", &numChannels) + || !msg->findInt32("sample-rate", &sampleRate)) { + // Since we did not always check for these, leave them optional + // and have the decoder figure it all out. + err = OK; + } else { + err = setupRawAudioFormat( + encoder ? kPortIndexInput : kPortIndexOutput, + sampleRate, + numChannels); + } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) { int32_t numChannels, sampleRate; if (!msg->findInt32("channel-count", &numChannels) @@ -964,17 +1013,23 @@ status_t ACodec::configureCodec( err = INVALID_OPERATION; } else { if (encoder) { - if (!msg->findInt32("flac-compression-level", &compressionLevel)) { + if (!msg->findInt32( + "flac-compression-level", &compressionLevel)) { compressionLevel = 5;// default FLAC compression level } else if (compressionLevel < 0) { - ALOGW("compression level %d outside [0..8] range, using 0", compressionLevel); + ALOGW("compression level %d outside [0..8] range, " + "using 0", + compressionLevel); compressionLevel = 0; } else if (compressionLevel > 8) { - ALOGW("compression level %d outside [0..8] range, using 8", compressionLevel); + ALOGW("compression level %d outside [0..8] range, " + "using 8", + compressionLevel); compressionLevel = 8; } } - err = setupFlacCodec(encoder, numChannels, sampleRate, compressionLevel); + err = 
setupFlacCodec( + encoder, numChannels, sampleRate, compressionLevel); } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) { int32_t numChannels, sampleRate; @@ -1408,36 +1463,52 @@ status_t ACodec::setSupportedOutputFormat() { CHECK_EQ(err, (status_t)OK); CHECK_EQ((int)format.eCompressionFormat, (int)OMX_VIDEO_CodingUnused); - CHECK(format.eColorFormat == OMX_COLOR_FormatYUV420Planar - || format.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar - || format.eColorFormat == OMX_COLOR_FormatCbYCrY - || format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar - || format.eColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar - || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka); - return mOMX->setParameter( mNode, OMX_IndexParamVideoPortFormat, &format, sizeof(format)); } +static const struct VideoCodingMapEntry { + const char *mMime; + OMX_VIDEO_CODINGTYPE mVideoCodingType; +} kVideoCodingMapEntry[] = { + { MEDIA_MIMETYPE_VIDEO_AVC, OMX_VIDEO_CodingAVC }, + { MEDIA_MIMETYPE_VIDEO_MPEG4, OMX_VIDEO_CodingMPEG4 }, + { MEDIA_MIMETYPE_VIDEO_H263, OMX_VIDEO_CodingH263 }, + { MEDIA_MIMETYPE_VIDEO_MPEG2, OMX_VIDEO_CodingMPEG2 }, + { MEDIA_MIMETYPE_VIDEO_VPX, OMX_VIDEO_CodingVPX }, +}; + static status_t GetVideoCodingTypeFromMime( const char *mime, OMX_VIDEO_CODINGTYPE *codingType) { - if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) { - *codingType = OMX_VIDEO_CodingAVC; - } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) { - *codingType = OMX_VIDEO_CodingMPEG4; - } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) { - *codingType = OMX_VIDEO_CodingH263; - } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) { - *codingType = OMX_VIDEO_CodingMPEG2; - } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VPX, mime)) { - *codingType = OMX_VIDEO_CodingVPX; - } else { - *codingType = OMX_VIDEO_CodingUnused; - return ERROR_UNSUPPORTED; + for (size_t i = 0; + i < sizeof(kVideoCodingMapEntry) / sizeof(kVideoCodingMapEntry[0]); + 
++i) { + if (!strcasecmp(mime, kVideoCodingMapEntry[i].mMime)) { + *codingType = kVideoCodingMapEntry[i].mVideoCodingType; + return OK; + } } - return OK; + *codingType = OMX_VIDEO_CodingUnused; + + return ERROR_UNSUPPORTED; +} + +static status_t GetMimeTypeForVideoCoding( + OMX_VIDEO_CODINGTYPE codingType, AString *mime) { + for (size_t i = 0; + i < sizeof(kVideoCodingMapEntry) / sizeof(kVideoCodingMapEntry[0]); + ++i) { + if (codingType == kVideoCodingMapEntry[i].mVideoCodingType) { + *mime = kVideoCodingMapEntry[i].mMime; + return OK; + } + } + + mime->clear(); + + return ERROR_UNSUPPORTED; } status_t ACodec::setupVideoDecoder( @@ -2085,6 +2156,42 @@ size_t ACodec::countBuffersOwnedByComponent(OMX_U32 portIndex) const { return n; } +size_t ACodec::countBuffersOwnedByNativeWindow() const { + size_t n = 0; + + for (size_t i = 0; i < mBuffers[kPortIndexOutput].size(); ++i) { + const BufferInfo &info = mBuffers[kPortIndexOutput].itemAt(i); + + if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) { + ++n; + } + } + + return n; +} + +void ACodec::waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs() { + if (mNativeWindow == NULL) { + return; + } + + int minUndequeuedBufs = 0; + status_t err = mNativeWindow->query( + mNativeWindow.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, + &minUndequeuedBufs); + + if (err != OK) { + ALOGE("[%s] NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)", + mComponentName.c_str(), strerror(-err), -err); + + minUndequeuedBufs = 0; + } + + while (countBuffersOwnedByNativeWindow() > (size_t)minUndequeuedBufs + && dequeueBufferFromNativeWindow() != NULL) { + } +} + bool ACodec::allYourBuffersAreBelongToUs( OMX_U32 portIndex) { for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) { @@ -2141,49 +2248,61 @@ void ACodec::sendFormatChange() { { OMX_VIDEO_PORTDEFINITIONTYPE *videoDef = &def.format.video; - notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW); + AString mime; + if (!mIsEncoder) { + notify->setString("mime", 
MEDIA_MIMETYPE_VIDEO_RAW); + } else if (GetMimeTypeForVideoCoding( + videoDef->eCompressionFormat, &mime) != OK) { + notify->setString("mime", "application/octet-stream"); + } else { + notify->setString("mime", mime.c_str()); + } + notify->setInt32("width", videoDef->nFrameWidth); notify->setInt32("height", videoDef->nFrameHeight); - notify->setInt32("stride", videoDef->nStride); - notify->setInt32("slice-height", videoDef->nSliceHeight); - notify->setInt32("color-format", videoDef->eColorFormat); - - OMX_CONFIG_RECTTYPE rect; - InitOMXParams(&rect); - rect.nPortIndex = kPortIndexOutput; - - if (mOMX->getConfig( - mNode, OMX_IndexConfigCommonOutputCrop, - &rect, sizeof(rect)) != OK) { - rect.nLeft = 0; - rect.nTop = 0; - rect.nWidth = videoDef->nFrameWidth; - rect.nHeight = videoDef->nFrameHeight; - } - CHECK_GE(rect.nLeft, 0); - CHECK_GE(rect.nTop, 0); - CHECK_GE(rect.nWidth, 0u); - CHECK_GE(rect.nHeight, 0u); - CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth); - CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight); - - notify->setRect( - "crop", - rect.nLeft, - rect.nTop, - rect.nLeft + rect.nWidth - 1, - rect.nTop + rect.nHeight - 1); - - if (mNativeWindow != NULL) { - android_native_rect_t crop; - crop.left = rect.nLeft; - crop.top = rect.nTop; - crop.right = rect.nLeft + rect.nWidth; - crop.bottom = rect.nTop + rect.nHeight; - - CHECK_EQ(0, native_window_set_crop( - mNativeWindow.get(), &crop)); + if (!mIsEncoder) { + notify->setInt32("stride", videoDef->nStride); + notify->setInt32("slice-height", videoDef->nSliceHeight); + notify->setInt32("color-format", videoDef->eColorFormat); + + OMX_CONFIG_RECTTYPE rect; + InitOMXParams(&rect); + rect.nPortIndex = kPortIndexOutput; + + if (mOMX->getConfig( + mNode, OMX_IndexConfigCommonOutputCrop, + &rect, sizeof(rect)) != OK) { + rect.nLeft = 0; + rect.nTop = 0; + rect.nWidth = videoDef->nFrameWidth; + rect.nHeight = videoDef->nFrameHeight; + } + + CHECK_GE(rect.nLeft, 0); + CHECK_GE(rect.nTop, 
0); + CHECK_GE(rect.nWidth, 0u); + CHECK_GE(rect.nHeight, 0u); + CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth); + CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight); + + notify->setRect( + "crop", + rect.nLeft, + rect.nTop, + rect.nLeft + rect.nWidth - 1, + rect.nTop + rect.nHeight - 1); + + if (mNativeWindow != NULL) { + android_native_rect_t crop; + crop.left = rect.nLeft; + crop.top = rect.nTop; + crop.right = rect.nLeft + rect.nWidth; + crop.bottom = rect.nTop + rect.nHeight; + + CHECK_EQ(0, native_window_set_crop( + mNativeWindow.get(), &crop)); + } } break; } @@ -2191,41 +2310,108 @@ void ACodec::sendFormatChange() { case OMX_PortDomainAudio: { OMX_AUDIO_PORTDEFINITIONTYPE *audioDef = &def.format.audio; - CHECK_EQ((int)audioDef->eEncoding, (int)OMX_AUDIO_CodingPCM); - OMX_AUDIO_PARAM_PCMMODETYPE params; - InitOMXParams(¶ms); - params.nPortIndex = kPortIndexOutput; + switch (audioDef->eEncoding) { + case OMX_AUDIO_CodingPCM: + { + OMX_AUDIO_PARAM_PCMMODETYPE params; + InitOMXParams(¶ms); + params.nPortIndex = kPortIndexOutput; - CHECK_EQ(mOMX->getParameter( - mNode, OMX_IndexParamAudioPcm, - ¶ms, sizeof(params)), - (status_t)OK); + CHECK_EQ(mOMX->getParameter( + mNode, OMX_IndexParamAudioPcm, + ¶ms, sizeof(params)), + (status_t)OK); - CHECK(params.nChannels == 1 || params.bInterleaved); - CHECK_EQ(params.nBitPerSample, 16u); - CHECK_EQ((int)params.eNumData, (int)OMX_NumericalDataSigned); - CHECK_EQ((int)params.ePCMMode, (int)OMX_AUDIO_PCMModeLinear); - - notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW); - notify->setInt32("channel-count", params.nChannels); - notify->setInt32("sample-rate", params.nSamplingRate); - if (mEncoderDelay + mEncoderPadding) { - size_t frameSize = params.nChannels * sizeof(int16_t); - if (mSkipCutBuffer != NULL) { - size_t prevbufsize = mSkipCutBuffer->size(); - if (prevbufsize != 0) { - ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbufsize); + CHECK(params.nChannels == 1 || 
params.bInterleaved); + CHECK_EQ(params.nBitPerSample, 16u); + CHECK_EQ((int)params.eNumData, (int)OMX_NumericalDataSigned); + CHECK_EQ((int)params.ePCMMode, (int)OMX_AUDIO_PCMModeLinear); + + notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW); + notify->setInt32("channel-count", params.nChannels); + notify->setInt32("sample-rate", params.nSamplingRate); + if (mEncoderDelay + mEncoderPadding) { + size_t frameSize = params.nChannels * sizeof(int16_t); + if (mSkipCutBuffer != NULL) { + size_t prevbufsize = mSkipCutBuffer->size(); + if (prevbufsize != 0) { + ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbufsize); + } + } + mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay * frameSize, + mEncoderPadding * frameSize); } + + if (mChannelMaskPresent) { + notify->setInt32("channel-mask", mChannelMask); + } + break; } - mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay * frameSize, - mEncoderPadding * frameSize); - } - if (mChannelMaskPresent) { - notify->setInt32("channel-mask", mChannelMask); - } + case OMX_AUDIO_CodingAAC: + { + OMX_AUDIO_PARAM_AACPROFILETYPE params; + InitOMXParams(¶ms); + params.nPortIndex = kPortIndexOutput; + + CHECK_EQ(mOMX->getParameter( + mNode, OMX_IndexParamAudioAac, + ¶ms, sizeof(params)), + (status_t)OK); + notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC); + notify->setInt32("channel-count", params.nChannels); + notify->setInt32("sample-rate", params.nSampleRate); + break; + } + + case OMX_AUDIO_CodingAMR: + { + OMX_AUDIO_PARAM_AMRTYPE params; + InitOMXParams(¶ms); + params.nPortIndex = kPortIndexOutput; + + CHECK_EQ(mOMX->getParameter( + mNode, OMX_IndexParamAudioAmr, + ¶ms, sizeof(params)), + (status_t)OK); + + notify->setInt32("channel-count", 1); + if (params.eAMRBandMode >= OMX_AUDIO_AMRBandModeWB0) { + notify->setString( + "mime", MEDIA_MIMETYPE_AUDIO_AMR_WB); + + notify->setInt32("sample-rate", 16000); + } else { + notify->setString( + "mime", MEDIA_MIMETYPE_AUDIO_AMR_NB); + + notify->setInt32("sample-rate", 8000); + } + 
break; + } + + case OMX_AUDIO_CodingFLAC: + { + OMX_AUDIO_PARAM_FLACTYPE params; + InitOMXParams(¶ms); + params.nPortIndex = kPortIndexOutput; + + CHECK_EQ(mOMX->getParameter( + mNode, OMX_IndexParamAudioFlac, + ¶ms, sizeof(params)), + (status_t)OK); + + notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FLAC); + notify->setInt32("channel-count", params.nChannels); + notify->setInt32("sample-rate", params.nSampleRate); + break; + } + + default: + TRESPASS(); + } break; } @@ -2454,6 +2640,21 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) { return onOMXMessage(msg); } + case ACodec::kWhatCreateInputSurface: + case ACodec::kWhatSignalEndOfInputStream: + { + ALOGE("Message 0x%x was not handled", msg->what()); + mCodec->signalError(OMX_ErrorUndefined, INVALID_OPERATION); + return true; + } + + case ACodec::kWhatOMXDied: + { + ALOGE("OMX/mediaserver died, signalling error!"); + mCodec->signalError(OMX_ErrorResourcesLost, DEAD_OBJECT); + break; + } + default: return false; } @@ -2856,19 +3057,18 @@ bool ACodec::BaseState::onOMXFillBufferDone( break; } - if (!mCodec->mIsEncoder && !mCodec->mSentFormat) { + if (!mCodec->mSentFormat) { mCodec->sendFormatChange(); } - if (mCodec->mNativeWindow == NULL) { - info->mData->setRange(rangeOffset, rangeLength); - + info->mData->setRange(rangeOffset, rangeLength); #if 0 + if (mCodec->mNativeWindow == NULL) { if (IsIDR(info->mData)) { ALOGI("IDR frame"); } -#endif } +#endif if (mCodec->mSkipCutBuffer != NULL) { mCodec->mSkipCutBuffer->submit(info->mData); @@ -2929,7 +3129,8 @@ void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) { int32_t render; if (mCodec->mNativeWindow != NULL - && msg->findInt32("render", &render) && render != 0) { + && msg->findInt32("render", &render) && render != 0 + && (info->mData == NULL || info->mData->size() != 0)) { // The client wants this buffer to be rendered. 
status_t err; @@ -3003,6 +3204,18 @@ ACodec::UninitializedState::UninitializedState(ACodec *codec) void ACodec::UninitializedState::stateEntered() { ALOGV("Now uninitialized"); + + if (mDeathNotifier != NULL) { + mCodec->mOMX->asBinder()->unlinkToDeath(mDeathNotifier); + mDeathNotifier.clear(); + } + + mCodec->mNativeWindow.clear(); + mCodec->mNode = NULL; + mCodec->mOMX.clear(); + mCodec->mQuirks = 0; + mCodec->mFlags = 0; + mCodec->mComponentName.clear(); } bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) { @@ -3074,6 +3287,15 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) { sp<IOMX> omx = client.interface(); + sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec->id()); + + mDeathNotifier = new DeathNotifier(notify); + if (omx->asBinder()->linkToDeath(mDeathNotifier) != OK) { + // This was a local binder, if it dies so do we, we won't care + // about any notifications in the afterlife. + mDeathNotifier.clear(); + } + Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs; AString mime; @@ -3138,7 +3360,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) { return false; } - sp<AMessage> notify = new AMessage(kWhatOMXMessage, mCodec->id()); + notify = new AMessage(kWhatOMXMessage, mCodec->id()); observer->setNotificationMessage(notify); mCodec->mComponentName = componentName; @@ -3152,11 +3374,6 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) { mCodec->mOMX = omx; mCodec->mNode = node; - mCodec->mPortEOS[kPortIndexInput] = - mCodec->mPortEOS[kPortIndexOutput] = false; - - mCodec->mInputEOSResult = OK; - { sp<AMessage> notify = mCodec->mNotify->dup(); notify->setInt32("what", ACodec::kWhatComponentAllocated); @@ -3178,6 +3395,11 @@ ACodec::LoadedState::LoadedState(ACodec *codec) void ACodec::LoadedState::stateEntered() { ALOGV("[%s] Now Loaded", mCodec->mComponentName.c_str()); + mCodec->mPortEOS[kPortIndexInput] = + 
mCodec->mPortEOS[kPortIndexOutput] = false; + + mCodec->mInputEOSResult = OK; + if (mCodec->mShutdownInProgress) { bool keepComponentAllocated = mCodec->mKeepComponentAllocated; @@ -3192,13 +3414,6 @@ void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) { if (!keepComponentAllocated) { CHECK_EQ(mCodec->mOMX->freeNode(mCodec->mNode), (status_t)OK); - mCodec->mNativeWindow.clear(); - mCodec->mNode = NULL; - mCodec->mOMX.clear(); - mCodec->mQuirks = 0; - mCodec->mFlags = 0; - mCodec->mComponentName.clear(); - mCodec->changeState(mCodec->mUninitializedState); } @@ -3218,6 +3433,13 @@ bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) { break; } + case ACodec::kWhatCreateInputSurface: + { + onCreateInputSurface(msg); + handled = true; + break; + } + case ACodec::kWhatStart: { onStart(); @@ -3296,6 +3518,32 @@ bool ACodec::LoadedState::onConfigureComponent( return true; } +void ACodec::LoadedState::onCreateInputSurface( + const sp<AMessage> &msg) { + ALOGV("onCreateInputSurface"); + + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatInputSurfaceCreated); + + sp<IGraphicBufferProducer> bufferProducer; + status_t err; + + err = mCodec->mOMX->createInputSurface(mCodec->mNode, kPortIndexInput, + &bufferProducer); + if (err == OK) { + notify->setObject("input-surface", + new BufferProducerWrapper(bufferProducer)); + } else { + // Can't use mCodec->signalError() here -- MediaCodec won't forward + // the error through because it's in the "configured" state. We + // send a kWhatInputSurfaceCreated with an error value instead. 
+ ALOGE("[%s] onCreateInputSurface returning error %d", + mCodec->mComponentName.c_str(), err); + notify->setInt32("err", err); + } + notify->post(); +} + void ACodec::LoadedState::onStart() { ALOGV("onStart"); @@ -3345,6 +3593,27 @@ bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) { return true; } + case kWhatSignalEndOfInputStream: + { + mCodec->onSignalEndOfInputStream(); + return true; + } + + case kWhatResume: + { + // We'll be active soon enough. + return true; + } + + case kWhatFlush: + { + // We haven't even started yet, so we're flushed alright... + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatFlushCompleted); + notify->post(); + return true; + } + default: return BaseState::onMessageReceived(msg); } @@ -3390,6 +3659,28 @@ bool ACodec::IdleToExecutingState::onMessageReceived(const sp<AMessage> &msg) { return true; } + case kWhatResume: + { + // We'll be active soon enough. + return true; + } + + case kWhatFlush: + { + // We haven't even started yet, so we're flushed alright... 
+ sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatFlushCompleted); + notify->post(); + + return true; + } + + case kWhatSignalEndOfInputStream: + { + mCodec->onSignalEndOfInputStream(); + return true; + } + default: return BaseState::onMessageReceived(msg); } @@ -3518,7 +3809,6 @@ bool ACodec::ExecutingState::onMessageReceived(const sp<AMessage> &msg) { (status_t)OK); mCodec->changeState(mCodec->mFlushingState); - handled = true; break; } @@ -3542,6 +3832,30 @@ bool ACodec::ExecutingState::onMessageReceived(const sp<AMessage> &msg) { break; } + case kWhatSetParameters: + { + sp<AMessage> params; + CHECK(msg->findMessage("params", ¶ms)); + + status_t err = mCodec->setParameters(params); + + sp<AMessage> reply; + if (msg->findMessage("reply", &reply)) { + reply->setInt32("err", err); + reply->post(); + } + + handled = true; + break; + } + + case ACodec::kWhatSignalEndOfInputStream: + { + mCodec->onSignalEndOfInputStream(); + handled = true; + break; + } + default: handled = BaseState::onMessageReceived(msg); break; @@ -3550,6 +3864,42 @@ bool ACodec::ExecutingState::onMessageReceived(const sp<AMessage> &msg) { return handled; } +status_t ACodec::setParameters(const sp<AMessage> ¶ms) { + int32_t videoBitrate; + if (params->findInt32("videoBitrate", &videoBitrate)) { + OMX_VIDEO_CONFIG_BITRATETYPE configParams; + InitOMXParams(&configParams); + configParams.nPortIndex = kPortIndexOutput; + configParams.nEncodeBitrate = videoBitrate; + + status_t err = mOMX->setConfig( + mNode, + OMX_IndexConfigVideoBitrate, + &configParams, + sizeof(configParams)); + + if (err != OK) { + ALOGE("setConfig(OMX_IndexConfigVideoBitrate, %d) failed w/ err %d", + videoBitrate, err); + + return err; + } + } + + return OK; +} + +void ACodec::onSignalEndOfInputStream() { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", ACodec::kWhatSignaledInputEOS); + + status_t err = mOMX->signalEndOfInputStream(mNode); + if (err != OK) { + 
notify->setInt32("err", err); + } + notify->post(); +} + bool ACodec::ExecutingState::onOMXEvent( OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) { switch (event) { @@ -3964,6 +4314,10 @@ void ACodec::FlushingState::changeStateIfWeOwnAllBuffers() { if (mFlushComplete[kPortIndexInput] && mFlushComplete[kPortIndexOutput] && mCodec->allYourBuffersAreBelongToUs()) { + // We now own all buffers except possibly those still queued with + // the native window for rendering. Let's get those back as well. + mCodec->waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs(); + sp<AMessage> notify = mCodec->mNotify->dup(); notify->setInt32("what", ACodec::kWhatFlushCompleted); notify->post(); @@ -3973,6 +4327,10 @@ void ACodec::FlushingState::changeStateIfWeOwnAllBuffers() { mCodec->mInputEOSResult = OK; + if (mCodec->mSkipCutBuffer != NULL) { + mCodec->mSkipCutBuffer->clear(); + } + mCodec->changeState(mCodec->mExecutingState); } } diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk index cc0581e..acc3abf 100644 --- a/media/libstagefright/Android.mk +++ b/media/libstagefright/Android.mk @@ -19,19 +19,20 @@ LOCAL_SRC_FILES:= \ ESDS.cpp \ FileSource.cpp \ FLACExtractor.cpp \ - FragmentedMP4Extractor.cpp \ HTTPBase.cpp \ JPEGSource.cpp \ MP3Extractor.cpp \ MPEG2TSWriter.cpp \ MPEG4Extractor.cpp \ MPEG4Writer.cpp \ + MediaAdapter.cpp \ MediaBuffer.cpp \ MediaBufferGroup.cpp \ MediaCodec.cpp \ MediaCodecList.cpp \ MediaDefs.cpp \ MediaExtractor.cpp \ + MediaMuxer.cpp \ MediaSource.cpp \ MetaData.cpp \ NuCachedSource2.cpp \ @@ -78,7 +79,6 @@ LOCAL_SHARED_LIBRARIES := \ libicuuc \ liblog \ libmedia \ - libmedia_native \ libsonivox \ libssl \ libstagefright_omx \ diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp index 861aebe..3cf4d5c 100644 --- a/media/libstagefright/AudioSource.cpp +++ b/media/libstagefright/AudioSource.cpp @@ -58,7 +58,7 @@ AudioSource::AudioSource( ALOGV("sampleRate: %d, channelCount: %d", 
sampleRate, channelCount); CHECK(channelCount == 1 || channelCount == 2); - int minFrameCount; + size_t minFrameCount; status_t status = AudioRecord::getMinFrameCount(&minFrameCount, sampleRate, AUDIO_FORMAT_PCM_16_BIT, diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp index 1e2625a..bd28118 100644 --- a/media/libstagefright/AwesomePlayer.cpp +++ b/media/libstagefright/AwesomePlayer.cpp @@ -48,8 +48,8 @@ #include <media/stagefright/MetaData.h> #include <media/stagefright/OMXCodec.h> -#include <gui/ISurfaceTexture.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/IGraphicBufferProducer.h> +#include <gui/Surface.h> #include <media/stagefright/foundation/AMessage.h> @@ -1178,12 +1178,12 @@ bool AwesomePlayer::isPlaying() const { return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN); } -status_t AwesomePlayer::setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) { +status_t AwesomePlayer::setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer) { Mutex::Autolock autoLock(mLock); status_t err; - if (surfaceTexture != NULL) { - err = setNativeWindow_l(new SurfaceTextureClient(surfaceTexture)); + if (bufferProducer != NULL) { + err = setNativeWindow_l(new Surface(bufferProducer)); } else { err = setNativeWindow_l(NULL); } @@ -2511,6 +2511,7 @@ status_t AwesomePlayer::setVideoScalingMode_l(int32_t mode) { if (err != OK) { ALOGW("Failed to set scaling mode: %d", err); } + return err; } return OK; } diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index efd7af7..5a26b06 100755..100644 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -121,13 +121,14 @@ static int32_t getColorFormat(const char* colorFormat) { CHECK(!"Unknown color format"); } -CameraSource *CameraSource::Create() { +CameraSource *CameraSource::Create(const String16 &clientName) { Size size; size.width = -1; size.height = -1; sp<ICamera> camera; - return new 
CameraSource(camera, NULL, 0, size, -1, NULL, false); + return new CameraSource(camera, NULL, 0, clientName, -1, + size, -1, NULL, false); } // static @@ -135,14 +136,16 @@ CameraSource *CameraSource::CreateFromCamera( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t frameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, bool storeMetaDataInVideoBuffers) { CameraSource *source = new CameraSource(camera, proxy, cameraId, - videoSize, frameRate, surface, - storeMetaDataInVideoBuffers); + clientName, clientUid, videoSize, frameRate, surface, + storeMetaDataInVideoBuffers); return source; } @@ -150,9 +153,11 @@ CameraSource::CameraSource( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t frameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, bool storeMetaDataInVideoBuffers) : mCameraFlags(0), mNumInputBuffers(0), @@ -173,6 +178,7 @@ CameraSource::CameraSource( mVideoSize.height = -1; mInitCheck = init(camera, proxy, cameraId, + clientName, clientUid, videoSize, frameRate, storeMetaDataInVideoBuffers); if (mInitCheck != OK) releaseCamera(); @@ -184,10 +190,10 @@ status_t CameraSource::initCheck() const { status_t CameraSource::isCameraAvailable( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, - int32_t cameraId) { + int32_t cameraId, const String16& clientName, uid_t clientUid) { if (camera == 0) { - mCamera = Camera::connect(cameraId); + mCamera = Camera::connect(cameraId, clientName, clientUid); if (mCamera == 0) return -EBUSY; mCameraFlags &= ~FLAGS_HOT_CAMERA; } else { @@ -469,6 +475,8 @@ status_t CameraSource::init( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t 
frameRate, bool storeMetaDataInVideoBuffers) { @@ -476,7 +484,7 @@ status_t CameraSource::init( ALOGV("init"); status_t err = OK; int64_t token = IPCThreadState::self()->clearCallingIdentity(); - err = initWithCameraAccess(camera, proxy, cameraId, + err = initWithCameraAccess(camera, proxy, cameraId, clientName, clientUid, videoSize, frameRate, storeMetaDataInVideoBuffers); IPCThreadState::self()->restoreCallingIdentity(token); @@ -487,13 +495,16 @@ status_t CameraSource::initWithCameraAccess( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers) { ALOGV("initWithCameraAccess"); status_t err = OK; - if ((err = isCameraAvailable(camera, proxy, cameraId)) != OK) { + if ((err = isCameraAvailable(camera, proxy, cameraId, + clientName, clientUid)) != OK) { ALOGE("Camera connection could not be established."); return err; } @@ -525,7 +536,7 @@ status_t CameraSource::initWithCameraAccess( if (mSurface != NULL) { // This CHECK is good, since we just passed the lock/unlock // check earlier by calling mCamera->setParameters(). 
- CHECK_EQ((status_t)OK, mCamera->setPreviewDisplay(mSurface)); + CHECK_EQ((status_t)OK, mCamera->setPreviewTexture(mSurface)); } // By default, do not store metadata in video buffers diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp index 26ce7ae..20214e8 100644 --- a/media/libstagefright/CameraSourceTimeLapse.cpp +++ b/media/libstagefright/CameraSourceTimeLapse.cpp @@ -36,13 +36,16 @@ CameraSourceTimeLapse *CameraSourceTimeLapse::CreateFromCamera( const sp<ICamera> &camera, const sp<ICameraRecordingProxy> &proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t videoFrameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, int64_t timeBetweenFrameCaptureUs) { CameraSourceTimeLapse *source = new CameraSourceTimeLapse(camera, proxy, cameraId, + clientName, clientUid, videoSize, videoFrameRate, surface, timeBetweenFrameCaptureUs); @@ -59,11 +62,14 @@ CameraSourceTimeLapse::CameraSourceTimeLapse( const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy, int32_t cameraId, + const String16& clientName, + uid_t clientUid, Size videoSize, int32_t videoFrameRate, - const sp<Surface>& surface, + const sp<IGraphicBufferProducer>& surface, int64_t timeBetweenFrameCaptureUs) - : CameraSource(camera, proxy, cameraId, videoSize, videoFrameRate, surface, true), + : CameraSource(camera, proxy, cameraId, clientName, clientUid, + videoSize, videoFrameRate, surface, true), mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate), mLastTimeLapseFrameRealTimestampUs(0), mSkipCurrentFrame(false) { diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp index 9d0eea2..fc6fd9c 100644 --- a/media/libstagefright/DataSource.cpp +++ b/media/libstagefright/DataSource.cpp @@ -23,7 +23,6 @@ #include "include/AACExtractor.h" #include "include/DRMExtractor.h" #include "include/FLACExtractor.h" -#include 
"include/FragmentedMP4Extractor.h" #include "include/HTTPBase.h" #include "include/MP3Extractor.h" #include "include/MPEG2PSExtractor.h" @@ -59,6 +58,45 @@ bool DataSource::getUInt16(off64_t offset, uint16_t *x) { return true; } +bool DataSource::getUInt24(off64_t offset, uint32_t *x) { + *x = 0; + + uint8_t byte[3]; + if (readAt(offset, byte, 3) != 3) { + return false; + } + + *x = (byte[0] << 16) | (byte[1] << 8) | byte[2]; + + return true; +} + +bool DataSource::getUInt32(off64_t offset, uint32_t *x) { + *x = 0; + + uint32_t tmp; + if (readAt(offset, &tmp, 4) != 4) { + return false; + } + + *x = ntohl(tmp); + + return true; +} + +bool DataSource::getUInt64(off64_t offset, uint64_t *x) { + *x = 0; + + uint64_t tmp; + if (readAt(offset, &tmp, 8) != 8) { + return false; + } + + *x = ntoh64(tmp); + + return true; +} + status_t DataSource::getSize(off64_t *size) { *size = 0; @@ -111,7 +149,6 @@ void DataSource::RegisterSniffer(SnifferFunc func) { // static void DataSource::RegisterDefaultSniffers() { RegisterSniffer(SniffMPEG4); - RegisterSniffer(SniffFragmentedMP4); RegisterSniffer(SniffMatroska); RegisterSniffer(SniffOgg); RegisterSniffer(SniffWAV); diff --git a/media/libstagefright/FragmentedMP4Extractor.cpp b/media/libstagefright/FragmentedMP4Extractor.cpp deleted file mode 100644 index 82712ef..0000000 --- a/media/libstagefright/FragmentedMP4Extractor.cpp +++ /dev/null @@ -1,460 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "FragmentedMP4Extractor" -#include <utils/Log.h> - -#include "include/FragmentedMP4Extractor.h" -#include "include/SampleTable.h" -#include "include/ESDS.h" - -#include <arpa/inet.h> - -#include <ctype.h> -#include <stdint.h> -#include <stdlib.h> -#include <string.h> - -#include <cutils/properties.h> // for property_get - -#include <media/stagefright/foundation/ABitReader.h> -#include <media/stagefright/foundation/ABuffer.h> -#include <media/stagefright/foundation/ADebug.h> -#include <media/stagefright/foundation/AMessage.h> -#include <media/stagefright/DataSource.h> -#include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDefs.h> -#include <media/stagefright/MediaSource.h> -#include <media/stagefright/MetaData.h> -#include <media/stagefright/Utils.h> -#include <utils/String8.h> - -namespace android { - -class FragmentedMPEG4Source : public MediaSource { -public: - // Caller retains ownership of the Parser - FragmentedMPEG4Source(bool audio, - const sp<MetaData> &format, - const sp<FragmentedMP4Parser> &parser, - const sp<FragmentedMP4Extractor> &extractor); - - virtual status_t start(MetaData *params = NULL); - virtual status_t stop(); - - virtual sp<MetaData> getFormat(); - - virtual status_t read( - MediaBuffer **buffer, const ReadOptions *options = NULL); - -protected: - virtual ~FragmentedMPEG4Source(); - -private: - Mutex mLock; - - sp<MetaData> mFormat; - sp<FragmentedMP4Parser> mParser; - sp<FragmentedMP4Extractor> mExtractor; - bool mIsAudioTrack; - uint32_t mCurrentSampleIndex; - - bool mIsAVC; - size_t mNALLengthSize; - - bool mStarted; - - MediaBufferGroup *mGroup; - - bool mWantsNALFragments; - - uint8_t *mSrcBuffer; - - FragmentedMPEG4Source(const FragmentedMPEG4Source &); - FragmentedMPEG4Source &operator=(const 
FragmentedMPEG4Source &); -}; - - -FragmentedMP4Extractor::FragmentedMP4Extractor(const sp<DataSource> &source) - : mLooper(new ALooper), - mParser(new FragmentedMP4Parser()), - mDataSource(source), - mInitCheck(NO_INIT), - mFileMetaData(new MetaData) { - ALOGV("FragmentedMP4Extractor"); - mLooper->registerHandler(mParser); - mLooper->start(false /* runOnCallingThread */); - mParser->start(mDataSource); - - bool hasVideo = mParser->getFormat(false /* audio */, true /* synchronous */) != NULL; - bool hasAudio = mParser->getFormat(true /* audio */, true /* synchronous */) != NULL; - - ALOGV("number of tracks: %d", countTracks()); - - if (hasVideo) { - mFileMetaData->setCString( - kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4); - } else if (hasAudio) { - mFileMetaData->setCString(kKeyMIMEType, "audio/mp4"); - } else { - ALOGE("no audio and no video, no idea what file type this is"); - } - // tracks are numbered such that video track is first, audio track is second - if (hasAudio && hasVideo) { - mTrackCount = 2; - mAudioTrackIndex = 1; - } else if (hasAudio) { - mTrackCount = 1; - mAudioTrackIndex = 0; - } else if (hasVideo) { - mTrackCount = 1; - mAudioTrackIndex = -1; - } else { - mTrackCount = 0; - mAudioTrackIndex = -1; - } -} - -FragmentedMP4Extractor::~FragmentedMP4Extractor() { - ALOGV("~FragmentedMP4Extractor"); - mLooper->stop(); -} - -uint32_t FragmentedMP4Extractor::flags() const { - return CAN_PAUSE | - (mParser->isSeekable() ? 
(CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0); -} - -sp<MetaData> FragmentedMP4Extractor::getMetaData() { - return mFileMetaData; -} - -size_t FragmentedMP4Extractor::countTracks() { - return mTrackCount; -} - - -sp<MetaData> FragmentedMP4Extractor::getTrackMetaData( - size_t index, uint32_t flags) { - if (index >= countTracks()) { - return NULL; - } - - sp<AMessage> msg = mParser->getFormat(index == mAudioTrackIndex, true /* synchronous */); - - if (msg == NULL) { - ALOGV("got null format for track %d", index); - return NULL; - } - - sp<MetaData> meta = new MetaData(); - convertMessageToMetaData(msg, meta); - return meta; -} - -static void MakeFourCCString(uint32_t x, char *s) { - s[0] = x >> 24; - s[1] = (x >> 16) & 0xff; - s[2] = (x >> 8) & 0xff; - s[3] = x & 0xff; - s[4] = '\0'; -} - -sp<MediaSource> FragmentedMP4Extractor::getTrack(size_t index) { - if (index >= countTracks()) { - return NULL; - } - return new FragmentedMPEG4Source(index == mAudioTrackIndex, getTrackMetaData(index, 0), mParser, this); -} - - -//////////////////////////////////////////////////////////////////////////////// - -FragmentedMPEG4Source::FragmentedMPEG4Source( - bool audio, - const sp<MetaData> &format, - const sp<FragmentedMP4Parser> &parser, - const sp<FragmentedMP4Extractor> &extractor) - : mFormat(format), - mParser(parser), - mExtractor(extractor), - mIsAudioTrack(audio), - mStarted(false), - mGroup(NULL), - mWantsNALFragments(false), - mSrcBuffer(NULL) { -} - -FragmentedMPEG4Source::~FragmentedMPEG4Source() { - if (mStarted) { - stop(); - } -} - -status_t FragmentedMPEG4Source::start(MetaData *params) { - Mutex::Autolock autoLock(mLock); - - CHECK(!mStarted); - - int32_t val; - if (params && params->findInt32(kKeyWantsNALFragments, &val) - && val != 0) { - mWantsNALFragments = true; - } else { - mWantsNALFragments = false; - } - ALOGV("caller wants NAL fragments: %s", mWantsNALFragments ? 
"yes" : "no"); - - mGroup = new MediaBufferGroup; - - int32_t max_size = 65536; - // XXX CHECK(mFormat->findInt32(kKeyMaxInputSize, &max_size)); - - mGroup->add_buffer(new MediaBuffer(max_size)); - - mSrcBuffer = new uint8_t[max_size]; - - mStarted = true; - - return OK; -} - -status_t FragmentedMPEG4Source::stop() { - Mutex::Autolock autoLock(mLock); - - CHECK(mStarted); - - delete[] mSrcBuffer; - mSrcBuffer = NULL; - - delete mGroup; - mGroup = NULL; - - mStarted = false; - mCurrentSampleIndex = 0; - - return OK; -} - -sp<MetaData> FragmentedMPEG4Source::getFormat() { - Mutex::Autolock autoLock(mLock); - - return mFormat; -} - - -status_t FragmentedMPEG4Source::read( - MediaBuffer **out, const ReadOptions *options) { - int64_t seekTimeUs; - ReadOptions::SeekMode mode; - if (options && options->getSeekTo(&seekTimeUs, &mode)) { - mParser->seekTo(mIsAudioTrack, seekTimeUs); - } - MediaBuffer *buffer = NULL; - mGroup->acquire_buffer(&buffer); - sp<ABuffer> parseBuffer; - - status_t ret = mParser->dequeueAccessUnit(mIsAudioTrack, &parseBuffer, true /* synchronous */); - if (ret != OK) { - buffer->release(); - ALOGV("returning %d", ret); - return ret; - } - sp<AMessage> meta = parseBuffer->meta(); - int64_t timeUs; - CHECK(meta->findInt64("timeUs", &timeUs)); - buffer->meta_data()->setInt64(kKeyTime, timeUs); - buffer->set_range(0, parseBuffer->size()); - memcpy(buffer->data(), parseBuffer->data(), parseBuffer->size()); - *out = buffer; - return OK; -} - - -static bool isCompatibleBrand(uint32_t fourcc) { - static const uint32_t kCompatibleBrands[] = { - FOURCC('i', 's', 'o', 'm'), - FOURCC('i', 's', 'o', '2'), - FOURCC('a', 'v', 'c', '1'), - FOURCC('3', 'g', 'p', '4'), - FOURCC('m', 'p', '4', '1'), - FOURCC('m', 'p', '4', '2'), - - // Won't promise that the following file types can be played. - // Just give these file types a chance. 
- FOURCC('q', 't', ' ', ' '), // Apple's QuickTime - FOURCC('M', 'S', 'N', 'V'), // Sony's PSP - - FOURCC('3', 'g', '2', 'a'), // 3GPP2 - FOURCC('3', 'g', '2', 'b'), - }; - - for (size_t i = 0; - i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]); - ++i) { - if (kCompatibleBrands[i] == fourcc) { - return true; - } - } - - return false; -} - -// Attempt to actually parse the 'ftyp' atom and determine if a suitable -// compatible brand is present. -// Also try to identify where this file's metadata ends -// (end of the 'moov' atom) and report it to the caller as part of -// the metadata. -static bool Sniff( - const sp<DataSource> &source, String8 *mimeType, float *confidence, - sp<AMessage> *meta) { - // We scan up to 128k bytes to identify this file as an MP4. - static const off64_t kMaxScanOffset = 128ll * 1024ll; - - off64_t offset = 0ll; - bool foundGoodFileType = false; - bool isFragmented = false; - off64_t moovAtomEndOffset = -1ll; - bool done = false; - - while (!done && offset < kMaxScanOffset) { - uint32_t hdr[2]; - if (source->readAt(offset, hdr, 8) < 8) { - return false; - } - - uint64_t chunkSize = ntohl(hdr[0]); - uint32_t chunkType = ntohl(hdr[1]); - off64_t chunkDataOffset = offset + 8; - - if (chunkSize == 1) { - if (source->readAt(offset + 8, &chunkSize, 8) < 8) { - return false; - } - - chunkSize = ntoh64(chunkSize); - chunkDataOffset += 8; - - if (chunkSize < 16) { - // The smallest valid chunk is 16 bytes long in this case. - return false; - } - } else if (chunkSize < 8) { - // The smallest valid chunk is 8 bytes long. 
- return false; - } - - off64_t chunkDataSize = offset + chunkSize - chunkDataOffset; - - char chunkstring[5]; - MakeFourCCString(chunkType, chunkstring); - ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset); - switch (chunkType) { - case FOURCC('f', 't', 'y', 'p'): - { - if (chunkDataSize < 8) { - return false; - } - - uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4; - for (size_t i = 0; i < numCompatibleBrands + 2; ++i) { - if (i == 1) { - // Skip this index, it refers to the minorVersion, - // not a brand. - continue; - } - - uint32_t brand; - if (source->readAt( - chunkDataOffset + 4 * i, &brand, 4) < 4) { - return false; - } - - brand = ntohl(brand); - char brandstring[5]; - MakeFourCCString(brand, brandstring); - ALOGV("Brand: %s", brandstring); - - if (isCompatibleBrand(brand)) { - foundGoodFileType = true; - break; - } - } - - if (!foundGoodFileType) { - return false; - } - - break; - } - - case FOURCC('m', 'o', 'o', 'v'): - { - moovAtomEndOffset = offset + chunkSize; - break; - } - - case FOURCC('m', 'o', 'o', 'f'): - { - // this is kind of broken, since we might not actually find a - // moof box in the first 128k. 
- isFragmented = true; - done = true; - break; - } - - default: - break; - } - - offset += chunkSize; - } - - if (!foundGoodFileType || !isFragmented) { - return false; - } - - *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4; - *confidence = 0.5f; // slightly more than MPEG4Extractor - - if (moovAtomEndOffset >= 0) { - *meta = new AMessage; - (*meta)->setInt64("meta-data-size", moovAtomEndOffset); - (*meta)->setInt32("fragmented", 1); // tell MediaExtractor what to instantiate - - ALOGV("found metadata size: %lld", moovAtomEndOffset); - } - - return true; -} - -// used by DataSource::RegisterDefaultSniffers -bool SniffFragmentedMP4( - const sp<DataSource> &source, String8 *mimeType, float *confidence, - sp<AMessage> *meta) { - ALOGV("SniffFragmentedMP4"); - char prop[PROPERTY_VALUE_MAX]; - if (property_get("media.stagefright.use-fragmp4", prop, NULL) - && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) { - return Sniff(source, mimeType, confidence, meta); - } - - return false; -} - -} // namespace android diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp index 40bfc55..d2cc6c2 100644 --- a/media/libstagefright/HTTPBase.cpp +++ b/media/libstagefright/HTTPBase.cpp @@ -58,6 +58,16 @@ sp<HTTPBase> HTTPBase::Create(uint32_t flags) { } } +// static +status_t HTTPBase::UpdateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { +#if CHROMIUM_AVAILABLE + return UpdateChromiumHTTPDataSourceProxyConfig(host, port, exclusionList); +#else + return INVALID_OPERATION; +#endif +} + void HTTPBase::addBandwidthMeasurement( size_t numBytes, int64_t delayUs) { Mutex::Autolock autoLock(mLock); diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp index 1a62f9d..145869e 100644 --- a/media/libstagefright/MPEG4Extractor.cpp +++ b/media/libstagefright/MPEG4Extractor.cpp @@ -22,8 +22,6 @@ #include "include/SampleTable.h" #include "include/ESDS.h" -#include <arpa/inet.h> - #include <ctype.h> 
#include <stdint.h> #include <stdlib.h> @@ -33,13 +31,11 @@ #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> -#include <media/stagefright/DataSource.h> #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MediaBufferGroup.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaSource.h> #include <media/stagefright/MetaData.h> -#include <media/stagefright/Utils.h> #include <utils/String8.h> namespace android { @@ -50,15 +46,17 @@ public: MPEG4Source(const sp<MetaData> &format, const sp<DataSource> &dataSource, int32_t timeScale, - const sp<SampleTable> &sampleTable); + const sp<SampleTable> &sampleTable, + Vector<SidxEntry> &sidx, + off64_t firstMoofOffset); virtual status_t start(MetaData *params = NULL); virtual status_t stop(); virtual sp<MetaData> getFormat(); - virtual status_t read( - MediaBuffer **buffer, const ReadOptions *options = NULL); + virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL); + virtual status_t fragmentedRead(MediaBuffer **buffer, const ReadOptions *options = NULL); protected: virtual ~MPEG4Source(); @@ -71,6 +69,27 @@ private: int32_t mTimescale; sp<SampleTable> mSampleTable; uint32_t mCurrentSampleIndex; + uint32_t mCurrentFragmentIndex; + Vector<SidxEntry> &mSegments; + off64_t mFirstMoofOffset; + off64_t mCurrentMoofOffset; + off64_t mNextMoofOffset; + uint32_t mCurrentTime; + int32_t mLastParsedTrackId; + int32_t mTrackId; + + int32_t mCryptoMode; // passed in from extractor + int32_t mDefaultIVSize; // passed in from extractor + uint8_t mCryptoKey[16]; // passed in from extractor + uint32_t mCurrentAuxInfoType; + uint32_t mCurrentAuxInfoTypeParameter; + int32_t mCurrentDefaultSampleInfoSize; + uint32_t mCurrentSampleInfoCount; + uint32_t mCurrentSampleInfoAllocSize; + uint8_t* mCurrentSampleInfoSizes; + uint32_t mCurrentSampleInfoOffsetCount; + uint32_t 
mCurrentSampleInfoOffsetsAllocSize; + uint64_t* mCurrentSampleInfoOffsets; bool mIsAVC; size_t mNALLengthSize; @@ -86,6 +105,43 @@ private: uint8_t *mSrcBuffer; size_t parseNALSize(const uint8_t *data) const; + status_t parseChunk(off64_t *offset); + status_t parseTrackFragmentHeader(off64_t offset, off64_t size); + status_t parseTrackFragmentRun(off64_t offset, off64_t size); + status_t parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size); + status_t parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size); + + struct TrackFragmentHeaderInfo { + enum Flags { + kBaseDataOffsetPresent = 0x01, + kSampleDescriptionIndexPresent = 0x02, + kDefaultSampleDurationPresent = 0x08, + kDefaultSampleSizePresent = 0x10, + kDefaultSampleFlagsPresent = 0x20, + kDurationIsEmpty = 0x10000, + }; + + uint32_t mTrackID; + uint32_t mFlags; + uint64_t mBaseDataOffset; + uint32_t mSampleDescriptionIndex; + uint32_t mDefaultSampleDuration; + uint32_t mDefaultSampleSize; + uint32_t mDefaultSampleFlags; + + uint64_t mDataOffset; + }; + TrackFragmentHeaderInfo mTrackFragmentHeaderInfo; + + struct Sample { + off64_t offset; + size_t size; + uint32_t duration; + uint8_t iv[16]; + Vector<size_t> clearsizes; + Vector<size_t> encryptedsizes; + }; + Vector<Sample> mCurrentSamples; MPEG4Source(const MPEG4Source &); MPEG4Source &operator=(const MPEG4Source &); @@ -264,8 +320,25 @@ static const char *FourCC2MIME(uint32_t fourcc) { } } +static bool AdjustChannelsAndRate(uint32_t fourcc, uint32_t *channels, uint32_t *rate) { + if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, FourCC2MIME(fourcc))) { + // AMR NB audio is always mono, 8kHz + *channels = 1; + *rate = 8000; + return true; + } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(fourcc))) { + // AMR WB audio is always mono, 16kHz + *channels = 1; + *rate = 16000; + return true; + } + return false; +} + MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source) - : mDataSource(source), + : mSidxDuration(0), + 
mMoofOffset(0), + mDataSource(source), mInitCheck(NO_INIT), mHasVideo(false), mFirstTrack(NULL), @@ -293,6 +366,16 @@ MPEG4Extractor::~MPEG4Extractor() { sinf = next; } mFirstSINF = NULL; + + for (size_t i = 0; i < mPssh.size(); i++) { + delete [] mPssh[i].data; + } +} + +uint32_t MPEG4Extractor::flags() const { + return CAN_PAUSE | + ((mMoofOffset == 0 || mSidxEntries.size() != 0) ? + (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0); } sp<MetaData> MPEG4Extractor::getMetaData() { @@ -307,6 +390,7 @@ sp<MetaData> MPEG4Extractor::getMetaData() { size_t MPEG4Extractor::countTracks() { status_t err; if ((err = readMetaData()) != OK) { + ALOGV("MPEG4Extractor::countTracks: no tracks"); return 0; } @@ -317,6 +401,7 @@ size_t MPEG4Extractor::countTracks() { track = track->next; } + ALOGV("MPEG4Extractor::countTracks: %d tracks", n); return n; } @@ -348,15 +433,24 @@ sp<MetaData> MPEG4Extractor::getTrackMetaData( const char *mime; CHECK(track->meta->findCString(kKeyMIMEType, &mime)); if (!strncasecmp("video/", mime, 6)) { - uint32_t sampleIndex; - uint32_t sampleTime; - if (track->sampleTable->findThumbnailSample(&sampleIndex) == OK - && track->sampleTable->getMetaDataForSample( - sampleIndex, NULL /* offset */, NULL /* size */, - &sampleTime) == OK) { - track->meta->setInt64( - kKeyThumbnailTime, - ((int64_t)sampleTime * 1000000) / track->timescale); + if (mMoofOffset > 0) { + int64_t duration; + if (track->meta->findInt64(kKeyDuration, &duration)) { + // nothing fancy, just pick a frame near 1/4th of the duration + track->meta->setInt64( + kKeyThumbnailTime, duration / 4); + } + } else { + uint32_t sampleIndex; + uint32_t sampleTime; + if (track->sampleTable->findThumbnailSample(&sampleIndex) == OK + && track->sampleTable->getMetaDataForSample( + sampleIndex, NULL /* offset */, NULL /* size */, + &sampleTime) == OK) { + track->meta->setInt64( + kKeyThumbnailTime, + ((int64_t)sampleTime * 1000000) / track->timescale); + } } } } @@ -364,6 +458,14 @@ sp<MetaData> 
MPEG4Extractor::getTrackMetaData( return track->meta; } +static void MakeFourCCString(uint32_t x, char *s) { + s[0] = x >> 24; + s[1] = (x >> 16) & 0xff; + s[2] = (x >> 8) & 0xff; + s[3] = x & 0xff; + s[4] = '\0'; +} + status_t MPEG4Extractor::readMetaData() { if (mInitCheck != NO_INIT) { return mInitCheck; @@ -371,7 +473,25 @@ status_t MPEG4Extractor::readMetaData() { off64_t offset = 0; status_t err; - while ((err = parseChunk(&offset, 0)) == OK) { + while (true) { + err = parseChunk(&offset, 0); + if (err == OK) { + continue; + } + + uint32_t hdr[2]; + if (mDataSource->readAt(offset, hdr, 8) < 8) { + break; + } + uint32_t chunk_type = ntohl(hdr[1]); + if (chunk_type == FOURCC('s', 'i', 'd', 'x')) { + // parse the sidx box too + continue; + } else if (chunk_type == FOURCC('m', 'o', 'o', 'f')) { + // store the offset of the first segment + mMoofOffset = offset; + } + break; } if (mInitCheck == OK) { @@ -388,6 +508,23 @@ status_t MPEG4Extractor::readMetaData() { } CHECK_NE(err, (status_t)NO_INIT); + + // copy pssh data into file metadata + int psshsize = 0; + for (size_t i = 0; i < mPssh.size(); i++) { + psshsize += 20 + mPssh[i].datalen; + } + if (psshsize) { + char *buf = (char*)malloc(psshsize); + char *ptr = buf; + for (size_t i = 0; i < mPssh.size(); i++) { + memcpy(ptr, mPssh[i].uuid, 20); // uuid + length + memcpy(ptr + 20, mPssh[i].data, mPssh[i].datalen); + ptr += (20 + mPssh[i].datalen); + } + mFileMetaData->setData(kKeyPssh, 'pssh', buf, psshsize); + free(buf); + } return mInitCheck; } @@ -559,14 +696,6 @@ status_t MPEG4Extractor::parseDrmSINF(off64_t *offset, off64_t data_offset) { return UNKNOWN_ERROR; // Return a dummy error. 
} -static void MakeFourCCString(uint32_t x, char *s) { - s[0] = x >> 24; - s[1] = (x >> 16) & 0xff; - s[2] = (x >> 8) & 0xff; - s[3] = x & 0xff; - s[4] = '\0'; -} - struct PathAdder { PathAdder(Vector<uint32_t> *path, uint32_t chunkType) : mPath(path) { @@ -630,7 +759,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { char chunk[5]; MakeFourCCString(chunk_type, chunk); - ALOGV("chunk: %s @ %lld", chunk, *offset); + ALOGV("chunk: %s @ %lld, %d", chunk, *offset, depth); #if 0 static const char kWhitespace[] = " "; @@ -686,6 +815,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { case FOURCC('m', 'f', 'r', 'a'): case FOURCC('u', 'd', 't', 'a'): case FOURCC('i', 'l', 's', 't'): + case FOURCC('s', 'i', 'n', 'f'): + case FOURCC('s', 'c', 'h', 'i'): { if (chunk_type == FOURCC('s', 't', 'b', 'l')) { ALOGV("sampleTable chunk is %d bytes long.", (size_t)chunk_size); @@ -773,6 +904,75 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { break; } + case FOURCC('f', 'r', 'm', 'a'): + { + uint32_t original_fourcc; + if (mDataSource->readAt(data_offset, &original_fourcc, 4) < 4) { + return ERROR_IO; + } + original_fourcc = ntohl(original_fourcc); + ALOGV("read original format: %d", original_fourcc); + mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(original_fourcc)); + uint32_t num_channels = 0; + uint32_t sample_rate = 0; + if (AdjustChannelsAndRate(original_fourcc, &num_channels, &sample_rate)) { + mLastTrack->meta->setInt32(kKeyChannelCount, num_channels); + mLastTrack->meta->setInt32(kKeySampleRate, sample_rate); + } + *offset += chunk_size; + break; + } + + case FOURCC('t', 'e', 'n', 'c'): + { + if (chunk_size < 32) { + return ERROR_MALFORMED; + } + + // tenc box contains 1 byte version, 3 byte flags, 3 byte default algorithm id, one byte + // default IV size, 16 bytes default KeyID + // (ISO 23001-7) + char buf[4]; + memset(buf, 0, 4); + if (mDataSource->readAt(data_offset + 4, buf + 1, 3) < 3) { + return 
ERROR_IO; + } + uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf)); + if (defaultAlgorithmId > 1) { + // only 0 (clear) and 1 (AES-128) are valid + return ERROR_MALFORMED; + } + + memset(buf, 0, 4); + if (mDataSource->readAt(data_offset + 7, buf + 3, 1) < 1) { + return ERROR_IO; + } + uint32_t defaultIVSize = ntohl(*((int32_t*)buf)); + + if ((defaultAlgorithmId == 0 && defaultIVSize != 0) || + (defaultAlgorithmId != 0 && defaultIVSize == 0)) { + // only unencrypted data must have 0 IV size + return ERROR_MALFORMED; + } else if (defaultIVSize != 0 && + defaultIVSize != 8 && + defaultIVSize != 16) { + // only supported sizes are 0, 8 and 16 + return ERROR_MALFORMED; + } + + uint8_t defaultKeyId[16]; + + if (mDataSource->readAt(data_offset + 8, &defaultKeyId, 16) < 16) { + return ERROR_IO; + } + + mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId); + mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize); + mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16); + *offset += chunk_size; + break; + } + case FOURCC('t', 'k', 'h', 'd'): { status_t err; @@ -784,6 +984,37 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { break; } + case FOURCC('p', 's', 's', 'h'): + { + PsshInfo pssh; + + if (mDataSource->readAt(data_offset + 4, &pssh.uuid, 16) < 16) { + return ERROR_IO; + } + + uint32_t psshdatalen = 0; + if (mDataSource->readAt(data_offset + 20, &psshdatalen, 4) < 4) { + return ERROR_IO; + } + pssh.datalen = ntohl(psshdatalen); + ALOGV("pssh data size: %d", pssh.datalen); + if (pssh.datalen + 20 > chunk_size) { + // pssh data length exceeds size of containing box + return ERROR_MALFORMED; + } + + pssh.data = new uint8_t[pssh.datalen]; + ALOGV("allocated pssh @ %p", pssh.data); + ssize_t requested = (ssize_t) pssh.datalen; + if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) { + return ERROR_IO; + } + mPssh.push_back(pssh); + + *offset += chunk_size; + break; + } + case FOURCC('m', 'd', 
'h', 'd'): { if (chunk_data_size < 4) { @@ -816,7 +1047,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { mLastTrack->timescale = ntohl(timescale); - int64_t duration; + int64_t duration = 0; if (version == 1) { if (mDataSource->readAt( timescale_offset + 4, &duration, sizeof(duration)) @@ -825,13 +1056,16 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { } duration = ntoh64(duration); } else { - int32_t duration32; + uint32_t duration32; if (mDataSource->readAt( timescale_offset + 4, &duration32, sizeof(duration32)) < (ssize_t)sizeof(duration32)) { return ERROR_IO; } - duration = ntohl(duration32); + // ffmpeg sets duration to -1, which is incorrect. + if (duration32 != 0xffffffff) { + duration = ntohl(duration32); + } } mLastTrack->meta->setInt64( kKeyDuration, (duration * 1000000) / mLastTrack->timescale); @@ -894,16 +1128,17 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { // For 3GPP timed text, there could be multiple tx3g boxes contain // multiple text display formats. These formats will be used to // display the timed text. + // For encrypted files, there may also be more than one entry. const char *mime; CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime)); - if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) { + if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) && + strcasecmp(mime, "application/octet-stream")) { // For now we only support a single type of media per track. 
mLastTrack->skipTrack = true; *offset += chunk_size; break; } } - off64_t stop_offset = *offset + chunk_size; *offset = data_offset + 8; for (uint32_t i = 0; i < entry_count; ++i) { @@ -920,6 +1155,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { } case FOURCC('m', 'p', '4', 'a'): + case FOURCC('e', 'n', 'c', 'a'): case FOURCC('s', 'a', 'm', 'r'): case FOURCC('s', 'a', 'w', 'b'): { @@ -935,29 +1171,18 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { } uint16_t data_ref_index = U16_AT(&buffer[6]); - uint16_t num_channels = U16_AT(&buffer[16]); + uint32_t num_channels = U16_AT(&buffer[16]); uint16_t sample_size = U16_AT(&buffer[18]); uint32_t sample_rate = U32_AT(&buffer[24]) >> 16; - if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, - FourCC2MIME(chunk_type))) { - // AMR NB audio is always mono, 8kHz - num_channels = 1; - sample_rate = 8000; - } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, - FourCC2MIME(chunk_type))) { - // AMR WB audio is always mono, 16kHz - num_channels = 1; - sample_rate = 16000; + if (chunk_type != FOURCC('e', 'n', 'c', 'a')) { + // if the chunk type is enca, we'll get the type from the sinf/frma box later + mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type)); + AdjustChannelsAndRate(chunk_type, &num_channels, &sample_rate); } - -#if 0 - printf("*** coding='%s' %d channels, size %d, rate %d\n", + ALOGV("*** coding='%s' %d channels, size %d, rate %d\n", chunk, num_channels, sample_size, sample_rate); -#endif - - mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type)); mLastTrack->meta->setInt32(kKeyChannelCount, num_channels); mLastTrack->meta->setInt32(kKeySampleRate, sample_rate); @@ -977,6 +1202,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { } case FOURCC('m', 'p', '4', 'v'): + case FOURCC('e', 'n', 'c', 'v'): case FOURCC('s', '2', '6', '3'): case FOURCC('H', '2', '6', '3'): case FOURCC('h', '2', '6', '3'): @@ -999,7 +1225,7 @@ status_t 
MPEG4Extractor::parseChunk(off64_t *offset, int depth) { uint16_t width = U16_AT(&buffer[6 + 18]); uint16_t height = U16_AT(&buffer[6 + 20]); - // The video sample is not stand-compliant if it has invalid dimension. + // The video sample is not standard-compliant if it has invalid dimension. // Use some default width and height value, and // let the decoder figure out the actual width and height (and thus // be prepared for INFO_FOMRAT_CHANGED event). @@ -1009,7 +1235,10 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { // printf("*** coding='%s' width=%d height=%d\n", // chunk, width, height); - mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type)); + if (chunk_type != FOURCC('e', 'n', 'c', 'v')) { + // if the chunk type is encv, we'll get the type from the sinf/frma box later + mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type)); + } mLastTrack->meta->setInt32(kKeyWidth, width); mLastTrack->meta->setInt32(kKeyHeight, height); @@ -1075,11 +1304,23 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { return err; } - // Assume that a given buffer only contains at most 10 fragments, - // each fragment originally prefixed with a 2 byte length will - // have a 4 byte header (0x00 0x00 0x00 0x01) after conversion, - // and thus will grow by 2 bytes per fragment. - mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size + 10 * 2); + if (max_size != 0) { + // Assume that a given buffer only contains at most 10 chunks, + // each chunk originally prefixed with a 2 byte length will + // have a 4 byte header (0x00 0x00 0x00 0x01) after conversion, + // and thus will grow by 2 bytes per chunk. + mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size + 10 * 2); + } else { + // No size was specified. Pick a conservatively large size. 
+ int32_t width, height; + if (mLastTrack->meta->findInt32(kKeyWidth, &width) && + mLastTrack->meta->findInt32(kKeyHeight, &height)) { + mLastTrack->meta->setInt32(kKeyMaxInputSize, width * height * 3 / 2); + } else { + ALOGE("No width or height, assuming worst case 1080p"); + mLastTrack->meta->setInt32(kKeyMaxInputSize, 3110400); + } + } *offset += chunk_size; // Calculate average frame rate. @@ -1354,6 +1595,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { case FOURCC('m', 'd', 'a', 't'): { + ALOGV("mdat chunk, drm: %d", mIsDrm); if (!mIsDrm) { *offset += chunk_size; break; @@ -1448,6 +1690,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { break; } + case FOURCC('s', 'i', 'd', 'x'): + { + parseSegmentIndex(data_offset, chunk_data_size); + *offset += chunk_size; + return UNKNOWN_ERROR; // stop parsing after sidx + } + default: { *offset += chunk_size; @@ -1458,6 +1707,125 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { return OK; } +status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) { + ALOGV("MPEG4Extractor::parseSegmentIndex"); + + if (size < 12) { + return -EINVAL; + } + + uint32_t flags; + if (!mDataSource->getUInt32(offset, &flags)) { + return ERROR_MALFORMED; + } + + uint32_t version = flags >> 24; + flags &= 0xffffff; + + ALOGV("sidx version %d", version); + + uint32_t referenceId; + if (!mDataSource->getUInt32(offset + 4, &referenceId)) { + return ERROR_MALFORMED; + } + + uint32_t timeScale; + if (!mDataSource->getUInt32(offset + 8, &timeScale)) { + return ERROR_MALFORMED; + } + ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale); + + uint64_t earliestPresentationTime; + uint64_t firstOffset; + + offset += 12; + size -= 12; + + if (version == 0) { + if (size < 8) { + return -EINVAL; + } + uint32_t tmp; + if (!mDataSource->getUInt32(offset, &tmp)) { + return ERROR_MALFORMED; + } + earliestPresentationTime = tmp; + if (!mDataSource->getUInt32(offset + 4, &tmp)) { + 
return ERROR_MALFORMED; + } + firstOffset = tmp; + offset += 8; + size -= 8; + } else { + if (size < 16) { + return -EINVAL; + } + if (!mDataSource->getUInt64(offset, &earliestPresentationTime)) { + return ERROR_MALFORMED; + } + if (!mDataSource->getUInt64(offset + 8, &firstOffset)) { + return ERROR_MALFORMED; + } + offset += 16; + size -= 16; + } + ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset); + + if (size < 4) { + return -EINVAL; + } + + uint16_t referenceCount; + if (!mDataSource->getUInt16(offset + 2, &referenceCount)) { + return ERROR_MALFORMED; + } + offset += 4; + size -= 4; + ALOGV("refcount: %d", referenceCount); + + if (size < referenceCount * 12) { + return -EINVAL; + } + + uint64_t total_duration = 0; + for (unsigned int i = 0; i < referenceCount; i++) { + uint32_t d1, d2, d3; + + if (!mDataSource->getUInt32(offset, &d1) || // size + !mDataSource->getUInt32(offset + 4, &d2) || // duration + !mDataSource->getUInt32(offset + 8, &d3)) { // flags + return ERROR_MALFORMED; + } + + if (d1 & 0x80000000) { + ALOGW("sub-sidx boxes not supported yet"); + } + bool sap = d3 & 0x80000000; + bool saptype = d3 >> 28; + if (!sap || saptype > 2) { + ALOGW("not a stream access point, or unsupported type"); + } + total_duration += d2; + offset += 12; + ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3); + SidxEntry se; + se.mSize = d1 & 0x7fffffff; + se.mDurationUs = 1000000LL * d2 / timeScale; + mSidxEntries.add(se); + } + + mSidxDuration = total_duration * 1000000 / timeScale; + ALOGV("duration: %lld", mSidxDuration); + + int64_t metaDuration; + if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) { + mLastTrack->meta->setInt64(kKeyDuration, mSidxDuration); + } + return OK; +} + + + status_t MPEG4Extractor::parseTrackHeader( off64_t data_offset, off64_t data_size) { if (data_size < 4) { @@ -1754,8 +2122,11 @@ sp<MediaSource> MPEG4Extractor::getTrack(size_t index) { return NULL; } + ALOGV("getTrack called, pssh: %d", 
mPssh.size()); + return new MPEG4Source( - track->meta, mDataSource, track->timescale, track->sampleTable); + track->meta, mDataSource, track->timescale, track->sampleTable, + mSidxEntries, mMoofOffset); } // static @@ -1852,17 +2223,30 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio( sampleRate = br.getBits(24); numChannels = br.getBits(4); } else { - static uint32_t kSamplingRate[] = { - 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, - 16000, 12000, 11025, 8000, 7350 - }; - - if (freqIndex == 13 || freqIndex == 14) { - return ERROR_MALFORMED; + numChannels = br.getBits(4); + if (objectType == 5) { + // SBR specific config per 14496-3 table 1.13 + freqIndex = br.getBits(4); + if (freqIndex == 15) { + if (csd_size < 8) { + return ERROR_MALFORMED; + } + sampleRate = br.getBits(24); + } } - sampleRate = kSamplingRate[freqIndex]; - numChannels = br.getBits(4); + if (sampleRate == 0) { + static uint32_t kSamplingRate[] = { + 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, + 16000, 12000, 11025, 8000, 7350 + }; + + if (freqIndex == 13 || freqIndex == 14) { + return ERROR_MALFORMED; + } + + sampleRate = kSamplingRate[freqIndex]; + } } if (numChannels == 0) { @@ -1898,12 +2282,23 @@ MPEG4Source::MPEG4Source( const sp<MetaData> &format, const sp<DataSource> &dataSource, int32_t timeScale, - const sp<SampleTable> &sampleTable) + const sp<SampleTable> &sampleTable, + Vector<SidxEntry> &sidx, + off64_t firstMoofOffset) : mFormat(format), mDataSource(dataSource), mTimescale(timeScale), mSampleTable(sampleTable), mCurrentSampleIndex(0), + mCurrentFragmentIndex(0), + mSegments(sidx), + mFirstMoofOffset(firstMoofOffset), + mCurrentMoofOffset(firstMoofOffset), + mCurrentTime(0), + mCurrentSampleInfoAllocSize(0), + mCurrentSampleInfoSizes(NULL), + mCurrentSampleInfoOffsetsAllocSize(0), + mCurrentSampleInfoOffsets(NULL), mIsAVC(false), mNALLengthSize(0), mStarted(false), @@ -1911,6 +2306,19 @@ MPEG4Source::MPEG4Source( mBuffer(NULL), 
mWantsNALFragments(false), mSrcBuffer(NULL) { + + mFormat->findInt32(kKeyCryptoMode, &mCryptoMode); + mDefaultIVSize = 0; + mFormat->findInt32(kKeyCryptoDefaultIVSize, &mDefaultIVSize); + uint32_t keytype; + const void *key; + size_t keysize; + if (mFormat->findData(kKeyCryptoKey, &keytype, &key, &keysize)) { + CHECK(keysize <= 16); + memset(mCryptoKey, 0, 16); + memcpy(mCryptoKey, key, keysize); + } + const char *mime; bool success = mFormat->findCString(kKeyMIMEType, &mime); CHECK(success); @@ -1931,12 +2339,21 @@ MPEG4Source::MPEG4Source( // The number of bytes used to encode the length of a NAL unit. mNALLengthSize = 1 + (ptr[4] & 3); } + + CHECK(format->findInt32(kKeyTrackID, &mTrackId)); + + if (mFirstMoofOffset != 0) { + off64_t offset = mFirstMoofOffset; + parseChunk(&offset); + } } MPEG4Source::~MPEG4Source() { if (mStarted) { stop(); } + free(mCurrentSampleInfoSizes); + free(mCurrentSampleInfoOffsets); } status_t MPEG4Source::start(MetaData *params) { @@ -1988,6 +2405,529 @@ status_t MPEG4Source::stop() { return OK; } +status_t MPEG4Source::parseChunk(off64_t *offset) { + uint32_t hdr[2]; + if (mDataSource->readAt(*offset, hdr, 8) < 8) { + return ERROR_IO; + } + uint64_t chunk_size = ntohl(hdr[0]); + uint32_t chunk_type = ntohl(hdr[1]); + off64_t data_offset = *offset + 8; + + if (chunk_size == 1) { + if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) { + return ERROR_IO; + } + chunk_size = ntoh64(chunk_size); + data_offset += 8; + + if (chunk_size < 16) { + // The smallest valid chunk is 16 bytes long in this case. + return ERROR_MALFORMED; + } + } else if (chunk_size < 8) { + // The smallest valid chunk is 8 bytes long. 
+ return ERROR_MALFORMED; + } + + char chunk[5]; + MakeFourCCString(chunk_type, chunk); + ALOGV("MPEG4Source chunk %s @ %llx", chunk, *offset); + + off64_t chunk_data_size = *offset + chunk_size - data_offset; + + switch(chunk_type) { + + case FOURCC('t', 'r', 'a', 'f'): + case FOURCC('m', 'o', 'o', 'f'): { + off64_t stop_offset = *offset + chunk_size; + *offset = data_offset; + while (*offset < stop_offset) { + status_t err = parseChunk(offset); + if (err != OK) { + return err; + } + } + if (chunk_type == FOURCC('m', 'o', 'o', 'f')) { + // *offset points to the mdat box following this moof + parseChunk(offset); // doesn't actually parse it, just updates offset + mNextMoofOffset = *offset; + } + break; + } + + case FOURCC('t', 'f', 'h', 'd'): { + status_t err; + if ((err = parseTrackFragmentHeader(data_offset, chunk_data_size)) != OK) { + return err; + } + *offset += chunk_size; + break; + } + + case FOURCC('t', 'r', 'u', 'n'): { + status_t err; + if (mLastParsedTrackId == mTrackId) { + if ((err = parseTrackFragmentRun(data_offset, chunk_data_size)) != OK) { + return err; + } + } + + *offset += chunk_size; + break; + } + + case FOURCC('s', 'a', 'i', 'z'): { + status_t err; + if ((err = parseSampleAuxiliaryInformationSizes(data_offset, chunk_data_size)) != OK) { + return err; + } + *offset += chunk_size; + break; + } + case FOURCC('s', 'a', 'i', 'o'): { + status_t err; + if ((err = parseSampleAuxiliaryInformationOffsets(data_offset, chunk_data_size)) != OK) { + return err; + } + *offset += chunk_size; + break; + } + + case FOURCC('m', 'd', 'a', 't'): { + // parse DRM info if present + ALOGV("MPEG4Source::parseChunk mdat"); + // if saiz/saoi was previously observed, do something with the sampleinfos + *offset += chunk_size; + break; + } + + default: { + *offset += chunk_size; + break; + } + } + return OK; +} + +status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size) { + ALOGV("parseSampleAuxiliaryInformationSizes"); + // 14496-12 
8.7.12 + uint8_t version; + if (mDataSource->readAt( + offset, &version, sizeof(version)) + < (ssize_t)sizeof(version)) { + return ERROR_IO; + } + + if (version != 0) { + return ERROR_UNSUPPORTED; + } + offset++; + + uint32_t flags; + if (!mDataSource->getUInt24(offset, &flags)) { + return ERROR_IO; + } + offset += 3; + + if (flags & 1) { + uint32_t tmp; + if (!mDataSource->getUInt32(offset, &tmp)) { + return ERROR_MALFORMED; + } + mCurrentAuxInfoType = tmp; + offset += 4; + if (!mDataSource->getUInt32(offset, &tmp)) { + return ERROR_MALFORMED; + } + mCurrentAuxInfoTypeParameter = tmp; + offset += 4; + } + + uint8_t defsize; + if (mDataSource->readAt(offset, &defsize, 1) != 1) { + return ERROR_MALFORMED; + } + mCurrentDefaultSampleInfoSize = defsize; + offset++; + + uint32_t smplcnt; + if (!mDataSource->getUInt32(offset, &smplcnt)) { + return ERROR_MALFORMED; + } + mCurrentSampleInfoCount = smplcnt; + offset += 4; + + if (mCurrentDefaultSampleInfoSize != 0) { + ALOGV("@@@@ using default sample info size of %d", mCurrentDefaultSampleInfoSize); + return OK; + } + if (smplcnt > mCurrentSampleInfoAllocSize) { + mCurrentSampleInfoSizes = (uint8_t*) realloc(mCurrentSampleInfoSizes, smplcnt); + mCurrentSampleInfoAllocSize = smplcnt; + } + + mDataSource->readAt(offset, mCurrentSampleInfoSizes, smplcnt); + return OK; +} + +status_t MPEG4Source::parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size) { + ALOGV("parseSampleAuxiliaryInformationOffsets"); + // 14496-12 8.7.13 + uint8_t version; + if (mDataSource->readAt(offset, &version, sizeof(version)) != 1) { + return ERROR_IO; + } + offset++; + + uint32_t flags; + if (!mDataSource->getUInt24(offset, &flags)) { + return ERROR_IO; + } + offset += 3; + + uint32_t entrycount; + if (!mDataSource->getUInt32(offset, &entrycount)) { + return ERROR_IO; + } + offset += 4; + + if (entrycount > mCurrentSampleInfoOffsetsAllocSize) { + mCurrentSampleInfoOffsets = (uint64_t*) realloc(mCurrentSampleInfoOffsets, entrycount * 
8); + mCurrentSampleInfoOffsetsAllocSize = entrycount; + } + mCurrentSampleInfoOffsetCount = entrycount; + + for (size_t i = 0; i < entrycount; i++) { + if (version == 0) { + uint32_t tmp; + if (!mDataSource->getUInt32(offset, &tmp)) { + return ERROR_IO; + } + mCurrentSampleInfoOffsets[i] = tmp; + offset += 4; + } else { + uint64_t tmp; + if (!mDataSource->getUInt64(offset, &tmp)) { + return ERROR_IO; + } + mCurrentSampleInfoOffsets[i] = tmp; + offset += 8; + } + } + + // parse clear/encrypted data + + off64_t drmoffset = mCurrentSampleInfoOffsets[0]; // from moof + + drmoffset += mCurrentMoofOffset; + int ivlength; + CHECK(mFormat->findInt32(kKeyCryptoDefaultIVSize, &ivlength)); + + // read CencSampleAuxiliaryDataFormats + for (size_t i = 0; i < mCurrentSampleInfoCount; i++) { + Sample *smpl = &mCurrentSamples.editItemAt(i); + + memset(smpl->iv, 0, 16); + if (mDataSource->readAt(drmoffset, smpl->iv, ivlength) != ivlength) { + return ERROR_IO; + } + + drmoffset += ivlength; + + int32_t smplinfosize = mCurrentDefaultSampleInfoSize; + if (smplinfosize == 0) { + smplinfosize = mCurrentSampleInfoSizes[i]; + } + if (smplinfosize > ivlength) { + uint16_t numsubsamples; + if (!mDataSource->getUInt16(drmoffset, &numsubsamples)) { + return ERROR_IO; + } + drmoffset += 2; + for (size_t j = 0; j < numsubsamples; j++) { + uint16_t numclear; + uint32_t numencrypted; + if (!mDataSource->getUInt16(drmoffset, &numclear)) { + return ERROR_IO; + } + drmoffset += 2; + if (!mDataSource->getUInt32(drmoffset, &numencrypted)) { + return ERROR_IO; + } + drmoffset += 4; + smpl->clearsizes.add(numclear); + smpl->encryptedsizes.add(numencrypted); + } + } else { + smpl->clearsizes.add(0); + smpl->encryptedsizes.add(smpl->size); + } + } + + + return OK; +} + +status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) { + + if (size < 8) { + return -EINVAL; + } + + uint32_t flags; + if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags + return 
ERROR_MALFORMED; + } + + if (flags & 0xff000000) { + return -EINVAL; + } + + if (!mDataSource->getUInt32(offset + 4, (uint32_t*)&mLastParsedTrackId)) { + return ERROR_MALFORMED; + } + + if (mLastParsedTrackId != mTrackId) { + // this is not the right track, skip it + return OK; + } + + mTrackFragmentHeaderInfo.mFlags = flags; + mTrackFragmentHeaderInfo.mTrackID = mLastParsedTrackId; + offset += 8; + size -= 8; + + ALOGV("fragment header: %08x %08x", flags, mTrackFragmentHeaderInfo.mTrackID); + + if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) { + if (size < 8) { + return -EINVAL; + } + + if (!mDataSource->getUInt64(offset, &mTrackFragmentHeaderInfo.mBaseDataOffset)) { + return ERROR_MALFORMED; + } + offset += 8; + size -= 8; + } + + if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) { + if (size < 4) { + return -EINVAL; + } + + if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mSampleDescriptionIndex)) { + return ERROR_MALFORMED; + } + offset += 4; + size -= 4; + } + + if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) { + if (size < 4) { + return -EINVAL; + } + + if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleDuration)) { + return ERROR_MALFORMED; + } + offset += 4; + size -= 4; + } + + if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) { + if (size < 4) { + return -EINVAL; + } + + if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleSize)) { + return ERROR_MALFORMED; + } + offset += 4; + size -= 4; + } + + if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) { + if (size < 4) { + return -EINVAL; + } + + if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleFlags)) { + return ERROR_MALFORMED; + } + offset += 4; + size -= 4; + } + + if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) { + mTrackFragmentHeaderInfo.mBaseDataOffset = mCurrentMoofOffset; + } + + mTrackFragmentHeaderInfo.mDataOffset = 
0; + return OK; +} + +status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) { + + ALOGV("MPEG4Extractor::parseTrackFragmentRun"); + if (size < 8) { + return -EINVAL; + } + + enum { + kDataOffsetPresent = 0x01, + kFirstSampleFlagsPresent = 0x04, + kSampleDurationPresent = 0x100, + kSampleSizePresent = 0x200, + kSampleFlagsPresent = 0x400, + kSampleCompositionTimeOffsetPresent = 0x800, + }; + + uint32_t flags; + if (!mDataSource->getUInt32(offset, &flags)) { + return ERROR_MALFORMED; + } + ALOGV("fragment run flags: %08x", flags); + + if (flags & 0xff000000) { + return -EINVAL; + } + + if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) { + // These two shall not be used together. + return -EINVAL; + } + + uint32_t sampleCount; + if (!mDataSource->getUInt32(offset + 4, &sampleCount)) { + return ERROR_MALFORMED; + } + offset += 8; + size -= 8; + + uint64_t dataOffset = mTrackFragmentHeaderInfo.mDataOffset; + + uint32_t firstSampleFlags = 0; + + if (flags & kDataOffsetPresent) { + if (size < 4) { + return -EINVAL; + } + + int32_t dataOffsetDelta; + if (!mDataSource->getUInt32(offset, (uint32_t*)&dataOffsetDelta)) { + return ERROR_MALFORMED; + } + + dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta; + + offset += 4; + size -= 4; + } + + if (flags & kFirstSampleFlagsPresent) { + if (size < 4) { + return -EINVAL; + } + + if (!mDataSource->getUInt32(offset, &firstSampleFlags)) { + return ERROR_MALFORMED; + } + offset += 4; + size -= 4; + } + + uint32_t sampleDuration = 0, sampleSize = 0, sampleFlags = 0, + sampleCtsOffset = 0; + + size_t bytesPerSample = 0; + if (flags & kSampleDurationPresent) { + bytesPerSample += 4; + } else if (mTrackFragmentHeaderInfo.mFlags + & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) { + sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration; + } else { + sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration; + } + + if (flags & kSampleSizePresent) { + 
bytesPerSample += 4; + } else if (mTrackFragmentHeaderInfo.mFlags + & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) { + sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize; + } else { + sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize; + } + + if (flags & kSampleFlagsPresent) { + bytesPerSample += 4; + } else if (mTrackFragmentHeaderInfo.mFlags + & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) { + sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags; + } else { + sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags; + } + + if (flags & kSampleCompositionTimeOffsetPresent) { + bytesPerSample += 4; + } else { + sampleCtsOffset = 0; + } + + if (size < sampleCount * bytesPerSample) { + return -EINVAL; + } + + Sample tmp; + for (uint32_t i = 0; i < sampleCount; ++i) { + if (flags & kSampleDurationPresent) { + if (!mDataSource->getUInt32(offset, &sampleDuration)) { + return ERROR_MALFORMED; + } + offset += 4; + } + + if (flags & kSampleSizePresent) { + if (!mDataSource->getUInt32(offset, &sampleSize)) { + return ERROR_MALFORMED; + } + offset += 4; + } + + if (flags & kSampleFlagsPresent) { + if (!mDataSource->getUInt32(offset, &sampleFlags)) { + return ERROR_MALFORMED; + } + offset += 4; + } + + if (flags & kSampleCompositionTimeOffsetPresent) { + if (!mDataSource->getUInt32(offset, &sampleCtsOffset)) { + return ERROR_MALFORMED; + } + offset += 4; + } + + ALOGV("adding sample %d at offset 0x%08llx, size %u, duration %u, " + " flags 0x%08x", i + 1, + dataOffset, sampleSize, sampleDuration, + (flags & kFirstSampleFlagsPresent) && i == 0 + ? 
firstSampleFlags : sampleFlags); + tmp.offset = dataOffset; + tmp.size = sampleSize; + tmp.duration = sampleDuration; + mCurrentSamples.add(tmp); + + dataOffset += sampleSize; + } + + mTrackFragmentHeaderInfo.mDataOffset = dataOffset; + + return OK; +} + sp<MetaData> MPEG4Source::getFormat() { Mutex::Autolock autoLock(mLock); @@ -2019,6 +2959,10 @@ status_t MPEG4Source::read( CHECK(mStarted); + if (mFirstMoofOffset > 0) { + return fragmentedRead(out, options); + } + *out = NULL; int64_t targetSampleTimeUs = -1; @@ -2076,6 +3020,7 @@ status_t MPEG4Source::read( // we had seeked to the end of stream, ending normally. err = ERROR_END_OF_STREAM; } + ALOGV("end of stream"); return err; } @@ -2286,6 +3231,268 @@ status_t MPEG4Source::read( } } +status_t MPEG4Source::fragmentedRead( + MediaBuffer **out, const ReadOptions *options) { + + ALOGV("MPEG4Source::fragmentedRead"); + + CHECK(mStarted); + + *out = NULL; + + int64_t targetSampleTimeUs = -1; + + int64_t seekTimeUs; + ReadOptions::SeekMode mode; + if (options && options->getSeekTo(&seekTimeUs, &mode)) { + + int numSidxEntries = mSegments.size(); + if (numSidxEntries != 0) { + int64_t totalTime = 0; + off64_t totalOffset = mFirstMoofOffset; + for (int i = 0; i < numSidxEntries; i++) { + const SidxEntry *se = &mSegments[i]; + if (totalTime + se->mDurationUs > seekTimeUs) { + // The requested time is somewhere in this segment + if ((mode == ReadOptions::SEEK_NEXT_SYNC) || + (mode == ReadOptions::SEEK_CLOSEST_SYNC && + (seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) { + // requested next sync, or closest sync and it was closer to the end of + // this segment + totalTime += se->mDurationUs; + totalOffset += se->mSize; + } + break; + } + totalTime += se->mDurationUs; + totalOffset += se->mSize; + } + mCurrentMoofOffset = totalOffset; + mCurrentSamples.clear(); + mCurrentSampleIndex = 0; + parseChunk(&totalOffset); + mCurrentTime = totalTime * mTimescale / 1000000ll; + } + + if (mBuffer != NULL) { + 
mBuffer->release(); + mBuffer = NULL; + } + + // fall through + } + + off64_t offset = 0; + size_t size; + uint32_t cts = 0; + bool isSyncSample = false; + bool newBuffer = false; + if (mBuffer == NULL) { + newBuffer = true; + + if (mCurrentSampleIndex >= mCurrentSamples.size()) { + // move to next fragment + Sample lastSample = mCurrentSamples[mCurrentSamples.size() - 1]; + off64_t nextMoof = mNextMoofOffset; // lastSample.offset + lastSample.size; + mCurrentMoofOffset = nextMoof; + mCurrentSamples.clear(); + mCurrentSampleIndex = 0; + parseChunk(&nextMoof); + if (mCurrentSampleIndex >= mCurrentSamples.size()) { + return ERROR_END_OF_STREAM; + } + } + + const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex]; + offset = smpl->offset; + size = smpl->size; + cts = mCurrentTime; + mCurrentTime += smpl->duration; + isSyncSample = (mCurrentSampleIndex == 0); // XXX + + status_t err = mGroup->acquire_buffer(&mBuffer); + + if (err != OK) { + CHECK(mBuffer == NULL); + ALOGV("acquire_buffer returned %d", err); + return err; + } + } + + const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex]; + const sp<MetaData> bufmeta = mBuffer->meta_data(); + bufmeta->clear(); + if (smpl->encryptedsizes.size()) { + // store clear/encrypted lengths in metadata + bufmeta->setData(kKeyPlainSizes, 0, + smpl->clearsizes.array(), smpl->clearsizes.size() * 4); + bufmeta->setData(kKeyEncryptedSizes, 0, + smpl->encryptedsizes.array(), smpl->encryptedsizes.size() * 4); + bufmeta->setData(kKeyCryptoIV, 0, smpl->iv, 16); // use 16 or the actual size? 
+ bufmeta->setInt32(kKeyCryptoDefaultIVSize, mDefaultIVSize); + bufmeta->setInt32(kKeyCryptoMode, mCryptoMode); + bufmeta->setData(kKeyCryptoKey, 0, mCryptoKey, 16); + } + + if (!mIsAVC || mWantsNALFragments) { + if (newBuffer) { + ssize_t num_bytes_read = + mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size); + + if (num_bytes_read < (ssize_t)size) { + mBuffer->release(); + mBuffer = NULL; + + ALOGV("i/o error"); + return ERROR_IO; + } + + CHECK(mBuffer != NULL); + mBuffer->set_range(0, size); + mBuffer->meta_data()->setInt64( + kKeyTime, ((int64_t)cts * 1000000) / mTimescale); + + if (targetSampleTimeUs >= 0) { + mBuffer->meta_data()->setInt64( + kKeyTargetTime, targetSampleTimeUs); + } + + if (isSyncSample) { + mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1); + } + + ++mCurrentSampleIndex; + } + + if (!mIsAVC) { + *out = mBuffer; + mBuffer = NULL; + + return OK; + } + + // Each NAL unit is split up into its constituent fragments and + // each one of them returned in its own buffer. + + CHECK(mBuffer->range_length() >= mNALLengthSize); + + const uint8_t *src = + (const uint8_t *)mBuffer->data() + mBuffer->range_offset(); + + size_t nal_size = parseNALSize(src); + if (mBuffer->range_length() < mNALLengthSize + nal_size) { + ALOGE("incomplete NAL unit."); + + mBuffer->release(); + mBuffer = NULL; + + return ERROR_MALFORMED; + } + + MediaBuffer *clone = mBuffer->clone(); + CHECK(clone != NULL); + clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size); + + CHECK(mBuffer != NULL); + mBuffer->set_range( + mBuffer->range_offset() + mNALLengthSize + nal_size, + mBuffer->range_length() - mNALLengthSize - nal_size); + + if (mBuffer->range_length() == 0) { + mBuffer->release(); + mBuffer = NULL; + } + + *out = clone; + + return OK; + } else { + ALOGV("whole NAL"); + // Whole NAL units are returned but each fragment is prefixed by + // the start code (0x00 00 00 01). 
+ ssize_t num_bytes_read = 0; + int32_t drm = 0; + bool usesDRM = (mFormat->findInt32(kKeyIsDRM, &drm) && drm != 0); + if (usesDRM) { + num_bytes_read = + mDataSource->readAt(offset, (uint8_t*)mBuffer->data(), size); + } else { + num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size); + } + + if (num_bytes_read < (ssize_t)size) { + mBuffer->release(); + mBuffer = NULL; + + ALOGV("i/o error"); + return ERROR_IO; + } + + if (usesDRM) { + CHECK(mBuffer != NULL); + mBuffer->set_range(0, size); + + } else { + uint8_t *dstData = (uint8_t *)mBuffer->data(); + size_t srcOffset = 0; + size_t dstOffset = 0; + + while (srcOffset < size) { + bool isMalFormed = (srcOffset + mNALLengthSize > size); + size_t nalLength = 0; + if (!isMalFormed) { + nalLength = parseNALSize(&mSrcBuffer[srcOffset]); + srcOffset += mNALLengthSize; + isMalFormed = srcOffset + nalLength > size; + } + + if (isMalFormed) { + ALOGE("Video is malformed"); + mBuffer->release(); + mBuffer = NULL; + return ERROR_MALFORMED; + } + + if (nalLength == 0) { + continue; + } + + CHECK(dstOffset + 4 <= mBuffer->size()); + + dstData[dstOffset++] = 0; + dstData[dstOffset++] = 0; + dstData[dstOffset++] = 0; + dstData[dstOffset++] = 1; + memcpy(&dstData[dstOffset], &mSrcBuffer[srcOffset], nalLength); + srcOffset += nalLength; + dstOffset += nalLength; + } + CHECK_EQ(srcOffset, size); + CHECK(mBuffer != NULL); + mBuffer->set_range(0, dstOffset); + } + + mBuffer->meta_data()->setInt64( + kKeyTime, ((int64_t)cts * 1000000) / mTimescale); + + if (targetSampleTimeUs >= 0) { + mBuffer->meta_data()->setInt64( + kKeyTargetTime, targetSampleTimeUs); + } + + if (isSyncSample) { + mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1); + } + + ++mCurrentSampleIndex; + + *out = mBuffer; + mBuffer = NULL; + + return OK; + } +} + MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix( const char *mimePrefix) { for (Track *track = mFirstTrack; track != NULL; track = track->next) { @@ -2398,6 +3605,9 @@ static bool 
BetterSniffMPEG4( off64_t chunkDataSize = offset + chunkSize - chunkDataOffset; + char chunkstring[5]; + MakeFourCCString(chunkType, chunkstring); + ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset); switch (chunkType) { case FOURCC('f', 't', 'y', 'p'): { diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp index 326930f..a0f17b5 100755..100644 --- a/media/libstagefright/MPEG4Writer.cpp +++ b/media/libstagefright/MPEG4Writer.cpp @@ -212,7 +212,6 @@ private: int64_t mTrackDurationUs; int64_t mMaxChunkDurationUs; - bool mIsRealTimeRecording; int64_t mEstimatedTrackSizeBytes; int64_t mMdatSizeBytes; int32_t mTimeScale; @@ -335,6 +334,7 @@ private: MPEG4Writer::MPEG4Writer(const char *filename) : mFd(-1), mInitCheck(NO_INIT), + mIsRealTimeRecording(true), mUse4ByteNalLength(true), mUse32BitOffset(true), mIsFileSizeLimitExplicitlyRequested(false), @@ -359,6 +359,7 @@ MPEG4Writer::MPEG4Writer(const char *filename) MPEG4Writer::MPEG4Writer(int fd) : mFd(dup(fd)), mInitCheck(mFd < 0? NO_INIT: OK), + mIsRealTimeRecording(true), mUse4ByteNalLength(true), mUse32BitOffset(true), mIsFileSizeLimitExplicitlyRequested(false), @@ -428,6 +429,42 @@ status_t MPEG4Writer::addSource(const sp<MediaSource> &source) { ALOGE("Attempt to add source AFTER recording is started"); return UNKNOWN_ERROR; } + + // At most 2 tracks can be supported. + if (mTracks.size() >= 2) { + ALOGE("Too many tracks (%d) to add", mTracks.size()); + return ERROR_UNSUPPORTED; + } + + CHECK(source.get() != NULL); + + // A track of type other than video or audio is not supported. 
+ const char *mime; + source->getFormat()->findCString(kKeyMIMEType, &mime); + bool isAudio = !strncasecmp(mime, "audio/", 6); + bool isVideo = !strncasecmp(mime, "video/", 6); + if (!isAudio && !isVideo) { + ALOGE("Track (%s) other than video or audio is not supported", + mime); + return ERROR_UNSUPPORTED; + } + + // At this point, we know the track to be added is either + // video or audio. Thus, we only need to check whether it + // is an audio track or not (if it is not, then it must be + // a video track). + + // No more than one video or one audio track is supported. + for (List<Track*>::iterator it = mTracks.begin(); + it != mTracks.end(); ++it) { + if ((*it)->isAudio() == isAudio) { + ALOGE("%s track already exists", isAudio? "Audio": "Video"); + return ERROR_UNSUPPORTED; + } + } + + // This is the first track of either audio or video. + // Go ahead to add the track. Track *track = new Track(this, source, 1 + mTracks.size()); mTracks.push_back(track); @@ -435,6 +472,11 @@ status_t MPEG4Writer::addSource(const sp<MediaSource> &source) { } status_t MPEG4Writer::startTracks(MetaData *params) { + if (mTracks.empty()) { + ALOGE("No source added"); + return INVALID_OPERATION; + } + for (List<Track *>::iterator it = mTracks.begin(); it != mTracks.end(); ++it) { status_t err = (*it)->start(params); @@ -555,6 +597,11 @@ status_t MPEG4Writer::start(MetaData *param) { mUse4ByteNalLength = false; } + int32_t isRealTimeRecording; + if (param && param->findInt32(kKeyRealTimeRecording, &isRealTimeRecording)) { + mIsRealTimeRecording = isRealTimeRecording; + } + mStartTimestampUs = -1; if (mStarted) { @@ -575,13 +622,50 @@ status_t MPEG4Writer::start(MetaData *param) { /* * When the requested file size limit is small, the priority * is to meet the file size limit requirement, rather than - * to make the file streamable. + * to make the file streamable. mStreamableFile does not tell + * whether the actual recorded file is streamable or not. 
*/ mStreamableFile = (mMaxFileSizeLimitBytes != 0 && mMaxFileSizeLimitBytes >= kMinStreamableFileSizeInBytes); - mWriteMoovBoxToMemory = mStreamableFile; + /* + * mWriteMoovBoxToMemory is true if the amount of data in moov box is + * smaller than the reserved free space at the beginning of a file, AND + * when the content of moov box is constructed. Note that video/audio + * frame data is always written to the file but not in the memory. + * + * Before stop()/reset() is called, mWriteMoovBoxToMemory is always + * false. When reset() is called at the end of a recording session, + * Moov box needs to be constructed. + * + * 1) Right before a moov box is constructed, mWriteMoovBoxToMemory + * to set to mStreamableFile so that if + * the file is intended to be streamable, it is set to true; + * otherwise, it is set to false. When the value is set to false, + * all the content of the moov box is written immediately to + * the end of the file. When the value is set to true, all the + * content of the moov box is written to an in-memory cache, + * mMoovBoxBuffer, util the following condition happens. Note + * that the size of the in-memory cache is the same as the + * reserved free space at the beginning of the file. + * + * 2) While the data of the moov box is written to an in-memory + * cache, the data size is checked against the reserved space. + * If the data size surpasses the reserved space, subsequent moov + * data could no longer be hold in the in-memory cache. This also + * indicates that the reserved space was too small. At this point, + * _all_ moov data must be written to the end of the file. + * mWriteMoovBoxToMemory must be set to false to direct the write + * to the file. + * + * 3) If the data size in moov box is smaller than the reserved + * space after moov box is completely constructed, the in-memory + * cache copy of the moov box is written to the reserved free + * space. 
Thus, immediately after the moov is completedly + * constructed, mWriteMoovBoxToMemory is always set to false. + */ + mWriteMoovBoxToMemory = false; mMoovBoxBuffer = NULL; mMoovBoxBufferOffset = 0; @@ -786,15 +870,25 @@ status_t MPEG4Writer::reset() { } lseek64(mFd, mOffset, SEEK_SET); - const off64_t moovOffset = mOffset; - mWriteMoovBoxToMemory = mStreamableFile; - mMoovBoxBuffer = (uint8_t *) malloc(mEstimatedMoovBoxSize); + // Construct moov box now mMoovBoxBufferOffset = 0; - CHECK(mMoovBoxBuffer != NULL); + mWriteMoovBoxToMemory = mStreamableFile; + if (mWriteMoovBoxToMemory) { + // There is no need to allocate in-memory cache + // for moov box if the file is not streamable. + + mMoovBoxBuffer = (uint8_t *) malloc(mEstimatedMoovBoxSize); + CHECK(mMoovBoxBuffer != NULL); + } writeMoovBox(maxDurationUs); - mWriteMoovBoxToMemory = false; - if (mStreamableFile) { + // mWriteMoovBoxToMemory could be set to false in + // MPEG4Writer::write() method + if (mWriteMoovBoxToMemory) { + mWriteMoovBoxToMemory = false; + // Content of the moov box is saved in the cache, and the in-memory + // moov box needs to be written to the file in a single shot. + CHECK_LE(mMoovBoxBufferOffset + 8, mEstimatedMoovBoxSize); // Moov box @@ -806,13 +900,15 @@ status_t MPEG4Writer::reset() { lseek64(mFd, mOffset, SEEK_SET); writeInt32(mEstimatedMoovBoxSize - mMoovBoxBufferOffset); write("free", 4); + } else { + ALOGI("The mp4 file will not be streamable."); + } - // Free temp memory + // Free in-memory cache for moov box + if (mMoovBoxBuffer != NULL) { free(mMoovBoxBuffer); mMoovBoxBuffer = NULL; mMoovBoxBufferOffset = 0; - } else { - ALOGI("The mp4 file will not be streamable."); } CHECK(mBoxes.empty()); @@ -994,23 +1090,28 @@ size_t MPEG4Writer::write( const size_t bytes = size * nmemb; if (mWriteMoovBoxToMemory) { - // This happens only when we write the moov box at the end of - // recording, not for each output video/audio frame we receive. 
+ off64_t moovBoxSize = 8 + mMoovBoxBufferOffset + bytes; if (moovBoxSize > mEstimatedMoovBoxSize) { + // The reserved moov box at the beginning of the file + // is not big enough. Moov box should be written to + // the end of the file from now on, but not to the + // in-memory cache. + + // We write partial moov box that is in the memory to + // the file first. for (List<off64_t>::iterator it = mBoxes.begin(); it != mBoxes.end(); ++it) { (*it) += mOffset; } lseek64(mFd, mOffset, SEEK_SET); ::write(mFd, mMoovBoxBuffer, mMoovBoxBufferOffset); - ::write(mFd, ptr, size * nmemb); + ::write(mFd, ptr, bytes); mOffset += (bytes + mMoovBoxBufferOffset); - free(mMoovBoxBuffer); - mMoovBoxBuffer = NULL; - mMoovBoxBufferOffset = 0; + + // All subsequent moov box content will be written + // to the end of the file. mWriteMoovBoxToMemory = false; - mStreamableFile = false; } else { memcpy(mMoovBoxBuffer + mMoovBoxBufferOffset, ptr, bytes); mMoovBoxBufferOffset += bytes; @@ -1545,12 +1646,18 @@ void MPEG4Writer::threadFunc() { mChunkReadyCondition.wait(mLock); } - // Actual write without holding the lock in order to - // reduce the blocking time for media track threads. + // In real time recording mode, write without holding the lock in order + // to reduce the blocking time for media track threads. + // Otherwise, hold the lock until the existing chunks get written to the + // file. 
if (chunkFound) { - mLock.unlock(); + if (mIsRealTimeRecording) { + mLock.unlock(); + } writeChunkToFile(&chunk); - mLock.lock(); + if (mIsRealTimeRecording) { + mLock.lock(); + } } } @@ -1600,18 +1707,10 @@ status_t MPEG4Writer::Track::start(MetaData *params) { mRotation = rotationDegrees; } - mIsRealTimeRecording = true; - { - int32_t isNotRealTime; - if (params && params->findInt32(kKeyNotRealTime, &isNotRealTime)) { - mIsRealTimeRecording = (isNotRealTime == 0); - } - } - initTrackingProgressStatus(params); sp<MetaData> meta = new MetaData; - if (mIsRealTimeRecording && mOwner->numTracks() > 1) { + if (mOwner->isRealTimeRecording() && mOwner->numTracks() > 1) { /* * This extra delay of accepting incoming audio/video signals * helps to align a/v start time at the beginning of a recording @@ -1989,7 +2088,10 @@ status_t MPEG4Writer::Track::threadEntry() { } else { prctl(PR_SET_NAME, (unsigned long)"VideoTrackEncoding", 0, 0, 0); } - androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO); + + if (mOwner->isRealTimeRecording()) { + androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO); + } sp<MetaData> meta_data; @@ -2150,7 +2252,7 @@ status_t MPEG4Writer::Track::threadEntry() { } - if (mIsRealTimeRecording) { + if (mOwner->isRealTimeRecording()) { if (mIsAudio) { updateDriftTime(meta_data); } @@ -2436,6 +2538,10 @@ int64_t MPEG4Writer::getDriftTimeUs() { return mDriftTimeUs; } +bool MPEG4Writer::isRealTimeRecording() const { + return mIsRealTimeRecording; +} + bool MPEG4Writer::useNalLengthFour() { return mUse4ByteNalLength; } diff --git a/media/libstagefright/MediaAdapter.cpp b/media/libstagefright/MediaAdapter.cpp new file mode 100644 index 0000000..2484212 --- /dev/null +++ b/media/libstagefright/MediaAdapter.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "MediaAdapter" +#include <utils/Log.h> + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MediaAdapter.h> +#include <media/stagefright/MediaBuffer.h> + +namespace android { + +MediaAdapter::MediaAdapter(const sp<MetaData> &meta) + : mCurrentMediaBuffer(NULL), + mStarted(false), + mOutputFormat(meta) { +} + +MediaAdapter::~MediaAdapter() { + Mutex::Autolock autoLock(mAdapterLock); + mOutputFormat.clear(); + CHECK(mCurrentMediaBuffer == NULL); +} + +status_t MediaAdapter::start(MetaData *params) { + Mutex::Autolock autoLock(mAdapterLock); + if (!mStarted) { + mStarted = true; + } + return OK; +} + +status_t MediaAdapter::stop() { + Mutex::Autolock autoLock(mAdapterLock); + if (mStarted) { + mStarted = false; + // If stop() happens immediately after a pushBuffer(), we should + // clean up the mCurrentMediaBuffer + if (mCurrentMediaBuffer != NULL) { + mCurrentMediaBuffer->release(); + mCurrentMediaBuffer = NULL; + } + // While read() is still waiting, we should signal it to finish. 
+ mBufferReadCond.signal(); + } + return OK; +} + +sp<MetaData> MediaAdapter::getFormat() { + Mutex::Autolock autoLock(mAdapterLock); + return mOutputFormat; +} + +void MediaAdapter::signalBufferReturned(MediaBuffer *buffer) { + Mutex::Autolock autoLock(mAdapterLock); + CHECK(buffer != NULL); + buffer->setObserver(0); + buffer->release(); + ALOGV("buffer returned %p", buffer); + mBufferReturnedCond.signal(); +} + +status_t MediaAdapter::read( + MediaBuffer **buffer, const ReadOptions *options) { + Mutex::Autolock autoLock(mAdapterLock); + if (!mStarted) { + ALOGV("Read before even started!"); + return ERROR_END_OF_STREAM; + } + + while (mCurrentMediaBuffer == NULL && mStarted) { + ALOGV("waiting @ read()"); + mBufferReadCond.wait(mAdapterLock); + } + + if (!mStarted) { + ALOGV("read interrupted after stop"); + CHECK(mCurrentMediaBuffer == NULL); + return ERROR_END_OF_STREAM; + } + + CHECK(mCurrentMediaBuffer != NULL); + + *buffer = mCurrentMediaBuffer; + mCurrentMediaBuffer = NULL; + (*buffer)->setObserver(this); + + return OK; +} + +status_t MediaAdapter::pushBuffer(MediaBuffer *buffer) { + if (buffer == NULL) { + ALOGE("pushBuffer get an NULL buffer"); + return -EINVAL; + } + + Mutex::Autolock autoLock(mAdapterLock); + if (!mStarted) { + ALOGE("pushBuffer called before start"); + return INVALID_OPERATION; + } + mCurrentMediaBuffer = buffer; + mBufferReadCond.signal(); + + ALOGV("wait for the buffer returned @ pushBuffer! 
%p", buffer); + mBufferReturnedCond.wait(mAdapterLock); + + return OK; +} + +} // namespace android + diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp index cb8a651..f412dc8 100644 --- a/media/libstagefright/MediaCodec.cpp +++ b/media/libstagefright/MediaCodec.cpp @@ -22,7 +22,7 @@ #include "include/SoftwareRenderer.h" -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <media/ICrypto.h> #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> @@ -30,10 +30,14 @@ #include <media/stagefright/foundation/AString.h> #include <media/stagefright/foundation/hexdump.h> #include <media/stagefright/ACodec.h> +#include <media/stagefright/BufferProducerWrapper.h> +#include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/NativeWindowWrapper.h> +#include "include/avc_utils.h" + namespace android { // static @@ -62,12 +66,14 @@ MediaCodec::MediaCodec(const sp<ALooper> &looper) : mState(UNINITIALIZED), mLooper(looper), mCodec(new ACodec), + mReplyID(0), mFlags(0), mSoftRenderer(NULL), mDequeueInputTimeoutGeneration(0), mDequeueInputReplyID(0), mDequeueOutputTimeoutGeneration(0), - mDequeueOutputReplyID(0) { + mDequeueOutputReplyID(0), + mHaveInputSurface(false) { } MediaCodec::~MediaCodec() { @@ -132,7 +138,7 @@ status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) { status_t MediaCodec::configure( const sp<AMessage> &format, - const sp<SurfaceTextureClient> &nativeWindow, + const sp<Surface> &nativeWindow, const sp<ICrypto> &crypto, uint32_t flags) { sp<AMessage> msg = new AMessage(kWhatConfigure, id()); @@ -154,6 +160,26 @@ status_t MediaCodec::configure( return PostAndAwaitResponse(msg, &response); } +status_t MediaCodec::createInputSurface( + sp<IGraphicBufferProducer>* bufferProducer) { + sp<AMessage> msg = new AMessage(kWhatCreateInputSurface, id()); + + 
sp<AMessage> response; + status_t err = PostAndAwaitResponse(msg, &response); + if (err == NO_ERROR) { + // unwrap the sp<IGraphicBufferProducer> + sp<RefBase> obj; + bool found = response->findObject("input-surface", &obj); + CHECK(found); + sp<BufferProducerWrapper> wrapper( + static_cast<BufferProducerWrapper*>(obj.get())); + *bufferProducer = wrapper->getBufferProducer(); + } else { + ALOGW("createInputSurface failed, err=%d", err); + } + return err; +} + status_t MediaCodec::start() { sp<AMessage> msg = new AMessage(kWhatStart, id()); @@ -288,6 +314,13 @@ status_t MediaCodec::releaseOutputBuffer(size_t index) { return PostAndAwaitResponse(msg, &response); } +status_t MediaCodec::signalEndOfInputStream() { + sp<AMessage> msg = new AMessage(kWhatSignalEndOfInputStream, id()); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + status_t MediaCodec::getOutputFormat(sp<AMessage> *format) const { sp<AMessage> msg = new AMessage(kWhatGetOutputFormat, id()); @@ -476,6 +509,11 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { "(omx error 0x%08x, internalError %d)", omxError, internalError); + if (omxError == OMX_ErrorResourcesLost + && internalError == DEAD_OBJECT) { + mFlags |= kFlagSawMediaServerDie; + } + bool sendErrorReponse = true; switch (mState) { @@ -504,6 +542,19 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { // the shutdown complete notification. sendErrorReponse = false; + + if (mFlags & kFlagSawMediaServerDie) { + // MediaServer died, there definitely won't + // be a shutdown complete notification after + // all. + + // note that we're directly going from + // STOPPING->UNINITIALIZED, instead of the + // usual STOPPING->INITIALIZED state. 
+ setState(UNINITIALIZED); + + (new AMessage)->postReply(mReplyID); + } break; } @@ -571,10 +622,44 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { CHECK_EQ(mState, CONFIGURING); setState(CONFIGURED); + // reset input surface flag + mHaveInputSurface = false; + (new AMessage)->postReply(mReplyID); break; } + case ACodec::kWhatInputSurfaceCreated: + { + // response to ACodec::kWhatCreateInputSurface + status_t err = NO_ERROR; + sp<AMessage> response = new AMessage(); + if (!msg->findInt32("err", &err)) { + sp<RefBase> obj; + msg->findObject("input-surface", &obj); + CHECK(obj != NULL); + response->setObject("input-surface", obj); + mHaveInputSurface = true; + } else { + response->setInt32("err", err); + } + response->postReply(mReplyID); + break; + } + + case ACodec::kWhatSignaledInputEOS: + { + // response to ACodec::kWhatSignalEndOfInputStream + sp<AMessage> response = new AMessage(); + status_t err; + if (msg->findInt32("err", &err)) { + response->setInt32("err", err); + } + response->postReply(mReplyID); + break; + } + + case ACodec::kWhatBuffersAllocated: { int32_t portIndex; @@ -659,8 +744,16 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { } mOutputFormat = msg; - mFlags |= kFlagOutputFormatChanged; - postActivityNotificationIfPossible(); + + if (mFlags & kFlagIsEncoder) { + // Before we announce the format change we should + // collect codec specific data and amend the output + // format as necessary. + mFlags |= kFlagGatherCodecSpecificData; + } else { + mFlags |= kFlagOutputFormatChanged; + postActivityNotificationIfPossible(); + } break; } @@ -730,6 +823,25 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { buffer->meta()->setInt32("omxFlags", omxFlags); + if (mFlags & kFlagGatherCodecSpecificData) { + // This is the very first output buffer after a + // format change was signalled, it'll either contain + // the one piece of codec specific data we can expect + // or there won't be codec specific data. 
+ if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) { + status_t err = + amendOutputFormatWithCodecSpecificData(buffer); + + if (err != OK) { + ALOGE("Codec spit out malformed codec " + "specific data!"); + } + } + + mFlags &= ~kFlagGatherCodecSpecificData; + mFlags |= kFlagOutputFormatChanged; + } + if (mFlags & kFlagDequeueOutputPending) { CHECK(handleDequeueOutputBuffer(mDequeueOutputReplyID)); @@ -873,6 +985,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { if (flags & CONFIGURE_FLAG_ENCODE) { format->setInt32("encoder", true); + mFlags |= kFlagIsEncoder; } extractCSD(format); @@ -881,11 +994,12 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { break; } - case kWhatStart: + case kWhatCreateInputSurface: { uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); + // Must be configured, but can't have been started yet. if (mState != CONFIGURED) { sp<AMessage> response = new AMessage; response->setInt32("err", INVALID_OPERATION); @@ -895,19 +1009,16 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { } mReplyID = replyID; - setState(STARTING); - - mCodec->initiateStart(); + mCodec->initiateCreateInputSurface(); break; } - case kWhatStop: + case kWhatStart: { uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); - if (mState != INITIALIZED - && mState != CONFIGURED && mState != STARTED) { + if (mState != CONFIGURED) { sp<AMessage> response = new AMessage; response->setInt32("err", INVALID_OPERATION); @@ -916,31 +1027,53 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { } mReplyID = replyID; - setState(STOPPING); + setState(STARTING); - mCodec->initiateShutdown(true /* keepComponentAllocated */); - returnBuffersToCodec(); + mCodec->initiateStart(); break; } + case kWhatStop: case kWhatRelease: { + State targetState = + (msg->what() == kWhatStop) ? 
INITIALIZED : UNINITIALIZED; + uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); if (mState != INITIALIZED && mState != CONFIGURED && mState != STARTED) { + // We may be in "UNINITIALIZED" state already without the + // client being aware of this if media server died while + // we were being stopped. The client would assume that + // after stop() returned, it would be safe to call release() + // and it should be in this case, no harm to allow a release() + // if we're already uninitialized. + // Similarly stopping a stopped MediaCodec should be benign. sp<AMessage> response = new AMessage; - response->setInt32("err", INVALID_OPERATION); + response->setInt32( + "err", + mState == targetState ? OK : INVALID_OPERATION); response->postReply(replyID); break; } + if (mFlags & kFlagSawMediaServerDie) { + // It's dead, Jim. Don't expect initiateShutdown to yield + // any useful results now... + setState(UNINITIALIZED); + (new AMessage)->postReply(replyID); + break; + } + mReplyID = replyID; - setState(RELEASING); + setState(msg->what() == kWhatStop ? 
STOPPING : RELEASING); + + mCodec->initiateShutdown( + msg->what() == kWhatStop /* keepComponentAllocated */); - mCodec->initiateShutdown(); returnBuffersToCodec(); break; } @@ -950,6 +1083,14 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); + if (mHaveInputSurface) { + ALOGE("dequeueInputBuffer can't be used with input surface"); + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + response->postReply(replyID); + break; + } + if (handleDequeueInputBuffer(replyID, true /* new request */)) { break; } @@ -1093,6 +1234,24 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { break; } + case kWhatSignalEndOfInputStream: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != STARTED || (mFlags & kFlagStickyError)) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + mCodec->signalEndOfInputStream(); + break; + } + case kWhatGetBuffers: { uint32_t replyID; @@ -1203,6 +1362,23 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { break; } + case kWhatSetParameters: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + sp<AMessage> params; + CHECK(msg->findMessage("params", ¶ms)); + + status_t err = onSetParameters(params); + + sp<AMessage> response = new AMessage; + response->setInt32("err", err); + + response->postReply(replyID); + break; + } + default: TRESPASS(); } @@ -1268,12 +1444,19 @@ void MediaCodec::setState(State newState) { mFlags &= ~kFlagOutputFormatChanged; mFlags &= ~kFlagOutputBuffersChanged; mFlags &= ~kFlagStickyError; + mFlags &= ~kFlagIsEncoder; + mFlags &= ~kFlagGatherCodecSpecificData; mActivityNotify.clear(); } if (newState == UNINITIALIZED) { mComponentName.clear(); + + // The component is gone, mediaserver's probably back up already + // but should 
definitely be back up should we try to instantiate + // another component.. and the cycle continues. + mFlags &= ~kFlagSawMediaServerDie; } mState = newState; @@ -1473,7 +1656,7 @@ status_t MediaCodec::onReleaseOutputBuffer(const sp<AMessage> &msg) { return -EACCES; } - if (render) { + if (render && (info->mData == NULL || info->mData->size() != 0)) { info->mNotify->setInt32("render", true); if (mSoftRenderer != NULL) { @@ -1509,7 +1692,7 @@ ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) { } status_t MediaCodec::setNativeWindow( - const sp<SurfaceTextureClient> &surfaceTextureClient) { + const sp<Surface> &surfaceTextureClient) { status_t err; if (mNativeWindow != NULL) { @@ -1556,4 +1739,59 @@ void MediaCodec::postActivityNotificationIfPossible() { } } +status_t MediaCodec::setParameters(const sp<AMessage> ¶ms) { + sp<AMessage> msg = new AMessage(kWhatSetParameters, id()); + msg->setMessage("params", params); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::onSetParameters(const sp<AMessage> ¶ms) { + mCodec->signalSetParameters(params); + + return OK; +} + +status_t MediaCodec::amendOutputFormatWithCodecSpecificData( + const sp<ABuffer> &buffer) { + AString mime; + CHECK(mOutputFormat->findString("mime", &mime)); + + if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_AVC)) { + // Codec specific data should be SPS and PPS in a single buffer, + // each prefixed by a startcode (0x00 0x00 0x00 0x01). + // We separate the two and put them into the output format + // under the keys "csd-0" and "csd-1". 
+ + unsigned csdIndex = 0; + + const uint8_t *data = buffer->data(); + size_t size = buffer->size(); + + const uint8_t *nalStart; + size_t nalSize; + while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) { + sp<ABuffer> csd = new ABuffer(nalSize + 4); + memcpy(csd->data(), "\x00\x00\x00\x01", 4); + memcpy(csd->data() + 4, nalStart, nalSize); + + mOutputFormat->setBuffer( + StringPrintf("csd-%u", csdIndex).c_str(), csd); + + ++csdIndex; + } + + if (csdIndex != 2) { + return ERROR_MALFORMED; + } + } else { + // For everything else we just stash the codec specific data into + // the output format as a single piece of csd under "csd-0". + mOutputFormat->setBuffer("csd-0", buffer); + } + + return OK; +} + } // namespace android diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp index e7b5903..5d8029c 100644 --- a/media/libstagefright/MediaDefs.cpp +++ b/media/libstagefright/MediaDefs.cpp @@ -40,6 +40,7 @@ const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW = "audio/g711-mlaw"; const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw"; const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac"; const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts"; +const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm"; const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4"; const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav"; diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp index b18c916..9ab6611 100644 --- a/media/libstagefright/MediaExtractor.cpp +++ b/media/libstagefright/MediaExtractor.cpp @@ -21,7 +21,6 @@ #include "include/AMRExtractor.h" #include "include/MP3Extractor.h" #include "include/MPEG4Extractor.h" -#include "include/FragmentedMP4Extractor.h" #include "include/WAVExtractor.h" #include "include/OggExtractor.h" #include "include/MPEG2PSExtractor.h" @@ -94,12 +93,7 @@ sp<MediaExtractor> MediaExtractor::Create( MediaExtractor *ret = NULL; if (!strcasecmp(mime, 
MEDIA_MIMETYPE_CONTAINER_MPEG4) || !strcasecmp(mime, "audio/mp4")) { - int fragmented = 0; - if (meta != NULL && meta->findInt32("fragmented", &fragmented) && fragmented) { - ret = new FragmentedMP4Extractor(source); - } else { - ret = new MPEG4Extractor(source); - } + ret = new MPEG4Extractor(source); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) { ret = new MP3Extractor(source, meta); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB) diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp new file mode 100644 index 0000000..94ce5de --- /dev/null +++ b/media/libstagefright/MediaMuxer.cpp @@ -0,0 +1,173 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "MediaMuxer" +#include <utils/Log.h> + +#include <media/stagefright/MediaMuxer.h> + +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/MediaAdapter.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaCodec.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/MPEG4Writer.h> +#include <media/stagefright/Utils.h> + +namespace android { + +MediaMuxer::MediaMuxer(const char *path, OutputFormat format) + : mState(UNINITIALIZED) { + if (format == OUTPUT_FORMAT_MPEG_4) { + mWriter = new MPEG4Writer(path); + mFileMeta = new MetaData; + mState = INITIALIZED; + } + +} + +MediaMuxer::MediaMuxer(int fd, OutputFormat format) + : mState(UNINITIALIZED) { + if (format == OUTPUT_FORMAT_MPEG_4) { + mWriter = new MPEG4Writer(fd); + mFileMeta = new MetaData; + mState = INITIALIZED; + } +} + +MediaMuxer::~MediaMuxer() { + Mutex::Autolock autoLock(mMuxerLock); + + // Clean up all the internal resources. 
+ mFileMeta.clear(); + mWriter.clear(); + mTrackList.clear(); +} + +ssize_t MediaMuxer::addTrack(const sp<AMessage> &format) { + Mutex::Autolock autoLock(mMuxerLock); + + if (format.get() == NULL) { + ALOGE("addTrack() get a null format"); + return -EINVAL; + } + + if (mState != INITIALIZED) { + ALOGE("addTrack() must be called after constructor and before start()."); + return INVALID_OPERATION; + } + + sp<MetaData> trackMeta = new MetaData; + convertMessageToMetaData(format, trackMeta); + + sp<MediaAdapter> newTrack = new MediaAdapter(trackMeta); + status_t result = mWriter->addSource(newTrack); + if (result == OK) { + return mTrackList.add(newTrack); + } + return -1; +} + +status_t MediaMuxer::setOrientationHint(int degrees) { + Mutex::Autolock autoLock(mMuxerLock); + if (mState != INITIALIZED) { + ALOGE("setOrientationHint() must be called before start()."); + return INVALID_OPERATION; + } + + if (degrees != 0 && degrees != 90 && degrees != 180 && degrees != 270) { + ALOGE("setOrientationHint() get invalid degrees"); + return -EINVAL; + } + + mFileMeta->setInt32(kKeyRotation, degrees); + return OK; +} + +status_t MediaMuxer::start() { + Mutex::Autolock autoLock(mMuxerLock); + if (mState == INITIALIZED) { + mState = STARTED; + mFileMeta->setInt32(kKeyRealTimeRecording, false); + return mWriter->start(mFileMeta.get()); + } else { + ALOGE("start() is called in invalid state %d", mState); + return INVALID_OPERATION; + } +} + +status_t MediaMuxer::stop() { + Mutex::Autolock autoLock(mMuxerLock); + + if (mState == STARTED) { + mState = STOPPED; + for (size_t i = 0; i < mTrackList.size(); i++) { + if (mTrackList[i]->stop() != OK) { + return INVALID_OPERATION; + } + } + return mWriter->stop(); + } else { + ALOGE("stop() is called in invalid state %d", mState); + return INVALID_OPERATION; + } +} + +status_t MediaMuxer::writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex, + int64_t timeUs, uint32_t flags) { + Mutex::Autolock autoLock(mMuxerLock); + + if 
(buffer.get() == NULL) { + ALOGE("WriteSampleData() get an NULL buffer."); + return -EINVAL; + } + + if (mState != STARTED) { + ALOGE("WriteSampleData() is called in invalid state %d", mState); + return INVALID_OPERATION; + } + + if (trackIndex >= mTrackList.size()) { + ALOGE("WriteSampleData() get an invalid index %d", trackIndex); + return -EINVAL; + } + + MediaBuffer* mediaBuffer = new MediaBuffer(buffer); + + mediaBuffer->add_ref(); // Released in MediaAdapter::signalBufferReturned(). + mediaBuffer->set_range(buffer->offset(), buffer->size()); + + sp<MetaData> sampleMetaData = mediaBuffer->meta_data(); + sampleMetaData->setInt64(kKeyTime, timeUs); + // Just set the kKeyDecodingTime as the presentation time for now. + sampleMetaData->setInt64(kKeyDecodingTime, timeUs); + + if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) { + sampleMetaData->setInt32(kKeyIsSyncFrame, true); + } + + sp<MediaAdapter> currentTrack = mTrackList[trackIndex]; + // This pushBuffer will wait until the mediaBuffer is consumed. 
+ return currentTrack->pushBuffer(mediaBuffer); +} + +} // namespace android diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp index 404fa94..7bc7da2 100644 --- a/media/libstagefright/NuMediaExtractor.cpp +++ b/media/libstagefright/NuMediaExtractor.cpp @@ -228,6 +228,34 @@ status_t NuMediaExtractor::getTrackFormat( return convertMetaDataToMessage(meta, format); } +status_t NuMediaExtractor::getFileFormat(sp<AMessage> *format) const { + Mutex::Autolock autoLock(mLock); + + *format = NULL; + + if (mImpl == NULL) { + return -EINVAL; + } + + sp<MetaData> meta = mImpl->getMetaData(); + + const char *mime; + CHECK(meta->findCString(kKeyMIMEType, &mime)); + *format = new AMessage(); + (*format)->setString("mime", mime); + + uint32_t type; + const void *pssh; + size_t psshsize; + if (meta->findData(kKeyPssh, &type, &pssh, &psshsize)) { + sp<ABuffer> buf = new ABuffer(psshsize); + memcpy(buf->data(), pssh, psshsize); + (*format)->setBuffer("pssh", buf); + } + + return OK; +} + status_t NuMediaExtractor::selectTrack(size_t index) { Mutex::Autolock autoLock(mLock); diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp index 7cdb793..1822f07 100644 --- a/media/libstagefright/OMXClient.cpp +++ b/media/libstagefright/OMXClient.cpp @@ -32,7 +32,7 @@ struct MuxOMX : public IOMX { MuxOMX(const sp<IOMX> &remoteOMX); virtual ~MuxOMX(); - virtual IBinder *onAsBinder() { return NULL; } + virtual IBinder *onAsBinder() { return mRemoteOMX->asBinder().get(); } virtual bool livesLocally(node_id node, pid_t pid); @@ -83,6 +83,12 @@ struct MuxOMX : public IOMX { node_id node, OMX_U32 port_index, const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer); + virtual status_t createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer); + + virtual status_t signalEndOfInputStream(node_id node); + virtual status_t allocateBuffer( node_id node, OMX_U32 port_index, size_t size, 
buffer_id *buffer, void **buffer_data); @@ -274,6 +280,18 @@ status_t MuxOMX::useGraphicBuffer( node, port_index, graphicBuffer, buffer); } +status_t MuxOMX::createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer) { + status_t err = getOMX(node)->createInputSurface( + node, port_index, bufferProducer); + return err; +} + +status_t MuxOMX::signalEndOfInputStream(node_id node) { + return getOMX(node)->signalEndOfInputStream(node); +} + status_t MuxOMX::allocateBuffer( node_id node, OMX_U32 port_index, size_t size, buffer_id *buffer, void **buffer_data) { diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp index 70de174..9d349a1 100755..100644 --- a/media/libstagefright/OMXCodec.cpp +++ b/media/libstagefright/OMXCodec.cpp @@ -522,6 +522,17 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) { CODEC_LOGE("setAACFormat() failed (err = %d)", err); return err; } + } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG, mMIME)) { + int32_t numChannels, sampleRate; + if (meta->findInt32(kKeyChannelCount, &numChannels) + && meta->findInt32(kKeySampleRate, &sampleRate)) { + // Since we did not always check for these, leave them optional + // and have the decoder figure it all out. + setRawAudioFormat( + mIsEncoder ? 
kPortIndexInput : kPortIndexOutput, + sampleRate, + numChannels); + } } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME) || !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) { // These are PCM-like formats with a fixed sample rate but @@ -1213,13 +1224,6 @@ status_t OMXCodec::setVideoOutputFormat( CHECK_EQ(err, (status_t)OK); CHECK_EQ((int)format.eCompressionFormat, (int)OMX_VIDEO_CodingUnused); - CHECK(format.eColorFormat == OMX_COLOR_FormatYUV420Planar - || format.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar - || format.eColorFormat == OMX_COLOR_FormatCbYCrY - || format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar - || format.eColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar - || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka); - int32_t colorFormat; if (meta->findInt32(kKeyColorFormat, &colorFormat) && colorFormat != OMX_COLOR_FormatUnused @@ -1390,6 +1394,8 @@ void OMXCodec::setComponentRole( "audio_decoder.raw", "audio_encoder.raw" }, { MEDIA_MIMETYPE_AUDIO_FLAC, "audio_decoder.flac", "audio_encoder.flac" }, + { MEDIA_MIMETYPE_AUDIO_MSGSM, + "audio_decoder.gsm", "audio_encoder.gsm" }, }; static const size_t kNumMimeToRole = diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp index 773854f..773854f 100755..100644 --- a/media/libstagefright/SkipCutBuffer.cpp +++ b/media/libstagefright/SkipCutBuffer.cpp diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp index bccffd8..af8186c 100644 --- a/media/libstagefright/StagefrightMediaScanner.cpp +++ b/media/libstagefright/StagefrightMediaScanner.cpp @@ -42,7 +42,7 @@ static bool FileHasAcceptableExtension(const char *extension) { ".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac", ".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota", ".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf", - ".avi", ".mpeg", ".mpg", ".mpga" + ".avi", ".mpeg", 
".mpg", ".awb", ".mpga" }; static const size_t kNumValidExtensions = sizeof(kValidExtensions) / sizeof(kValidExtensions[0]); diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp index 3c002fc..409038a 100644 --- a/media/libstagefright/SurfaceMediaSource.cpp +++ b/media/libstagefright/SurfaceMediaSource.cpp @@ -298,6 +298,10 @@ status_t SurfaceMediaSource::read( MediaBuffer **buffer, // wait for a buffer to be queued mFrameAvailableCondition.wait(mMutex); } else if (err == OK) { + err = item.mFence->waitForever("SurfaceMediaSource::read"); + if (err) { + ALOGW("read: failed to wait for buffer fence: %d", err); + } // First time seeing the buffer? Added it to the SMS slot if (item.mGraphicBuffer != NULL) { diff --git a/media/libstagefright/ThrottledSource.cpp b/media/libstagefright/ThrottledSource.cpp index 348a9d3..7496752 100644 --- a/media/libstagefright/ThrottledSource.cpp +++ b/media/libstagefright/ThrottledSource.cpp @@ -31,10 +31,6 @@ ThrottledSource::ThrottledSource( CHECK(mBandwidthLimitBytesPerSecond > 0); } -status_t ThrottledSource::initCheck() const { - return mSource->initCheck(); -} - ssize_t ThrottledSource::readAt(off64_t offset, void *data, size_t size) { Mutex::Autolock autoLock(mLock); @@ -62,17 +58,9 @@ ssize_t ThrottledSource::readAt(off64_t offset, void *data, size_t size) { if (whenUs > nowUs) { usleep(whenUs - nowUs); } - return n; } -status_t ThrottledSource::getSize(off64_t *size) { - return mSource->getSize(size); -} - -uint32_t ThrottledSource::flags() { - return mSource->flags(); -} } // namespace android diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp index 74e9222..b0df379 100644 --- a/media/libstagefright/Utils.cpp +++ b/media/libstagefright/Utils.cpp @@ -21,7 +21,7 @@ #include "include/ESDS.h" #include <arpa/inet.h> - +#include <cutils/properties.h> #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> #include 
<media/stagefright/foundation/AMessage.h> @@ -78,6 +78,11 @@ status_t convertMetaDataToMessage( msg->setInt64("durationUs", durationUs); } + int32_t isSync; + if (meta->findInt32(kKeyIsSyncFrame, &isSync) && isSync != 0) { + msg->setInt32("is-sync-frame", 1); + } + if (!strncasecmp("video/", mime, 6)) { int32_t width, height; CHECK(meta->findInt32(kKeyWidth, &width)); @@ -85,6 +90,13 @@ status_t convertMetaDataToMessage( msg->setInt32("width", width); msg->setInt32("height", height); + + int32_t sarWidth, sarHeight; + if (meta->findInt32(kKeySARWidth, &sarWidth) + && meta->findInt32(kKeySARHeight, &sarHeight)) { + msg->setInt32("sar-width", sarWidth); + msg->setInt32("sar-height", sarHeight); + } } else if (!strncasecmp("audio/", mime, 6)) { int32_t numChannels, sampleRate; CHECK(meta->findInt32(kKeyChannelCount, &numChannels)); @@ -363,6 +375,11 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) { meta->setInt64(kKeyDuration, durationUs); } + int32_t isSync; + if (msg->findInt32("is-sync-frame", &isSync) && isSync != 0) { + meta->setInt32(kKeyIsSyncFrame, 1); + } + if (mime.startsWith("video/")) { int32_t width; int32_t height; @@ -372,6 +389,13 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) { } else { ALOGW("did not find width and/or height"); } + + int32_t sarWidth, sarHeight; + if (msg->findInt32("sar-width", &sarWidth) + && msg->findInt32("sar-height", &sarHeight)) { + meta->setInt32(kKeySARWidth, sarWidth); + meta->setInt32(kKeySARHeight, sarHeight); + } } else if (mime.startsWith("audio/")) { int32_t numChannels; if (msg->findInt32("channel-count", &numChannels)) { @@ -431,6 +455,21 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) { #endif } +AString MakeUserAgent() { + AString ua; + ua.append("stagefright/1.2 (Linux;Android "); + +#if (PROPERTY_VALUE_MAX < 8) +#error "PROPERTY_VALUE_MAX must be at least 8" +#endif + + char value[PROPERTY_VALUE_MAX]; + 
property_get("ro.build.version.release", value, "Unknown"); + ua.append(value); + ua.append(")"); + + return ua; +} } // namespace android diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp index 2a7f628..22af6fb 100644 --- a/media/libstagefright/WAVExtractor.cpp +++ b/media/libstagefright/WAVExtractor.cpp @@ -38,6 +38,7 @@ enum { WAVE_FORMAT_PCM = 0x0001, WAVE_FORMAT_ALAW = 0x0006, WAVE_FORMAT_MULAW = 0x0007, + WAVE_FORMAT_MSGSM = 0x0031, WAVE_FORMAT_EXTENSIBLE = 0xFFFE }; @@ -178,6 +179,7 @@ status_t WAVExtractor::init() { if (mWaveFormat != WAVE_FORMAT_PCM && mWaveFormat != WAVE_FORMAT_ALAW && mWaveFormat != WAVE_FORMAT_MULAW + && mWaveFormat != WAVE_FORMAT_MSGSM && mWaveFormat != WAVE_FORMAT_EXTENSIBLE) { return ERROR_UNSUPPORTED; } @@ -216,6 +218,10 @@ status_t WAVExtractor::init() { && mBitsPerSample != 24) { return ERROR_UNSUPPORTED; } + } else if (mWaveFormat == WAVE_FORMAT_MSGSM) { + if (mBitsPerSample != 0) { + return ERROR_UNSUPPORTED; + } } else { CHECK(mWaveFormat == WAVE_FORMAT_MULAW || mWaveFormat == WAVE_FORMAT_ALAW); @@ -283,6 +289,10 @@ status_t WAVExtractor::init() { mTrackMeta->setCString( kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_ALAW); break; + case WAVE_FORMAT_MSGSM: + mTrackMeta->setCString( + kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MSGSM); + break; default: CHECK_EQ(mWaveFormat, (uint16_t)WAVE_FORMAT_MULAW); mTrackMeta->setCString( @@ -294,11 +304,17 @@ status_t WAVExtractor::init() { mTrackMeta->setInt32(kKeyChannelMask, mChannelMask); mTrackMeta->setInt32(kKeySampleRate, mSampleRate); - size_t bytesPerSample = mBitsPerSample >> 3; - - int64_t durationUs = - 1000000LL * (mDataSize / (mNumChannels * bytesPerSample)) - / mSampleRate; + int64_t durationUs = 0; + if (mWaveFormat == WAVE_FORMAT_MSGSM) { + // 65 bytes decode to 320 8kHz samples + durationUs = + 1000000LL * (mDataSize / 65 * 320) / 8000; + } else { + size_t bytesPerSample = mBitsPerSample >> 3; + durationUs = + 1000000LL * (mDataSize / 
(mNumChannels * bytesPerSample)) + / mSampleRate; + } mTrackMeta->setInt64(kKeyDuration, durationUs); @@ -388,7 +404,16 @@ status_t WAVSource::read( int64_t seekTimeUs; ReadOptions::SeekMode mode; if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) { - int64_t pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3); + int64_t pos = 0; + + if (mWaveFormat == WAVE_FORMAT_MSGSM) { + // 65 bytes decode to 320 8kHz samples + int64_t samplenumber = (seekTimeUs * mSampleRate) / 1000000; + int64_t framenumber = samplenumber / 320; + pos = framenumber * 65; + } else { + pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3); + } if (pos > mSize) { pos = mSize; } @@ -414,6 +439,15 @@ status_t WAVSource::read( maxBytesToRead = maxBytesAvailable; } + if (mWaveFormat == WAVE_FORMAT_MSGSM) { + // Microsoft packs 2 frames into 65 bytes, rather than using separate 33-byte frames, + // so read multiples of 65, and use smaller buffers to account for ~10:1 expansion ratio + if (maxBytesToRead > 1024) { + maxBytesToRead = 1024; + } + maxBytesToRead = (maxBytesToRead / 65) * 65; + } + ssize_t n = mDataSource->readAt( mCurrentPos, buffer->data(), maxBytesToRead); @@ -470,12 +504,17 @@ status_t WAVSource::read( } } - size_t bytesPerSample = mBitsPerSample >> 3; + int64_t timeStampUs = 0; + + if (mWaveFormat == WAVE_FORMAT_MSGSM) { + timeStampUs = 1000000LL * (mCurrentPos - mOffset) * 320 / 65 / mSampleRate; + } else { + size_t bytesPerSample = mBitsPerSample >> 3; + timeStampUs = 1000000LL * (mCurrentPos - mOffset) + / (mNumChannels * bytesPerSample) / mSampleRate; + } - buffer->meta_data()->setInt64( - kKeyTime, - 1000000LL * (mCurrentPos - mOffset) - / (mNumChannels * bytesPerSample) / mSampleRate); + buffer->meta_data()->setInt64(kKeyTime, timeStampUs); buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1); mCurrentPos += n; diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp index 
a141752..b822868 100644 --- a/media/libstagefright/avc_utils.cpp +++ b/media/libstagefright/avc_utils.cpp @@ -22,6 +22,7 @@ #include <media/stagefright/foundation/ABitReader.h> #include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/hexdump.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -41,7 +42,9 @@ unsigned parseUE(ABitReader *br) { // Determine video dimensions from the sequence parameterset. void FindAVCDimensions( - const sp<ABuffer> &seqParamSet, int32_t *width, int32_t *height) { + const sp<ABuffer> &seqParamSet, + int32_t *width, int32_t *height, + int32_t *sarWidth, int32_t *sarHeight) { ABitReader br(seqParamSet->data() + 1, seqParamSet->size() - 1); unsigned profile_idc = br.getBits(8); @@ -129,6 +132,48 @@ void FindAVCDimensions( *height -= (frame_crop_top_offset + frame_crop_bottom_offset) * cropUnitY; } + + if (sarWidth != NULL) { + *sarWidth = 0; + } + + if (sarHeight != NULL) { + *sarHeight = 0; + } + + if (br.getBits(1)) { // vui_parameters_present_flag + unsigned sar_width = 0, sar_height = 0; + + if (br.getBits(1)) { // aspect_ratio_info_present_flag + unsigned aspect_ratio_idc = br.getBits(8); + + if (aspect_ratio_idc == 255 /* extendedSAR */) { + sar_width = br.getBits(16); + sar_height = br.getBits(16); + } else if (aspect_ratio_idc > 0 && aspect_ratio_idc < 14) { + static const int32_t kFixedSARWidth[] = { + 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160 + }; + + static const int32_t kFixedSARHeight[] = { + 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99 + }; + + sar_width = kFixedSARWidth[aspect_ratio_idc - 1]; + sar_height = kFixedSARHeight[aspect_ratio_idc - 1]; + } + } + + ALOGV("sample aspect ratio = %u : %u", sar_width, sar_height); + + if (sarWidth != NULL) { + *sarWidth = sar_width; + } + + if (sarHeight != NULL) { + *sarHeight = sar_height; + } + } } status_t getNextNALUnit( @@ -254,7 +299,9 @@ sp<MetaData> 
MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit) { } int32_t width, height; - FindAVCDimensions(seqParamSet, &width, &height); + int32_t sarWidth, sarHeight; + FindAVCDimensions( + seqParamSet, &width, &height, &sarWidth, &sarHeight); size_t stopOffset; sp<ABuffer> picParamSet = FindNAL(data, size, 8, &stopOffset); @@ -301,8 +348,29 @@ sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit) { meta->setInt32(kKeyWidth, width); meta->setInt32(kKeyHeight, height); - ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d)", - width, height, AVCProfileToString(profile), level / 10, level % 10); + if (sarWidth > 1 || sarHeight > 1) { + // We treat 0:0 (unspecified) as 1:1. + + meta->setInt32(kKeySARWidth, sarWidth); + meta->setInt32(kKeySARHeight, sarHeight); + + ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d) " + "SAR %d : %d", + width, + height, + AVCProfileToString(profile), + level / 10, + level % 10, + sarWidth, + sarHeight); + } else { + ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d)", + width, + height, + AVCProfileToString(profile), + level / 10, + level % 10); + } return meta; } diff --git a/media/libstagefright/chromium_http/Android.mk b/media/libstagefright/chromium_http/Android.mk index 2c6d84c..f26f386 100644 --- a/media/libstagefright/chromium_http/Android.mk +++ b/media/libstagefright/chromium_http/Android.mk @@ -22,6 +22,7 @@ LOCAL_SHARED_LIBRARIES += \ libchromium_net \ libutils \ libcutils \ + liblog \ libstagefright_foundation \ libstagefright \ libdrmframework diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp index 91ce175..a862d8b 100644 --- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp +++ b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp @@ -340,5 +340,11 @@ status_t ChromiumHTTPDataSource::reconnectAtOffset(off64_t offset) { return err; } +// static +status_t 
ChromiumHTTPDataSource::UpdateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + return SfDelegate::UpdateProxyConfig(host, port, exclusionList); +} + } // namespace android diff --git a/media/libstagefright/chromium_http/chromium_http_stub.cpp b/media/libstagefright/chromium_http/chromium_http_stub.cpp index 560a61f..289f6de 100644 --- a/media/libstagefright/chromium_http/chromium_http_stub.cpp +++ b/media/libstagefright/chromium_http/chromium_http_stub.cpp @@ -26,6 +26,11 @@ HTTPBase *createChromiumHTTPDataSource(uint32_t flags) { return new ChromiumHTTPDataSource(flags); } +status_t UpdateChromiumHTTPDataSourceProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + return ChromiumHTTPDataSource::UpdateProxyConfig(host, port, exclusionList); +} + DataSource *createDataUriSource(const char *uri) { return new DataUriSource(uri); } diff --git a/media/libstagefright/chromium_http/support.cpp b/media/libstagefright/chromium_http/support.cpp index 13ae3df..741cb1d 100644 --- a/media/libstagefright/chromium_http/support.cpp +++ b/media/libstagefright/chromium_http/support.cpp @@ -36,15 +36,15 @@ #include "include/ChromiumHTTPDataSource.h" #include <cutils/log.h> -#include <cutils/properties.h> #include <media/stagefright/MediaErrors.h> +#include <media/stagefright/Utils.h> #include <string> namespace android { static Mutex gNetworkThreadLock; static base::Thread *gNetworkThread = NULL; -static scoped_refptr<net::URLRequestContext> gReqContext; +static scoped_refptr<SfRequestContext> gReqContext; static scoped_ptr<net::NetworkChangeNotifier> gNetworkChangeNotifier; bool logMessageHandler( @@ -156,19 +156,7 @@ net::NetLog::LogLevel SfNetLog::GetLogLevel() const { //////////////////////////////////////////////////////////////////////////////// SfRequestContext::SfRequestContext() { - AString ua; - ua.append("stagefright/1.2 (Linux;Android "); - -#if (PROPERTY_VALUE_MAX < 8) -#error "PROPERTY_VALUE_MAX must be at least 8" 
-#endif - - char value[PROPERTY_VALUE_MAX]; - property_get("ro.build.version.release", value, "Unknown"); - ua.append(value); - ua.append(")"); - - mUserAgent = ua.c_str(); + mUserAgent = MakeUserAgent().c_str(); set_net_log(new SfNetLog()); @@ -181,8 +169,10 @@ SfRequestContext::SfRequestContext() { set_ssl_config_service( net::SSLConfigService::CreateSystemSSLConfigService()); + mProxyConfigService = new net::ProxyConfigServiceAndroid; + set_proxy_service(net::ProxyService::CreateWithoutProxyResolver( - new net::ProxyConfigServiceAndroid, net_log())); + mProxyConfigService, net_log())); set_http_transaction_factory(new net::HttpCache( host_resolver(), @@ -203,6 +193,31 @@ const std::string &SfRequestContext::GetUserAgent(const GURL &url) const { return mUserAgent; } +status_t SfRequestContext::updateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + Mutex::Autolock autoLock(mProxyConfigLock); + + if (host == NULL || *host == '\0') { + MY_LOGV("updateProxyConfig NULL"); + + std::string proxy; + std::string exList; + mProxyConfigService->UpdateProxySettings(proxy, exList); + } else { +#if !defined(LOG_NDEBUG) || LOG_NDEBUG == 0 + LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG, + "updateProxyConfig %s:%d, exclude '%s'", + host, port, exclusionList); +#endif + + std::string proxy = StringPrintf("%s:%d", host, port).c_str(); + std::string exList = exclusionList; + mProxyConfigService->UpdateProxySettings(proxy, exList); + } + + return OK; +} + //////////////////////////////////////////////////////////////////////////////// SfNetworkLibrary::SfNetworkLibrary() {} @@ -231,6 +246,14 @@ SfDelegate::~SfDelegate() { CHECK(mURLRequest == NULL); } +// static +status_t SfDelegate::UpdateProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + InitializeNetworkThreadIfNecessary(); + + return gReqContext->updateProxyConfig(host, port, exclusionList); +} + void SfDelegate::setOwner(ChromiumHTTPDataSource *owner) { mOwner = owner; } diff --git 
a/media/libstagefright/chromium_http/support.h b/media/libstagefright/chromium_http/support.h index d2c5bc0..975a1d3 100644 --- a/media/libstagefright/chromium_http/support.h +++ b/media/libstagefright/chromium_http/support.h @@ -27,8 +27,13 @@ #include "net/base/io_buffer.h" #include <utils/KeyedVector.h> +#include <utils/Mutex.h> #include <utils/String8.h> +namespace net { + struct ProxyConfigServiceAndroid; +}; + namespace android { struct SfNetLog : public net::NetLog { @@ -55,8 +60,14 @@ struct SfRequestContext : public net::URLRequestContext { virtual const std::string &GetUserAgent(const GURL &url) const; + status_t updateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + private: + Mutex mProxyConfigLock; + std::string mUserAgent; + net::ProxyConfigServiceAndroid *mProxyConfigService; DISALLOW_EVIL_CONSTRUCTORS(SfRequestContext); }; @@ -120,6 +131,9 @@ struct SfDelegate : public net::URLRequest::Delegate { virtual void OnReadCompleted(net::URLRequest *request, int bytes_read); + static status_t UpdateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + private: typedef Delegate inherited; diff --git a/media/libstagefright/chromium_http_stub.cpp b/media/libstagefright/chromium_http_stub.cpp index cbd8796..ed8a878 100644 --- a/media/libstagefright/chromium_http_stub.cpp +++ b/media/libstagefright/chromium_http_stub.cpp @@ -30,6 +30,9 @@ static Mutex gLibMutex; HTTPBase *(*gLib_createChromiumHTTPDataSource)(uint32_t flags); DataSource *(*gLib_createDataUriSource)(const char *uri); +status_t (*gLib_UpdateChromiumHTTPDataSourceProxyConfig)( + const char *host, int32_t port, const char *exclusionList); + static bool load_libstagefright_chromium_http() { Mutex::Autolock autoLock(gLibMutex); void *sym; @@ -59,6 +62,14 @@ static bool load_libstagefright_chromium_http() { } gLib_createDataUriSource = (DataSource *(*)(const char *))sym; + sym = dlsym(gHandle, "UpdateChromiumHTTPDataSourceProxyConfig"); + if (sym == 
NULL) { + gHandle = NULL; + return false; + } + gLib_UpdateChromiumHTTPDataSourceProxyConfig = + (status_t (*)(const char *, int32_t, const char *))sym; + return true; } @@ -70,6 +81,16 @@ HTTPBase *createChromiumHTTPDataSource(uint32_t flags) { return gLib_createChromiumHTTPDataSource(flags); } +status_t UpdateChromiumHTTPDataSourceProxyConfig( + const char *host, int32_t port, const char *exclusionList) { + if (!load_libstagefright_chromium_http()) { + return INVALID_OPERATION; + } + + return gLib_UpdateChromiumHTTPDataSourceProxyConfig( + host, port, exclusionList); +} + DataSource *createDataUriSource(const char *uri) { if (!load_libstagefright_chromium_http()) { return NULL; diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk index 4dc38a8..ffa64f9 100644 --- a/media/libstagefright/codecs/aacdec/Android.mk +++ b/media/libstagefright/codecs/aacdec/Android.mk @@ -20,7 +20,7 @@ LOCAL_CFLAGS := LOCAL_STATIC_LIBRARIES := libFraunhoferAAC LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils libcutils + libstagefright_omx libstagefright_foundation libutils libcutils liblog LOCAL_MODULE := libstagefright_soft_aacdec LOCAL_MODULE_TAGS := optional diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp index d88813e..cf50dc9 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp @@ -29,6 +29,7 @@ #define DRC_DEFAULT_MOBILE_REF_LEVEL 64 /* 64*-0.25dB = -16 dB below full scale for mobile conf */ #define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */ +#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */ #define MAX_CHANNEL_COUNT 6 /* maximum number of audio channels that can be decoded */ // names of properties that can be used to override the default DRC settings #define 
PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level" @@ -118,7 +119,7 @@ status_t SoftAAC2::initDecoder() { status = OK; } } - mIsFirst = true; + mDecoderHasData = false; // for streams that contain metadata, use the mobile profile DRC settings unless overridden // by platform properties: @@ -146,6 +147,8 @@ status_t SoftAAC2::initDecoder() { unsigned boost = atoi(value); ALOGV("AAC decoder using AAC_DRC_BOOST_FACTOR of %d", boost); aacDecoder_SetParam(mAACDecoder, AAC_DRC_BOOST_FACTOR, boost); + } else { + aacDecoder_SetParam(mAACDecoder, AAC_DRC_BOOST_FACTOR, DRC_DEFAULT_MOBILE_DRC_BOOST); } return status; @@ -327,6 +330,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); return; } + inQueue.erase(inQueue.begin()); info->mOwnedByUs = false; notifyEmptyBufferDone(header); @@ -358,7 +362,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { inInfo->mOwnedByUs = false; notifyEmptyBufferDone(inHeader); - if (!mIsFirst) { + if (mDecoderHasData) { // flush out the decoder's delayed data by calling DecodeFrame // one more time, with the AACDEC_FLUSH flag set INT_PCM *outBuffer = @@ -370,6 +374,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { outBuffer, outHeader->nAllocLen, AACDEC_FLUSH); + mDecoderHasData = false; if (decoderErr != AAC_DEC_OK) { mSignalledError = true; @@ -385,9 +390,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { * sizeof(int16_t) * mStreamInfo->numChannels; } else { - // Since we never discarded frames from the start, we won't have - // to add any padding at the end either. 
- + // we never submitted any data to the decoder, so there's nothing to flush out outHeader->nFilledLen = 0; } @@ -473,6 +476,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { inBuffer, inBufferLength, bytesValid); + mDecoderHasData = true; decoderErr = aacDecoder_DecodeFrame(mAACDecoder, outBuffer, @@ -484,6 +488,35 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { } } + size_t numOutBytes = + mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; + + if (decoderErr == AAC_DEC_OK) { + UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; + inHeader->nFilledLen -= inBufferUsedLength; + inHeader->nOffset += inBufferUsedLength; + } else { + ALOGW("AAC decoder returned error %d, substituting silence", + decoderErr); + + memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes); + + // Discard input buffer. + inHeader->nFilledLen = 0; + + aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); + + // fall through + } + + if (inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + inQueue.erase(inQueue.begin()); + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } + /* * AAC+/eAAC+ streams can be signalled in two ways: either explicitly * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual @@ -502,15 +535,9 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { if (mStreamInfo->sampleRate != prevSampleRate || mStreamInfo->numChannels != prevNumChannels) { maybeConfigureDownmix(); - ALOGI("Reconfiguring decoder: %d Hz, %d channels", - mStreamInfo->sampleRate, - mStreamInfo->numChannels); - - // We're going to want to revisit this input buffer, but - // may have already advanced the offset. Undo that if - // necessary. 
- inHeader->nOffset -= adtsHeaderSize; - inHeader->nFilledLen += adtsHeaderSize; + ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels", + prevSampleRate, mStreamInfo->sampleRate, + prevNumChannels, mStreamInfo->numChannels); notify(OMX_EventPortSettingsChanged, 1, 0, NULL); mOutputPortSettingsChange = AWAITING_DISABLED; @@ -523,38 +550,10 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { return; } - size_t numOutBytes = - mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; - - if (decoderErr == AAC_DEC_OK) { - UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; - inHeader->nFilledLen -= inBufferUsedLength; - inHeader->nOffset += inBufferUsedLength; - } else { - ALOGW("AAC decoder returned error %d, substituting silence", - decoderErr); - - memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes); - - // Discard input buffer. - inHeader->nFilledLen = 0; - - aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); - - // fall through - } - if (decoderErr == AAC_DEC_OK || mNumSamplesOutput > 0) { // We'll only output data if we successfully decoded it or // we've previously decoded valid data, in the latter case // (decode failed) we'll output a silent frame. - if (mIsFirst) { - mIsFirst = false; - // the first decoded frame should be discarded to account - // for decoder delay - numOutBytes = 0; - } - outHeader->nFilledLen = numOutBytes; outHeader->nFlags = 0; @@ -571,14 +570,6 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { outHeader = NULL; } - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - inQueue.erase(inQueue.begin()); - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; - } - if (decoderErr == AAC_DEC_OK) { ++mInputBufferCount; } @@ -589,11 +580,32 @@ void SoftAAC2::onPortFlushCompleted(OMX_U32 portIndex) { if (portIndex == 0) { // Make sure that the next buffer output does not still // depend on fragments from the last one decoded. 
- aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); - mIsFirst = true; + // drain all existing data + drainDecoder(); } } +void SoftAAC2::drainDecoder() { + // a buffer big enough for 6 channels of decoded HE-AAC + short buf [2048*6]; + aacDecoder_DecodeFrame(mAACDecoder, + buf, sizeof(buf), AACDEC_FLUSH | AACDEC_CLRHIST | AACDEC_INTR); + aacDecoder_DecodeFrame(mAACDecoder, + buf, sizeof(buf), AACDEC_FLUSH | AACDEC_CLRHIST | AACDEC_INTR); + aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); + mDecoderHasData = false; +} + +void SoftAAC2::onReset() { + drainDecoder(); + // reset the "configured" state + mInputBufferCount = 0; + mNumSamplesOutput = 0; + // To make the codec behave the same before and after a reset, we need to invalidate the + // streaminfo struct. This does that: + mStreamInfo->sampleRate = 0; +} + void SoftAAC2::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) { if (portIndex != 1) { return; diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h index 0353196..2d960ab 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.h +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h @@ -41,6 +41,7 @@ protected: virtual void onQueueFilled(OMX_U32 portIndex); virtual void onPortFlushCompleted(OMX_U32 portIndex); virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled); + virtual void onReset(); private: enum { @@ -51,7 +52,7 @@ private: HANDLE_AACDECODER mAACDecoder; CStreamInfo *mStreamInfo; bool mIsADTS; - bool mIsFirst; + bool mDecoderHasData; size_t mInputBufferCount; bool mSignalledError; int64_t mAnchorTimeUs; @@ -67,6 +68,7 @@ private: status_t initDecoder(); bool isConfigured() const; void maybeConfigureDownmix() const; + void drainDecoder(); DISALLOW_EVIL_CONSTRUCTORS(SoftAAC2); }; diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk index 820734d..057c69b 100644 --- 
a/media/libstagefright/codecs/aacenc/Android.mk +++ b/media/libstagefright/codecs/aacenc/Android.mk @@ -109,7 +109,7 @@ ifeq ($(AAC_LIBRARY), fraunhofer) LOCAL_STATIC_LIBRARIES := libFraunhoferAAC LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils + libstagefright_omx libstagefright_foundation libutils liblog LOCAL_MODULE := libstagefright_soft_aacenc LOCAL_MODULE_TAGS := optional @@ -132,7 +132,7 @@ else # visualon libstagefright_aacenc LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils \ + libstagefright_omx libstagefright_foundation libutils liblog \ libstagefright_enc_common LOCAL_MODULE := libstagefright_soft_aacenc diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp index 7719435..5749733 100644 --- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp +++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp @@ -481,7 +481,7 @@ void SoftAACEncoder2::onQueueFilled(OMX_U32 portIndex) { void* inBuffer[] = { (unsigned char *)mInputFrame }; INT inBufferIds[] = { IN_AUDIO_DATA }; - INT inBufferSize[] = { numBytesPerInputFrame }; + INT inBufferSize[] = { (INT)numBytesPerInputFrame }; INT inBufferElSize[] = { sizeof(int16_t) }; AACENC_BufDesc inBufDesc; diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk index b48a459..8d6c6f8 100644 --- a/media/libstagefright/codecs/amrnb/dec/Android.mk +++ b/media/libstagefright/codecs/amrnb/dec/Android.mk @@ -72,7 +72,7 @@ LOCAL_STATIC_LIBRARIES := \ libstagefright_amrnbdec libstagefright_amrwbdec LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils \ + libstagefright_omx libstagefright_foundation libutils liblog \ libstagefright_amrnb_common LOCAL_MODULE := libstagefright_soft_amrdec diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk 
index 457656a..f4e467a 100644 --- a/media/libstagefright/codecs/amrnb/enc/Android.mk +++ b/media/libstagefright/codecs/amrnb/enc/Android.mk @@ -92,7 +92,7 @@ LOCAL_STATIC_LIBRARIES := \ libstagefright_amrnbenc LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils \ + libstagefright_omx libstagefright_foundation libutils liblog \ libstagefright_amrnb_common LOCAL_MODULE := libstagefright_soft_amrnbenc diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp index 07f8b4f..50b739c 100644 --- a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp +++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp @@ -257,7 +257,7 @@ OMX_ERRORTYPE SoftAMRNBEncoder::internalSetParameter( } if (pcmParams->nChannels != 1 - || pcmParams->nSamplingRate != kSampleRate) { + || pcmParams->nSamplingRate != (OMX_U32)kSampleRate) { return OMX_ErrorUndefined; } diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk index edfd7b7..c5b8e0c 100644 --- a/media/libstagefright/codecs/amrwbenc/Android.mk +++ b/media/libstagefright/codecs/amrwbenc/Android.mk @@ -130,7 +130,7 @@ LOCAL_STATIC_LIBRARIES := \ libstagefright_amrwbenc LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils \ + libstagefright_omx libstagefright_foundation libutils liblog \ libstagefright_enc_common LOCAL_MODULE := libstagefright_soft_amrwbenc diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk index cffe469..7d17c2a 100644 --- a/media/libstagefright/codecs/avc/enc/Android.mk +++ b/media/libstagefright/codecs/avc/enc/Android.mk @@ -62,6 +62,7 @@ LOCAL_SHARED_LIBRARIES := \ libstagefright_foundation \ libstagefright_omx \ libutils \ + liblog \ libui diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk index 
546a357..f01d605 100644 --- a/media/libstagefright/codecs/flac/enc/Android.mk +++ b/media/libstagefright/codecs/flac/enc/Android.mk @@ -10,7 +10,7 @@ LOCAL_C_INCLUDES := \ external/flac/include LOCAL_SHARED_LIBRARIES := \ - libstagefright libstagefright_omx libstagefright_foundation libutils + libstagefright libstagefright_omx libstagefright_foundation libutils liblog LOCAL_STATIC_LIBRARIES := \ libFLAC \ diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp index 233aed3..e64fe72 100644 --- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp +++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp @@ -109,7 +109,7 @@ void SoftFlacEncoder::initPorts() { def.eDir = OMX_DirInput; def.nBufferCountMin = kNumBuffers;// TODO verify that 1 is enough def.nBufferCountActual = def.nBufferCountMin; - def.nBufferSize = kMaxNumSamplesPerFrame * sizeof(int16_t) * 2; + def.nBufferSize = kMaxInputBufferSize; def.bEnabled = OMX_TRUE; def.bPopulated = OMX_FALSE; def.eDomain = OMX_PortDomainAudio; @@ -234,6 +234,22 @@ OMX_ERRORTYPE SoftFlacEncoder::internalSetParameter( return OMX_ErrorNone; } + case OMX_IndexParamPortDefinition: + { + OMX_PARAM_PORTDEFINITIONTYPE *defParams = + (OMX_PARAM_PORTDEFINITIONTYPE *)params; + + if (defParams->nPortIndex == 0) { + if (defParams->nBufferSize > kMaxInputBufferSize) { + ALOGE("Input buffer size must be at most %zu bytes", + kMaxInputBufferSize); + return OMX_ErrorUnsupportedSetting; + } + } + + // fall through + } + default: ALOGV("SoftFlacEncoder::internalSetParameter(default)"); return SimpleSoftOMXComponent::internalSetParameter(index, params); @@ -273,7 +289,7 @@ void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) { return; } - if (inHeader->nFilledLen > kMaxNumSamplesPerFrame * sizeof(FLAC__int32) * 2) { + if (inHeader->nFilledLen > kMaxInputBufferSize) { ALOGE("input buffer too large (%ld).", inHeader->nFilledLen); mSignalledError = true; 
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); @@ -290,6 +306,7 @@ void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) { const unsigned nbInputSamples = inHeader->nFilledLen / 2; const OMX_S16 * const pcm16 = reinterpret_cast<OMX_S16 *>(inHeader->pBuffer); + CHECK_LE(nbInputSamples, 2 * kMaxNumSamplesPerFrame); for (unsigned i=0 ; i < nbInputSamples ; i++) { mInputBufferPcm32[i] = (FLAC__int32) pcm16[i]; } diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h index 1e0148a..97361fa 100644 --- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h +++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h @@ -52,6 +52,7 @@ private: enum { kNumBuffers = 2, kMaxNumSamplesPerFrame = 1152, + kMaxInputBufferSize = kMaxNumSamplesPerFrame * sizeof(int16_t) * 2, kMaxOutputBufferSize = 65536, //TODO check if this can be reduced }; diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk index 28be646..4c80da6 100644 --- a/media/libstagefright/codecs/g711/dec/Android.mk +++ b/media/libstagefright/codecs/g711/dec/Android.mk @@ -9,7 +9,7 @@ LOCAL_C_INCLUDES := \ frameworks/native/include/media/openmax LOCAL_SHARED_LIBRARIES := \ - libstagefright libstagefright_omx libstagefright_foundation libutils + libstagefright libstagefright_omx libstagefright_foundation libutils liblog LOCAL_MODULE := libstagefright_soft_g711dec LOCAL_MODULE_TAGS := optional diff --git a/media/libstagefright/codecs/gsm/Android.mk b/media/libstagefright/codecs/gsm/Android.mk new file mode 100644 index 0000000..2e43120 --- /dev/null +++ b/media/libstagefright/codecs/gsm/Android.mk @@ -0,0 +1,4 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +include $(call all-makefiles-under,$(LOCAL_PATH)) diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk new file mode 100644 index 0000000..71613d2 --- /dev/null +++ 
b/media/libstagefright/codecs/gsm/dec/Android.mk @@ -0,0 +1,21 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := \ + SoftGSM.cpp + +LOCAL_C_INCLUDES := \ + frameworks/av/media/libstagefright/include \ + frameworks/native/include/media/openmax \ + external/libgsm/inc + +LOCAL_SHARED_LIBRARIES := \ + libstagefright libstagefright_omx libstagefright_foundation libutils liblog + +LOCAL_STATIC_LIBRARIES := \ + libgsm + +LOCAL_MODULE := libstagefright_soft_gsmdec +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2 b/media/libstagefright/codecs/gsm/dec/MODULE_LICENSE_APACHE2 index e69de29..e69de29 100644 --- a/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2 +++ b/media/libstagefright/codecs/gsm/dec/MODULE_LICENSE_APACHE2 diff --git a/services/camera/tests/CameraServiceTest/NOTICE b/media/libstagefright/codecs/gsm/dec/NOTICE index c5b1efa..c5b1efa 100644 --- a/services/camera/tests/CameraServiceTest/NOTICE +++ b/media/libstagefright/codecs/gsm/dec/NOTICE diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp new file mode 100644 index 0000000..00e0c85 --- /dev/null +++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "SoftGSM" +#include <utils/Log.h> + +#include "SoftGSM.h" + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MediaDefs.h> + +namespace android { + +template<class T> +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 0; + params->nVersion.s.nRevision = 0; + params->nVersion.s.nStep = 0; +} + +SoftGSM::SoftGSM( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SimpleSoftOMXComponent(name, callbacks, appData, component), + mSignalledError(false) { + + CHECK(!strcmp(name, "OMX.google.gsm.decoder")); + + mGsm = gsm_create(); + CHECK(mGsm); + int msopt = 1; + gsm_option(mGsm, GSM_OPT_WAV49, &msopt); + + initPorts(); +} + +SoftGSM::~SoftGSM() { + gsm_destroy(mGsm); +} + +void SoftGSM::initPorts() { + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + + def.nPortIndex = 0; + def.eDir = OMX_DirInput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = sizeof(gsm_frame); + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 1; + + def.format.audio.cMIMEType = + const_cast<char *>(MEDIA_MIMETYPE_AUDIO_MSGSM); + + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingGSMFR; + + addPort(def); + + def.nPortIndex = 1; + def.eDir = OMX_DirOutput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = kMaxNumSamplesPerFrame * sizeof(int16_t); + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 2; + + def.format.audio.cMIMEType = const_cast<char 
*>("audio/raw"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingPCM; + + addPort(def); +} + +OMX_ERRORTYPE SoftGSM::internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params) { + switch (index) { + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + pcmParams->eNumData = OMX_NumericalDataSigned; + pcmParams->eEndian = OMX_EndianBig; + pcmParams->bInterleaved = OMX_TRUE; + pcmParams->nBitPerSample = 16; + pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear; + pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF; + pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF; + + pcmParams->nChannels = 1; + pcmParams->nSamplingRate = 8000; + + return OMX_ErrorNone; + } + + default: + return SimpleSoftOMXComponent::internalGetParameter(index, params); + } +} + +OMX_ERRORTYPE SoftGSM::internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params) { + switch (index) { + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + if (pcmParams->nChannels != 1) { + return OMX_ErrorUndefined; + } + + if (pcmParams->nSamplingRate != 8000) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamStandardComponentRole: + { + const OMX_PARAM_COMPONENTROLETYPE *roleParams = + (const OMX_PARAM_COMPONENTROLETYPE *)params; + + if (strncmp((const char *)roleParams->cRole, + "audio_decoder.gsm", + OMX_MAX_STRINGNAME_SIZE - 1)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + default: + return SimpleSoftOMXComponent::internalSetParameter(index, params); + } +} + +void SoftGSM::onQueueFilled(OMX_U32 portIndex) { + if (mSignalledError) { + return; + } + + 
List<BufferInfo *> &inQueue = getPortQueue(0); + List<BufferInfo *> &outQueue = getPortQueue(1); + + while (!inQueue.empty() && !outQueue.empty()) { + BufferInfo *inInfo = *inQueue.begin(); + OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + + BufferInfo *outInfo = *outQueue.begin(); + OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inHeader); + + outHeader->nFilledLen = 0; + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + + outQueue.erase(outQueue.begin()); + outInfo->mOwnedByUs = false; + notifyFillBufferDone(outHeader); + return; + } + + if (inHeader->nFilledLen > kMaxNumSamplesPerFrame) { + ALOGE("input buffer too large (%ld).", inHeader->nFilledLen); + notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); + mSignalledError = true; + } + + if(((inHeader->nFilledLen / 65) * 65) != inHeader->nFilledLen) { + ALOGE("input buffer not multiple of 65 (%ld).", inHeader->nFilledLen); + notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); + mSignalledError = true; + } + + uint8_t *inputptr = inHeader->pBuffer + inHeader->nOffset; + + int n = mSignalledError ? 
0 : DecodeGSM(mGsm, + reinterpret_cast<int16_t *>(outHeader->pBuffer), inputptr, inHeader->nFilledLen); + + outHeader->nTimeStamp = inHeader->nTimeStamp; + outHeader->nOffset = 0; + outHeader->nFilledLen = n * sizeof(int16_t); + outHeader->nFlags = 0; + + inInfo->mOwnedByUs = false; + inQueue.erase(inQueue.begin()); + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + + outInfo->mOwnedByUs = false; + outQueue.erase(outQueue.begin()); + outInfo = NULL; + notifyFillBufferDone(outHeader); + outHeader = NULL; + } +} + + +// static +int SoftGSM::DecodeGSM(gsm handle, + int16_t *out, uint8_t *in, size_t inSize) { + + int ret = 0; + while (inSize > 0) { + gsm_decode(handle, in, out); + in += 33; + inSize -= 33; + out += 160; + ret += 160; + gsm_decode(handle, in, out); + in += 32; + inSize -= 32; + out += 160; + ret += 160; + } + return ret; +} + + +} // namespace android + +android::SoftOMXComponent *createSoftOMXComponent( + const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component) { + return new android::SoftGSM(name, callbacks, appData, component); +} + diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.h b/media/libstagefright/codecs/gsm/dec/SoftGSM.h new file mode 100644 index 0000000..8ab6116 --- /dev/null +++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SOFT_GSM_H_ + +#define SOFT_GSM_H_ + +#include "SimpleSoftOMXComponent.h" + +extern "C" { +#include "gsm.h" +} + +namespace android { + +struct SoftGSM : public SimpleSoftOMXComponent { + SoftGSM(const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + +protected: + virtual ~SoftGSM(); + + virtual OMX_ERRORTYPE internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params); + + virtual OMX_ERRORTYPE internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params); + + virtual void onQueueFilled(OMX_U32 portIndex); + +private: + enum { + kNumBuffers = 4, + kMaxNumSamplesPerFrame = 16384, + }; + + bool mSignalledError; + gsm mGsm; + + void initPorts(); + + static int DecodeGSM(gsm handle, int16_t *out, uint8_t *in, size_t inSize); + + DISALLOW_EVIL_CONSTRUCTORS(SoftGSM); +}; + +} // namespace android + +#endif // SOFT_GSM_H_ + diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk index a6b1edc..a3d5779 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk +++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk @@ -67,7 +67,7 @@ LOCAL_STATIC_LIBRARIES := \ libstagefright_m4vh263dec LOCAL_SHARED_LIBRARIES := \ - libstagefright libstagefright_omx libstagefright_foundation libutils + libstagefright libstagefright_omx libstagefright_foundation libutils liblog LOCAL_MODULE := libstagefright_soft_mpeg4dec LOCAL_MODULE_TAGS := optional diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp index d527fde..020cc0a 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp +++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp @@ -326,7 +326,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) { OMX_BUFFERHEADERTYPE *outHeader = port->mBuffers.editItemAt(mNumSamplesOutput & 1).mHeader; - if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + if 
((inHeader->nFlags & OMX_BUFFERFLAG_EOS) && inHeader->nFilledLen == 0) { inQueue.erase(inQueue.begin()); inInfo->mOwnedByUs = false; notifyEmptyBufferDone(inHeader); @@ -445,6 +445,11 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) { inHeader->nOffset += bufferSize; inHeader->nFilledLen = 0; + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + } else { + outHeader->nFlags = 0; + } if (inHeader->nFilledLen == 0) { inInfo->mOwnedByUs = false; @@ -458,7 +463,6 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) { outHeader->nOffset = 0; outHeader->nFilledLen = (mWidth * mHeight * 3) / 2; - outHeader->nFlags = 0; List<BufferInfo *>::iterator it = outQueue.begin(); while ((*it)->mHeader != outHeader) { diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk index 865cc9c..83a2dd2 100644 --- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk +++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk @@ -65,6 +65,7 @@ LOCAL_SHARED_LIBRARIES := \ libstagefright_foundation \ libstagefright_omx \ libutils \ + liblog \ libui diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk index ec8d7ec..135c715 100644 --- a/media/libstagefright/codecs/mp3dec/Android.mk +++ b/media/libstagefright/codecs/mp3dec/Android.mk @@ -70,7 +70,7 @@ LOCAL_C_INCLUDES := \ $(LOCAL_PATH)/include LOCAL_SHARED_LIBRARIES := \ - libstagefright libstagefright_omx libstagefright_foundation libutils + libstagefright libstagefright_omx libstagefright_foundation libutils liblog LOCAL_STATIC_LIBRARIES := \ libstagefright_mp3dec diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp index fb1135c..9f25536 100644 --- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp +++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp @@ -166,6 +166,21 @@ OMX_ERRORTYPE SoftMP3::internalSetParameter( return OMX_ErrorNone; 
} + case OMX_IndexParamAudioPcm: + { + const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (const OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + mNumChannels = pcmParams->nChannels; + mSamplingRate = pcmParams->nSamplingRate; + + return OMX_ErrorNone; + } + default: return SimpleSoftOMXComponent::internalSetParameter(index, params); } @@ -343,6 +358,11 @@ void SoftMP3::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) { } } +void SoftMP3::onReset() { + pvmp3_InitDecoder(mConfig, mDecoderBuf); + mIsFirst = true; +} + } // namespace android android::SoftOMXComponent *createSoftOMXComponent( diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h index 3a05466..4af91ea 100644 --- a/media/libstagefright/codecs/mp3dec/SoftMP3.h +++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h @@ -42,6 +42,7 @@ protected: virtual void onQueueFilled(OMX_U32 portIndex); virtual void onPortFlushCompleted(OMX_U32 portIndex); virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled); + virtual void onReset(); private: enum { diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk index 0082d7c..7f2c46d 100644 --- a/media/libstagefright/codecs/on2/dec/Android.mk +++ b/media/libstagefright/codecs/on2/dec/Android.mk @@ -15,7 +15,7 @@ LOCAL_STATIC_LIBRARIES := \ libvpx LOCAL_SHARED_LIBRARIES := \ - libstagefright libstagefright_omx libstagefright_foundation libutils + libstagefright libstagefright_omx libstagefright_foundation libutils liblog LOCAL_MODULE := libstagefright_soft_vpxdec LOCAL_MODULE_TAGS := optional diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp index bf9ab3a..a400b4c 100644 --- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp +++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp @@ -66,7 +66,7 @@ void SoftVPX::initPorts() { def.eDir = 
OMX_DirInput; def.nBufferCountMin = kNumBuffers; def.nBufferCountActual = def.nBufferCountMin; - def.nBufferSize = 256 * 1024; + def.nBufferSize = 768 * 1024; def.bEnabled = OMX_TRUE; def.bPopulated = OMX_FALSE; def.eDomain = OMX_PortDomainVideo; diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk new file mode 100644 index 0000000..a92d376 --- /dev/null +++ b/media/libstagefright/codecs/on2/enc/Android.mk @@ -0,0 +1,24 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := \ + SoftVPXEncoder.cpp + +LOCAL_C_INCLUDES := \ + $(TOP)/external/libvpx/libvpx \ + $(TOP)/external/openssl/include \ + $(TOP)/external/libvpx/libvpx/vpx_codec \ + $(TOP)/external/libvpx/libvpx/vpx_ports \ + frameworks/av/media/libstagefright/include \ + frameworks/native/include/media/openmax \ + +LOCAL_STATIC_LIBRARIES := \ + libvpx + +LOCAL_SHARED_LIBRARIES := \ + libstagefright libstagefright_omx libstagefright_foundation libutils liblog \ + +LOCAL_MODULE := libstagefright_soft_vpxenc +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/libstagefright/codecs/on2/enc/MODULE_LICENSE_APACHE2 b/media/libstagefright/codecs/on2/enc/MODULE_LICENSE_APACHE2 new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/media/libstagefright/codecs/on2/enc/MODULE_LICENSE_APACHE2 diff --git a/media/libstagefright/codecs/on2/enc/NOTICE b/media/libstagefright/codecs/on2/enc/NOTICE new file mode 100644 index 0000000..faed58a --- /dev/null +++ b/media/libstagefright/codecs/on2/enc/NOTICE @@ -0,0 +1,190 @@ + + Copyright (c) 2005-2013, The Android Open Source Project + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp new file mode 100644 index 0000000..e25637a --- /dev/null +++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp @@ -0,0 +1,686 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// #define LOG_NDEBUG 0 +#define LOG_TAG "SoftVPXEncoder" +#include "SoftVPXEncoder.h" + +#include <utils/Log.h> + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MediaDefs.h> + +namespace android { + + +template<class T> +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + // OMX IL 1.1.2 + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 1; + params->nVersion.s.nRevision = 2; + params->nVersion.s.nStep = 0; +} + + +static int GetCPUCoreCount() { + int cpuCoreCount = 1; +#if defined(_SC_NPROCESSORS_ONLN) + cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN); +#else + // _SC_NPROC_ONLN must be defined... 
+ cpuCoreCount = sysconf(_SC_NPROC_ONLN); +#endif + CHECK_GE(cpuCoreCount, 1); + return cpuCoreCount; +} + + +// This color conversion utility is copied from SoftMPEG4Encoder.cpp +inline static void ConvertSemiPlanarToPlanar(uint8_t *inyuv, + uint8_t* outyuv, + int32_t width, + int32_t height) { + int32_t outYsize = width * height; + uint32_t *outy = (uint32_t *) outyuv; + uint16_t *outcb = (uint16_t *) (outyuv + outYsize); + uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2)); + + /* Y copying */ + memcpy(outy, inyuv, outYsize); + + /* U & V copying */ + uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize); + for (int32_t i = height >> 1; i > 0; --i) { + for (int32_t j = width >> 2; j > 0; --j) { + uint32_t temp = *inyuv_4++; + uint32_t tempU = temp & 0xFF; + tempU = tempU | ((temp >> 8) & 0xFF00); + + uint32_t tempV = (temp >> 8) & 0xFF; + tempV = tempV | ((temp >> 16) & 0xFF00); + + // Flip U and V + *outcb++ = tempV; + *outcr++ = tempU; + } + } +} + + +SoftVPXEncoder::SoftVPXEncoder(const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SimpleSoftOMXComponent(name, callbacks, appData, component), + mCodecContext(NULL), + mCodecConfiguration(NULL), + mCodecInterface(NULL), + mWidth(176), + mHeight(144), + mBitrate(192000), // in bps + mBitrateControlMode(VPX_VBR), // variable bitrate + mFrameDurationUs(33333), // Defaults to 30 fps + mDCTPartitions(0), + mErrorResilience(OMX_FALSE), + mColorFormat(OMX_COLOR_FormatYUV420Planar), + mLevel(OMX_VIDEO_VP8Level_Version0), + mConversionBuffer(NULL) { + + initPorts(); +} + + +SoftVPXEncoder::~SoftVPXEncoder() { + releaseEncoder(); +} + + +void SoftVPXEncoder::initPorts() { + OMX_PARAM_PORTDEFINITIONTYPE inputPort; + OMX_PARAM_PORTDEFINITIONTYPE outputPort; + + InitOMXParams(&inputPort); + InitOMXParams(&outputPort); + + inputPort.nBufferCountMin = kNumBuffers; + inputPort.nBufferCountActual = inputPort.nBufferCountMin; + inputPort.bEnabled = 
OMX_TRUE; + inputPort.bPopulated = OMX_FALSE; + inputPort.eDomain = OMX_PortDomainVideo; + inputPort.bBuffersContiguous = OMX_FALSE; + inputPort.format.video.pNativeRender = NULL; + inputPort.format.video.nFrameWidth = mWidth; + inputPort.format.video.nFrameHeight = mHeight; + inputPort.format.video.nStride = inputPort.format.video.nFrameWidth; + inputPort.format.video.nSliceHeight = inputPort.format.video.nFrameHeight; + inputPort.format.video.nBitrate = 0; + // frameRate is reciprocal of frameDuration, which is + // in microseconds. It is also in Q16 format. + inputPort.format.video.xFramerate = (1000000/mFrameDurationUs) << 16; + inputPort.format.video.bFlagErrorConcealment = OMX_FALSE; + inputPort.nPortIndex = kInputPortIndex; + inputPort.eDir = OMX_DirInput; + inputPort.nBufferAlignment = kInputBufferAlignment; + inputPort.format.video.cMIMEType = + const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW); + inputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused; + inputPort.format.video.eColorFormat = mColorFormat; + inputPort.format.video.pNativeWindow = NULL; + inputPort.nBufferSize = + (inputPort.format.video.nStride * + inputPort.format.video.nSliceHeight * 3) / 2; + + addPort(inputPort); + + outputPort.nBufferCountMin = kNumBuffers; + outputPort.nBufferCountActual = outputPort.nBufferCountMin; + outputPort.bEnabled = OMX_TRUE; + outputPort.bPopulated = OMX_FALSE; + outputPort.eDomain = OMX_PortDomainVideo; + outputPort.bBuffersContiguous = OMX_FALSE; + outputPort.format.video.pNativeRender = NULL; + outputPort.format.video.nFrameWidth = mWidth; + outputPort.format.video.nFrameHeight = mHeight; + outputPort.format.video.nStride = outputPort.format.video.nFrameWidth; + outputPort.format.video.nSliceHeight = outputPort.format.video.nFrameHeight; + outputPort.format.video.nBitrate = mBitrate; + outputPort.format.video.xFramerate = 0; + outputPort.format.video.bFlagErrorConcealment = OMX_FALSE; + outputPort.nPortIndex = kOutputPortIndex; + outputPort.eDir = 
OMX_DirOutput; + outputPort.nBufferAlignment = kOutputBufferAlignment; + outputPort.format.video.cMIMEType = + const_cast<char *>(MEDIA_MIMETYPE_VIDEO_VPX); + outputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingVPX; + outputPort.format.video.eColorFormat = OMX_COLOR_FormatUnused; + outputPort.format.video.pNativeWindow = NULL; + outputPort.nBufferSize = 256 * 1024; // arbitrary + + addPort(outputPort); +} + + +status_t SoftVPXEncoder::initEncoder() { + vpx_codec_err_t codec_return; + + mCodecContext = new vpx_codec_ctx_t; + mCodecConfiguration = new vpx_codec_enc_cfg_t; + mCodecInterface = vpx_codec_vp8_cx(); + + if (mCodecInterface == NULL) { + return UNKNOWN_ERROR; + } + + codec_return = vpx_codec_enc_config_default(mCodecInterface, + mCodecConfiguration, + 0); // Codec specific flags + + if (codec_return != VPX_CODEC_OK) { + ALOGE("Error populating default configuration for vpx encoder."); + return UNKNOWN_ERROR; + } + + mCodecConfiguration->g_w = mWidth; + mCodecConfiguration->g_h = mHeight; + mCodecConfiguration->g_threads = GetCPUCoreCount(); + mCodecConfiguration->g_error_resilient = mErrorResilience; + + switch (mLevel) { + case OMX_VIDEO_VP8Level_Version0: + mCodecConfiguration->g_profile = 0; + break; + + case OMX_VIDEO_VP8Level_Version1: + mCodecConfiguration->g_profile = 1; + break; + + case OMX_VIDEO_VP8Level_Version2: + mCodecConfiguration->g_profile = 2; + break; + + case OMX_VIDEO_VP8Level_Version3: + mCodecConfiguration->g_profile = 3; + break; + + default: + mCodecConfiguration->g_profile = 0; + } + + // OMX timebase unit is microsecond + // g_timebase is in seconds (i.e. 
1/1000000 seconds) + mCodecConfiguration->g_timebase.num = 1; + mCodecConfiguration->g_timebase.den = 1000000; + // rc_target_bitrate is in kbps, mBitrate in bps + mCodecConfiguration->rc_target_bitrate = mBitrate/1000; + mCodecConfiguration->rc_end_usage = mBitrateControlMode; + + codec_return = vpx_codec_enc_init(mCodecContext, + mCodecInterface, + mCodecConfiguration, + 0); // flags + + if (codec_return != VPX_CODEC_OK) { + ALOGE("Error initializing vpx encoder"); + return UNKNOWN_ERROR; + } + + codec_return = vpx_codec_control(mCodecContext, + VP8E_SET_TOKEN_PARTITIONS, + mDCTPartitions); + if (codec_return != VPX_CODEC_OK) { + ALOGE("Error setting dct partitions for vpx encoder."); + return UNKNOWN_ERROR; + } + + if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) { + if (mConversionBuffer == NULL) { + mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2); + if (mConversionBuffer == NULL) { + ALOGE("Allocating conversion buffer failed."); + return UNKNOWN_ERROR; + } + } + } + return OK; +} + + +status_t SoftVPXEncoder::releaseEncoder() { + if (mCodecContext != NULL) { + vpx_codec_destroy(mCodecContext); + delete mCodecContext; + mCodecContext = NULL; + } + + if (mCodecConfiguration != NULL) { + delete mCodecConfiguration; + mCodecConfiguration = NULL; + } + + if (mConversionBuffer != NULL) { + delete mConversionBuffer; + mConversionBuffer = NULL; + } + + // this one is not allocated by us + mCodecInterface = NULL; + + return OK; +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index, + OMX_PTR param) { + // can include extension index OMX_INDEXEXTTYPE + const int32_t indexFull = index; + + switch (indexFull) { + case OMX_IndexParamVideoPortFormat: { + OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams = + (OMX_VIDEO_PARAM_PORTFORMATTYPE *)param; + + if (formatParams->nPortIndex == kInputPortIndex) { + if (formatParams->nIndex >= kNumberOfSupportedColorFormats) { + return OMX_ErrorNoMore; + } + + // Color formats, in order of 
preference + if (formatParams->nIndex == 0) { + formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar; + } else if (formatParams->nIndex == 1) { + formatParams->eColorFormat = + OMX_COLOR_FormatYUV420SemiPlanar; + } else { + formatParams->eColorFormat = OMX_COLOR_FormatAndroidOpaque; + } + + formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused; + // Converting from microseconds + // Also converting to Q16 format + formatParams->xFramerate = (1000000/mFrameDurationUs) << 16; + return OMX_ErrorNone; + } else if (formatParams->nPortIndex == kOutputPortIndex) { + formatParams->eCompressionFormat = OMX_VIDEO_CodingVPX; + formatParams->eColorFormat = OMX_COLOR_FormatUnused; + formatParams->xFramerate = 0; + return OMX_ErrorNone; + } else { + return OMX_ErrorBadPortIndex; + } + } + + case OMX_IndexParamVideoBitrate: { + OMX_VIDEO_PARAM_BITRATETYPE *bitrate = + (OMX_VIDEO_PARAM_BITRATETYPE *)param; + + if (bitrate->nPortIndex != kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + bitrate->nTargetBitrate = mBitrate; + + if (mBitrateControlMode == VPX_VBR) { + bitrate->eControlRate = OMX_Video_ControlRateVariable; + } else if (mBitrateControlMode == VPX_CBR) { + bitrate->eControlRate = OMX_Video_ControlRateConstant; + } else { + return OMX_ErrorUnsupportedSetting; + } + return OMX_ErrorNone; + } + + // VP8 specific parameters that use extension headers + case OMX_IndexParamVideoVp8: { + OMX_VIDEO_PARAM_VP8TYPE *vp8Params = + (OMX_VIDEO_PARAM_VP8TYPE *)param; + + if (vp8Params->nPortIndex != kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain; + vp8Params->eLevel = mLevel; + vp8Params->nDCTPartitions = mDCTPartitions; + vp8Params->bErrorResilientMode = mErrorResilience; + return OMX_ErrorNone; + } + + case OMX_IndexParamVideoProfileLevelQuerySupported: { + OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileAndLevel = + (OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param; + + if (profileAndLevel->nPortIndex != 
kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + switch (profileAndLevel->nProfileIndex) { + case 0: + profileAndLevel->eLevel = OMX_VIDEO_VP8Level_Version0; + break; + + case 1: + profileAndLevel->eLevel = OMX_VIDEO_VP8Level_Version1; + break; + + case 2: + profileAndLevel->eLevel = OMX_VIDEO_VP8Level_Version2; + break; + + case 3: + profileAndLevel->eLevel = OMX_VIDEO_VP8Level_Version3; + break; + + default: + return OMX_ErrorNoMore; + } + + profileAndLevel->eProfile = OMX_VIDEO_VP8ProfileMain; + return OMX_ErrorNone; + } + + case OMX_IndexParamVideoProfileLevelCurrent: { + OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileAndLevel = + (OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param; + + if (profileAndLevel->nPortIndex != kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + profileAndLevel->eLevel = mLevel; + profileAndLevel->eProfile = OMX_VIDEO_VP8ProfileMain; + return OMX_ErrorNone; + } + + default: + return SimpleSoftOMXComponent::internalGetParameter(index, param); + } +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index, + const OMX_PTR param) { + // can include extension index OMX_INDEXEXTTYPE + const int32_t indexFull = index; + + switch (indexFull) { + case OMX_IndexParamStandardComponentRole: + return internalSetRoleParams( + (const OMX_PARAM_COMPONENTROLETYPE *)param); + + case OMX_IndexParamVideoBitrate: + return internalSetBitrateParams( + (const OMX_VIDEO_PARAM_BITRATETYPE *)param); + + case OMX_IndexParamPortDefinition: + return internalSetPortParams( + (const OMX_PARAM_PORTDEFINITIONTYPE *)param); + + case OMX_IndexParamVideoPortFormat: + return internalSetFormatParams( + (const OMX_VIDEO_PARAM_PORTFORMATTYPE *)param); + + case OMX_IndexParamVideoVp8: + return internalSetVp8Params( + (const OMX_VIDEO_PARAM_VP8TYPE *)param); + + case OMX_IndexParamVideoProfileLevelCurrent: + return internalSetProfileLevel( + (const OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param); + + default: + return 
SimpleSoftOMXComponent::internalSetParameter(index, param); + } +} + +OMX_ERRORTYPE SoftVPXEncoder::internalSetProfileLevel( + const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel) { + if (profileAndLevel->nPortIndex != kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + if (profileAndLevel->eProfile != OMX_VIDEO_VP8ProfileMain) { + return OMX_ErrorBadParameter; + } + + if (profileAndLevel->eLevel == OMX_VIDEO_VP8Level_Version0 || + profileAndLevel->eLevel == OMX_VIDEO_VP8Level_Version1 || + profileAndLevel->eLevel == OMX_VIDEO_VP8Level_Version2 || + profileAndLevel->eLevel == OMX_VIDEO_VP8Level_Version3) { + mLevel = (OMX_VIDEO_VP8LEVELTYPE)profileAndLevel->eLevel; + } else { + return OMX_ErrorBadParameter; + } + + return OMX_ErrorNone; +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalSetVp8Params( + const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) { + if (vp8Params->nPortIndex != kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) { + return OMX_ErrorBadParameter; + } + + if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 || + vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 || + vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 || + vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) { + mLevel = vp8Params->eLevel; + } else { + return OMX_ErrorBadParameter; + } + + if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) { + mDCTPartitions = vp8Params->nDCTPartitions; + } else { + return OMX_ErrorBadParameter; + } + + mErrorResilience = vp8Params->bErrorResilientMode; + return OMX_ErrorNone; +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalSetFormatParams( + const OMX_VIDEO_PARAM_PORTFORMATTYPE* format) { + if (format->nPortIndex == kInputPortIndex) { + if (format->eColorFormat == OMX_COLOR_FormatYUV420Planar || + format->eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || + format->eColorFormat == OMX_COLOR_FormatAndroidOpaque) { + mColorFormat = format->eColorFormat; + return OMX_ErrorNone; + } else 
{ + ALOGE("Unsupported color format %i", format->eColorFormat); + return OMX_ErrorUnsupportedSetting; + } + } else if (format->nPortIndex == kOutputPortIndex) { + if (format->eCompressionFormat == OMX_VIDEO_CodingVPX) { + return OMX_ErrorNone; + } else { + return OMX_ErrorUnsupportedSetting; + } + } else { + return OMX_ErrorBadPortIndex; + } +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalSetRoleParams( + const OMX_PARAM_COMPONENTROLETYPE* role) { + const char* roleText = (const char*)role->cRole; + const size_t roleTextMaxSize = OMX_MAX_STRINGNAME_SIZE - 1; + + if (strncmp(roleText, "video_encoder.vpx", roleTextMaxSize)) { + ALOGE("Unsupported component role"); + return OMX_ErrorBadParameter; + } + + return OMX_ErrorNone; +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalSetPortParams( + const OMX_PARAM_PORTDEFINITIONTYPE* port) { + if (port->nPortIndex == kInputPortIndex) { + mWidth = port->format.video.nFrameWidth; + mHeight = port->format.video.nFrameHeight; + + // xFramerate comes in Q16 format, in frames per second unit + const uint32_t framerate = port->format.video.xFramerate >> 16; + // frame duration is in microseconds + mFrameDurationUs = (1000000/framerate); + + if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar || + port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || + port->format.video.eColorFormat == OMX_COLOR_FormatAndroidOpaque) { + mColorFormat = port->format.video.eColorFormat; + } else { + return OMX_ErrorUnsupportedSetting; + } + + return OMX_ErrorNone; + } else if (port->nPortIndex == kOutputPortIndex) { + mBitrate = port->format.video.nBitrate; + return OMX_ErrorNone; + } else { + return OMX_ErrorBadPortIndex; + } +} + + +OMX_ERRORTYPE SoftVPXEncoder::internalSetBitrateParams( + const OMX_VIDEO_PARAM_BITRATETYPE* bitrate) { + if (bitrate->nPortIndex != kOutputPortIndex) { + return OMX_ErrorUnsupportedIndex; + } + + mBitrate = bitrate->nTargetBitrate; + + if (bitrate->eControlRate == 
OMX_Video_ControlRateVariable) { + mBitrateControlMode = VPX_VBR; + } else if (bitrate->eControlRate == OMX_Video_ControlRateConstant) { + mBitrateControlMode = VPX_CBR; + } else { + return OMX_ErrorUnsupportedSetting; + } + + return OMX_ErrorNone; +} + + +void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) { + // Initialize encoder if not already + if (mCodecContext == NULL) { + if (OK != initEncoder()) { + ALOGE("Failed to initialize encoder"); + notify(OMX_EventError, + OMX_ErrorUndefined, + 0, // Extra notification data + NULL); // Notification data pointer + return; + } + } + + vpx_codec_err_t codec_return; + List<BufferInfo *> &inputBufferInfoQueue = getPortQueue(kInputPortIndex); + List<BufferInfo *> &outputBufferInfoQueue = getPortQueue(kOutputPortIndex); + + while (!inputBufferInfoQueue.empty() && !outputBufferInfoQueue.empty()) { + BufferInfo *inputBufferInfo = *inputBufferInfoQueue.begin(); + OMX_BUFFERHEADERTYPE *inputBufferHeader = inputBufferInfo->mHeader; + + BufferInfo *outputBufferInfo = *outputBufferInfoQueue.begin(); + OMX_BUFFERHEADERTYPE *outputBufferHeader = outputBufferInfo->mHeader; + + if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) { + inputBufferInfoQueue.erase(inputBufferInfoQueue.begin()); + inputBufferInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inputBufferHeader); + + outputBufferHeader->nFilledLen = 0; + outputBufferHeader->nFlags = OMX_BUFFERFLAG_EOS; + + outputBufferInfoQueue.erase(outputBufferInfoQueue.begin()); + outputBufferInfo->mOwnedByUs = false; + notifyFillBufferDone(outputBufferHeader); + return; + } + + uint8_t* source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset; + + // NOTE: As much as nothing is known about color format + // when it is denoted as AndroidOpaque, it is at least + // assumed to be planar. 
+ if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) { + ConvertSemiPlanarToPlanar(source, mConversionBuffer, mWidth, mHeight); + source = mConversionBuffer; + } + vpx_image_t raw_frame; + vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight, + kInputBufferAlignment, source); + codec_return = vpx_codec_encode(mCodecContext, + &raw_frame, + inputBufferHeader->nTimeStamp, // in timebase units + mFrameDurationUs, // frame duration in timebase units + 0, // frame flags + VPX_DL_REALTIME); // encoding deadline + if (codec_return != VPX_CODEC_OK) { + ALOGE("vpx encoder failed to encode frame"); + notify(OMX_EventError, + OMX_ErrorUndefined, + 0, // Extra notification data + NULL); // Notification data pointer + return; + } + + vpx_codec_iter_t encoded_packet_iterator = NULL; + const vpx_codec_cx_pkt_t* encoded_packet; + + while ((encoded_packet = vpx_codec_get_cx_data( + mCodecContext, &encoded_packet_iterator))) { + if (encoded_packet->kind == VPX_CODEC_CX_FRAME_PKT) { + outputBufferHeader->nTimeStamp = encoded_packet->data.frame.pts; + outputBufferHeader->nFlags = 0; + outputBufferHeader->nOffset = 0; + outputBufferHeader->nFilledLen = encoded_packet->data.frame.sz; + memcpy(outputBufferHeader->pBuffer, + encoded_packet->data.frame.buf, + encoded_packet->data.frame.sz); + outputBufferInfo->mOwnedByUs = false; + outputBufferInfoQueue.erase(outputBufferInfoQueue.begin()); + notifyFillBufferDone(outputBufferHeader); + } + } + + inputBufferInfo->mOwnedByUs = false; + inputBufferInfoQueue.erase(inputBufferInfoQueue.begin()); + notifyEmptyBufferDone(inputBufferHeader); + } +} +} // namespace android + + +android::SoftOMXComponent *createSoftOMXComponent( + const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component) { + return new android::SoftVPXEncoder(name, callbacks, appData, component); +} diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h new file 
mode 100644 index 0000000..3bc05c0 --- /dev/null +++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SOFT_VPX_ENCODER_H_ + +#define SOFT_VPX_ENCODER_H_ + +#include "SimpleSoftOMXComponent.h" + +#include <OMX_VideoExt.h> +#include <OMX_IndexExt.h> + +#include "vpx/vpx_encoder.h" +#include "vpx/vpx_codec.h" +#include "vpx/vp8cx.h" + +namespace android { + +// Exposes a vpx encoder as an OMX Component +// +// Boilerplate for callback bindings are taken care +// by the base class SimpleSoftOMXComponent and its +// parent SoftOMXComponent. 
+// +// Only following encoder settings are available +// - target bitrate +// - rate control (constant / variable) +// - frame rate +// - error resilience +// - token partitioning +// - reconstruction & loop filters (g_profile) +// +// Only following color formats are recognized +// - YUV420Planar +// - YUV420SemiPlanar +// - AndroidOpaque +// +// Following settings are not configurable by the client +// - encoding deadline is realtime +// - multithreaded encoding utilizes a number of threads equal +// to online cpu's available +// - the algorithm interface for encoder is vp8 +// - fractional bits of frame rate is discarded +// - OMX timestamps are in microseconds, therefore +// encoder timebase is fixed to 1/1000000 + +class SoftVPXEncoder : public SimpleSoftOMXComponent { + public: + SoftVPXEncoder(const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + + protected: + virtual ~SoftVPXEncoder(); + + // Returns current values for requested OMX + // parameters + virtual OMX_ERRORTYPE internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR param); + + // Validates, extracts and stores relevant OMX + // parameters + virtual OMX_ERRORTYPE internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR param); + + // OMX callback when buffers available + // Note that both an input and output buffer + // is expected to be available to carry out + // encoding of the frame + virtual void onQueueFilled(OMX_U32 portIndex); + + private: + // number of buffers allocated per port + static const uint32_t kNumBuffers = 4; + + // OMX port indexes that refer to input and + // output ports respectively + static const uint32_t kInputPortIndex = 0; + static const uint32_t kOutputPortIndex = 1; + + // Byte-alignment required for buffers + static const uint32_t kInputBufferAlignment = 1; + static const uint32_t kOutputBufferAlignment = 2; + + // Max value supported for DCT partitions + static const uint32_t kMaxDCTPartitions = 3; + + // 
Number of supported input color formats + static const uint32_t kNumberOfSupportedColorFormats = 3; + + // vpx specific opaque data structure that + // stores encoder state + vpx_codec_ctx_t* mCodecContext; + + // vpx specific data structure that + // stores encoder configuration + vpx_codec_enc_cfg_t* mCodecConfiguration; + + // vpx specific read-only data structure + // that specifies algorithm interface (e.g. vp8) + vpx_codec_iface_t* mCodecInterface; + + // Width of the input frames + int32_t mWidth; + + // Height of the input frames + int32_t mHeight; + + // Target bitrate set for the encoder, in bits per second. + int32_t mBitrate; + + // Bitrate control mode, either constant or variable + vpx_rc_mode mBitrateControlMode; + + // Frame duration is the reciprocal of framerate, denoted + // in microseconds + uint64_t mFrameDurationUs; + + // vp8 specific configuration parameter + // that enables token partitioning of + // the stream into substreams + int32_t mDCTPartitions; + + // Parameter that denotes whether error resilience + // is enabled in encoder + OMX_BOOL mErrorResilience; + + // Color format for the input port + OMX_COLOR_FORMATTYPE mColorFormat; + + // Encoder profile corresponding to OMX level parameter + // + // The inconsistency in the naming is caused by + // OMX spec referring vpx profiles (g_profile) + // as "levels" whereas using the name "profile" for + // something else. + OMX_VIDEO_VP8LEVELTYPE mLevel; + + // Conversion buffer is needed to convert semi + // planar yuv420 to planar format + // It is only allocated if input format is + // indeed YUV420SemiPlanar. + uint8_t* mConversionBuffer; + + // Initializes input and output OMX ports with sensible + // default values. + void initPorts(); + + // Initializes vpx encoder with available settings. + status_t initEncoder(); + + // Releases vpx encoder instance, with it's associated + // data structures. + // + // Unless called earlier, this is handled by the + // dtor. 
+ status_t releaseEncoder(); + + // Handles port changes with respect to color formats + OMX_ERRORTYPE internalSetFormatParams( + const OMX_VIDEO_PARAM_PORTFORMATTYPE* format); + + // Verifies the component role tried to be set to this OMX component is + // strictly video_encoder.vpx + OMX_ERRORTYPE internalSetRoleParams( + const OMX_PARAM_COMPONENTROLETYPE* role); + + // Updates bitrate to reflect port settings. + OMX_ERRORTYPE internalSetBitrateParams( + const OMX_VIDEO_PARAM_BITRATETYPE* bitrate); + + // Handles port definition changes. + OMX_ERRORTYPE internalSetPortParams( + const OMX_PARAM_PORTDEFINITIONTYPE* port); + + // Handles vp8 specific parameters. + OMX_ERRORTYPE internalSetVp8Params( + const OMX_VIDEO_PARAM_VP8TYPE* vp8Params); + + // Updates encoder profile + OMX_ERRORTYPE internalSetProfileLevel( + const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel); + + DISALLOW_EVIL_CONSTRUCTORS(SoftVPXEncoder); +}; + +} // namespace android + +#endif // SOFT_VPX_ENCODER_H_ diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk index 772fd60..2539f98 100644 --- a/media/libstagefright/codecs/on2/h264dec/Android.mk +++ b/media/libstagefright/codecs/on2/h264dec/Android.mk @@ -97,7 +97,7 @@ ifeq ($(ARCH_ARM_HAVE_NEON),true) endif LOCAL_SHARED_LIBRARIES := \ - libstagefright libstagefright_omx libstagefright_foundation libutils \ + libstagefright libstagefright_omx libstagefright_foundation libutils liblog \ LOCAL_MODULE := libstagefright_soft_h264dec @@ -124,4 +124,3 @@ LOCAL_MODULE_TAGS := debug LOCAL_MODULE := decoder include $(BUILD_EXECUTABLE) - diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp index 6c3f834..6e36651 100644 --- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp +++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp @@ -311,18 +311,14 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) { BufferInfo *inInfo = 
*inQueue.begin(); OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; ++mPicId; - if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { - inQueue.erase(inQueue.begin()); - inInfo->mOwnedByUs = false; - notifyEmptyBufferDone(inHeader); - mEOSStatus = INPUT_EOS_SEEN; - continue; - } OMX_BUFFERHEADERTYPE *header = new OMX_BUFFERHEADERTYPE; memset(header, 0, sizeof(OMX_BUFFERHEADERTYPE)); header->nTimeStamp = inHeader->nTimeStamp; header->nFlags = inHeader->nFlags; + if (header->nFlags & OMX_BUFFERFLAG_EOS) { + mEOSStatus = INPUT_EOS_SEEN; + } mPicToHeaderMap.add(mPicId, header); inQueue.erase(inQueue.begin()); diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c index 53b2fd8..cc838fd 100755 --- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c +++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c @@ -220,7 +220,7 @@ u32 h264bsdNextMbAddress(u32 *pSliceGroupMap, u32 picSizeInMbs, u32 currMbAddr) /* Variables */ - u32 i, sliceGroup, tmp; + u32 i, sliceGroup; /* Code */ @@ -231,11 +231,9 @@ u32 h264bsdNextMbAddress(u32 *pSliceGroupMap, u32 picSizeInMbs, u32 currMbAddr) sliceGroup = pSliceGroupMap[currMbAddr]; i = currMbAddr + 1; - tmp = pSliceGroupMap[i]; - while ((i < picSizeInMbs) && (tmp != sliceGroup)) + while ((i < picSizeInMbs) && (pSliceGroupMap[i] != sliceGroup)) { i++; - tmp = pSliceGroupMap[i]; } if (i == picSizeInMbs) diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk index 285c747..fe90a03 100644 --- a/media/libstagefright/codecs/raw/Android.mk +++ b/media/libstagefright/codecs/raw/Android.mk @@ -9,7 +9,7 @@ LOCAL_C_INCLUDES := \ frameworks/native/include/media/openmax LOCAL_SHARED_LIBRARIES := \ - libstagefright_omx libstagefright_foundation libutils + libstagefright_omx libstagefright_foundation libutils liblog LOCAL_MODULE := libstagefright_soft_rawdec LOCAL_MODULE_TAGS := optional diff --git 
a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk index 395dd6b..2232353 100644 --- a/media/libstagefright/codecs/vorbis/dec/Android.mk +++ b/media/libstagefright/codecs/vorbis/dec/Android.mk @@ -11,10 +11,9 @@ LOCAL_C_INCLUDES := \ LOCAL_SHARED_LIBRARIES := \ libvorbisidec libstagefright libstagefright_omx \ - libstagefright_foundation libutils + libstagefright_foundation libutils liblog LOCAL_MODULE := libstagefright_soft_vorbisdec LOCAL_MODULE_TAGS := optional include $(BUILD_SHARED_LIBRARY) - diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp index fab0b0c..4115324 100644 --- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp +++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp @@ -410,6 +410,22 @@ void SoftVorbis::onPortFlushCompleted(OMX_U32 portIndex) { } } +void SoftVorbis::onReset() { + mInputBufferCount = 0; + mNumFramesOutput = 0; + if (mState != NULL) { + vorbis_dsp_clear(mState); + delete mState; + mState = NULL; + } + + if (mVi != NULL) { + vorbis_info_clear(mVi); + delete mVi; + mVi = NULL; + } +} + void SoftVorbis::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) { if (portIndex != 1) { return; diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h index e252f55..cb628a0 100644 --- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h +++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h @@ -43,6 +43,7 @@ protected: virtual void onQueueFilled(OMX_U32 portIndex); virtual void onPortFlushCompleted(OMX_U32 portIndex); virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled); + virtual void onReset(); private: enum { diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp index 2704a37..77f21b7 100644 --- 
a/media/libstagefright/colorconversion/SoftwareRenderer.cpp +++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp @@ -24,7 +24,7 @@ #include <media/stagefright/MetaData.h> #include <system/window.h> #include <ui/GraphicBufferMapper.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> namespace android { diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp index dff931d..ad10d2b 100644 --- a/media/libstagefright/foundation/ALooperRoster.cpp +++ b/media/libstagefright/foundation/ALooperRoster.cpp @@ -82,7 +82,8 @@ status_t ALooperRoster::postMessage_l( ssize_t index = mHandlers.indexOfKey(msg->target()); if (index < 0) { - ALOGW("failed to post message. Target handler not registered."); + ALOGW("failed to post message '%s'. Target handler not registered.", + msg->debugString().c_str()); return -ENOENT; } diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk index b7577d6..d65e213 100644 --- a/media/libstagefright/foundation/Android.mk +++ b/media/libstagefright/foundation/Android.mk @@ -20,6 +20,7 @@ LOCAL_C_INCLUDES:= \ LOCAL_SHARED_LIBRARIES := \ libbinder \ libutils \ + liblog LOCAL_CFLAGS += -Wno-multichar diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp index 733753b..505bdb3 100644 --- a/media/libstagefright/httplive/LiveSession.cpp +++ b/media/libstagefright/httplive/LiveSession.cpp @@ -40,10 +40,13 @@ namespace android { -LiveSession::LiveSession(uint32_t flags, bool uidValid, uid_t uid) - : mFlags(flags), +LiveSession::LiveSession( + const sp<AMessage> ¬ify, uint32_t flags, bool uidValid, uid_t uid) + : mNotify(notify), + mFlags(flags), mUIDValid(uidValid), mUID(uid), + mInPreparationPhase(true), mDataSource(new LiveDataSource), mHTTPDataSource( HTTPBase::Create( @@ -179,7 +182,7 @@ void LiveSession::onConnect(const sp<AMessage> &msg) { if (playlist == NULL) { 
ALOGE("unable to fetch master playlist '%s'.", url.c_str()); - mDataSource->queueEOS(ERROR_IO); + signalEOS(ERROR_IO); return; } @@ -207,7 +210,7 @@ void LiveSession::onConnect(const sp<AMessage> &msg) { void LiveSession::onDisconnect() { ALOGI("onDisconnect"); - mDataSource->queueEOS(ERROR_END_OF_STREAM); + signalEOS(ERROR_END_OF_STREAM); Mutex::Autolock autoLock(mLock); mDisconnectPending = false; @@ -561,7 +564,8 @@ rinse_repeat: // unchanged from the last time we tried. } else { ALOGE("failed to load playlist at url '%s'", url.c_str()); - mDataSource->queueEOS(ERROR_IO); + signalEOS(ERROR_IO); + return; } } else { @@ -627,22 +631,20 @@ rinse_repeat: if (index < mPlaylist->size()) { int32_t newSeqNumber = firstSeqNumberInPlaylist + index; - if (newSeqNumber != mSeqNumber) { - ALOGI("seeking to seq no %d", newSeqNumber); + ALOGI("seeking to seq no %d", newSeqNumber); - mSeqNumber = newSeqNumber; + mSeqNumber = newSeqNumber; - mDataSource->reset(); + mDataSource->reset(); - // reseting the data source will have had the - // side effect of discarding any previously queued - // bandwidth change discontinuity. - // Therefore we'll need to treat these seek - // discontinuities as involving a bandwidth change - // even if they aren't directly. - seekDiscontinuity = true; - bandwidthChanged = true; - } + // reseting the data source will have had the + // side effect of discarding any previously queued + // bandwidth change discontinuity. + // Therefore we'll need to treat these seek + // discontinuities as involving a bandwidth change + // even if they aren't directly. 
+ seekDiscontinuity = true; + bandwidthChanged = true; } } @@ -704,7 +706,7 @@ rinse_repeat: mSeqNumber, firstSeqNumberInPlaylist, firstSeqNumberInPlaylist + mPlaylist->size() - 1); - mDataSource->queueEOS(ERROR_END_OF_STREAM); + signalEOS(ERROR_END_OF_STREAM); return; } } @@ -737,7 +739,7 @@ rinse_repeat: status_t err = fetchFile(uri.c_str(), &buffer, range_offset, range_length); if (err != OK) { ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str()); - mDataSource->queueEOS(err); + signalEOS(err); return; } @@ -748,7 +750,7 @@ rinse_repeat: if (err != OK) { ALOGE("decryptBuffer failed w/ error %d", err); - mDataSource->queueEOS(err); + signalEOS(err); return; } @@ -760,7 +762,7 @@ rinse_repeat: mBandwidthItems.removeAt(bandwidthIndex); if (mBandwidthItems.isEmpty()) { - mDataSource->queueEOS(ERROR_UNSUPPORTED); + signalEOS(ERROR_UNSUPPORTED); return; } @@ -824,11 +826,42 @@ rinse_repeat: postMonitorQueue(); } +void LiveSession::signalEOS(status_t err) { + if (mInPreparationPhase && mNotify != NULL) { + sp<AMessage> notify = mNotify->dup(); + + notify->setInt32( + "what", + err == ERROR_END_OF_STREAM + ? 
kWhatPrepared : kWhatPreparationFailed); + + if (err != ERROR_END_OF_STREAM) { + notify->setInt32("err", err); + } + + notify->post(); + + mInPreparationPhase = false; + } + + mDataSource->queueEOS(err); +} + void LiveSession::onMonitorQueue() { if (mSeekTimeUs >= 0 || mDataSource->countQueuedBuffers() < kMaxNumQueuedFragments) { onDownloadNext(); } else { + if (mInPreparationPhase) { + if (mNotify != NULL) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatPrepared); + notify->post(); + } + + mInPreparationPhase = false; + } + postMonitorQueue(1000000ll); } } diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk index ff35d4a..80a1a3a 100644 --- a/media/libstagefright/id3/Android.mk +++ b/media/libstagefright/id3/Android.mk @@ -16,7 +16,7 @@ LOCAL_SRC_FILES := \ testid3.cpp LOCAL_SHARED_LIBRARIES := \ - libstagefright libutils libbinder libstagefright_foundation + libstagefright libutils liblog libbinder libstagefright_foundation LOCAL_STATIC_LIBRARIES := \ libstagefright_id3 diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h index 1422687..2306f31 100644 --- a/media/libstagefright/include/AwesomePlayer.h +++ b/media/libstagefright/include/AwesomePlayer.h @@ -36,7 +36,7 @@ struct MediaBuffer; struct MediaExtractor; struct MediaSource; struct NuCachedSource2; -struct ISurfaceTexture; +struct IGraphicBufferProducer; class DrmManagerClinet; class DecryptHandle; @@ -81,7 +81,7 @@ struct AwesomePlayer { bool isPlaying() const; - status_t setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture); + status_t setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer); void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink); status_t setLooping(bool shouldLoop); diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h index 82e08fd..785f939 100644 --- 
a/media/libstagefright/include/ChromiumHTTPDataSource.h +++ b/media/libstagefright/include/ChromiumHTTPDataSource.h @@ -53,6 +53,9 @@ struct ChromiumHTTPDataSource : public HTTPBase { virtual status_t reconnectAtOffset(off64_t offset); + static status_t UpdateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + protected: virtual ~ChromiumHTTPDataSource(); diff --git a/media/libstagefright/include/FragmentedMP4Extractor.h b/media/libstagefright/include/FragmentedMP4Extractor.h deleted file mode 100644 index 763cd3a..0000000 --- a/media/libstagefright/include/FragmentedMP4Extractor.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FRAGMENTED_MP4_EXTRACTOR_H_ - -#define FRAGMENTED_MP4_EXTRACTOR_H_ - -#include "include/FragmentedMP4Parser.h" - -#include <media/stagefright/MediaExtractor.h> -#include <utils/Vector.h> -#include <utils/String8.h> - -namespace android { - -struct AMessage; -class DataSource; -class SampleTable; -class String8; - -class FragmentedMP4Extractor : public MediaExtractor { -public: - // Extractor assumes ownership of "source". 
- FragmentedMP4Extractor(const sp<DataSource> &source); - - virtual size_t countTracks(); - virtual sp<MediaSource> getTrack(size_t index); - virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags); - virtual sp<MetaData> getMetaData(); - virtual uint32_t flags() const; - -protected: - virtual ~FragmentedMP4Extractor(); - -private: - sp<ALooper> mLooper; - sp<FragmentedMP4Parser> mParser; - sp<DataSource> mDataSource; - status_t mInitCheck; - size_t mAudioTrackIndex; - size_t mTrackCount; - - sp<MetaData> mFileMetaData; - - Vector<uint32_t> mPath; - - FragmentedMP4Extractor(const FragmentedMP4Extractor &); - FragmentedMP4Extractor &operator=(const FragmentedMP4Extractor &); -}; - -bool SniffFragmentedMP4( - const sp<DataSource> &source, String8 *mimeType, float *confidence, - sp<AMessage> *); - -} // namespace android - -#endif // MPEG4_EXTRACTOR_H_ diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h index 0edafb9..dbe02b8 100644 --- a/media/libstagefright/include/FragmentedMP4Parser.h +++ b/media/libstagefright/include/FragmentedMP4Parser.h @@ -263,7 +263,7 @@ private: void copyBuffer( sp<ABuffer> *dst, - size_t offset, uint64_t size, size_t extra = 0) const; + size_t offset, uint64_t size) const; DISALLOW_EVIL_CONSTRUCTORS(FragmentedMP4Parser); }; diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h index b8e10f7..c2dc351 100644 --- a/media/libstagefright/include/HTTPBase.h +++ b/media/libstagefright/include/HTTPBase.h @@ -48,6 +48,9 @@ struct HTTPBase : public DataSource { virtual status_t setBandwidthStatCollectFreq(int32_t freqMs); + static status_t UpdateProxyConfig( + const char *host, int32_t port, const char *exclusionList); + void setUID(uid_t uid); bool getUID(uid_t *uid) const; diff --git a/media/libstagefright/include/LiveSession.h b/media/libstagefright/include/LiveSession.h index f329cc9..db44a33 100644 --- 
a/media/libstagefright/include/LiveSession.h +++ b/media/libstagefright/include/LiveSession.h @@ -35,7 +35,9 @@ struct LiveSession : public AHandler { // Don't log any URLs. kFlagIncognito = 1, }; - LiveSession(uint32_t flags = 0, bool uidValid = false, uid_t uid = 0); + LiveSession( + const sp<AMessage> ¬ify, + uint32_t flags = 0, bool uidValid = false, uid_t uid = 0); sp<DataSource> getDataSource(); @@ -53,6 +55,12 @@ struct LiveSession : public AHandler { bool isSeekable() const; bool hasDynamicDuration() const; + // Posted notification's "what" field will carry one of the following: + enum { + kWhatPrepared, + kWhatPreparationFailed, + }; + protected: virtual ~LiveSession(); @@ -76,10 +84,13 @@ private: unsigned long mBandwidth; }; + sp<AMessage> mNotify; uint32_t mFlags; bool mUIDValid; uid_t mUID; + bool mInPreparationPhase; + sp<LiveDataSource> mDataSource; sp<HTTPBase> mHTTPDataSource; @@ -144,6 +155,8 @@ private: // This is computed by summing the durations of all segments before it. int64_t getSegmentStartTimeUs(int32_t seqNumber) const; + void signalEOS(status_t err); + DISALLOW_EVIL_CONSTRUCTORS(LiveSession); }; diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h index 5c549e0..35eff96 100644 --- a/media/libstagefright/include/MPEG4Extractor.h +++ b/media/libstagefright/include/MPEG4Extractor.h @@ -18,7 +18,12 @@ #define MPEG4_EXTRACTOR_H_ +#include <arpa/inet.h> + +#include <media/stagefright/DataSource.h> #include <media/stagefright/MediaExtractor.h> +#include <media/stagefright/Utils.h> +#include <utils/List.h> #include <utils/Vector.h> #include <utils/String8.h> @@ -29,6 +34,11 @@ class DataSource; class SampleTable; class String8; +struct SidxEntry { + size_t mSize; + uint32_t mDurationUs; +}; + class MPEG4Extractor : public MediaExtractor { public: // Extractor assumes ownership of "source". 
@@ -39,6 +49,7 @@ public: virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags); virtual sp<MetaData> getMetaData(); + virtual uint32_t flags() const; // for DRM virtual char* getDrmTrackInfo(size_t trackID, int *len); @@ -47,6 +58,12 @@ protected: virtual ~MPEG4Extractor(); private: + + struct PsshInfo { + uint8_t uuid[16]; + uint32_t datalen; + uint8_t *data; + }; struct Track { Track *next; sp<MetaData> meta; @@ -56,6 +73,12 @@ private: bool skipTrack; }; + Vector<SidxEntry> mSidxEntries; + uint64_t mSidxDuration; + off64_t mMoofOffset; + + Vector<PsshInfo> mPssh; + sp<DataSource> mDataSource; status_t mInitCheck; bool mHasVideo; @@ -93,6 +116,8 @@ private: status_t parseTrackHeader(off64_t data_offset, off64_t data_size); + status_t parseSegmentIndex(off64_t data_offset, size_t data_size); + Track *findTrackByMimePrefix(const char *mimePrefix); MPEG4Extractor(const MPEG4Extractor &); diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h index 2c87b34..24b8d98 100644 --- a/media/libstagefright/include/OMX.h +++ b/media/libstagefright/include/OMX.h @@ -79,6 +79,12 @@ public: node_id node, OMX_U32 port_index, const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer); + virtual status_t createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer); + + virtual status_t signalEndOfInputStream(node_id node); + virtual status_t allocateBuffer( node_id node, OMX_U32 port_index, size_t size, buffer_id *buffer, void **buffer_data); diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h index 47ca579..67aba6b 100644 --- a/media/libstagefright/include/OMXNodeInstance.h +++ b/media/libstagefright/include/OMXNodeInstance.h @@ -27,6 +27,7 @@ namespace android { class IOMXObserver; struct OMXMaster; +struct GraphicBufferSource; struct OMXNodeInstance { OMXNodeInstance( @@ -65,6 +66,11 @@ struct OMXNodeInstance { OMX_U32 portIndex, const 
sp<GraphicBuffer> &graphicBuffer, OMX::buffer_id *buffer); + status_t createInputSurface( + OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer); + + status_t signalEndOfInputStream(); + status_t allocateBuffer( OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer, void **buffer_data); @@ -82,12 +88,18 @@ struct OMXNodeInstance { OMX_U32 rangeOffset, OMX_U32 rangeLength, OMX_U32 flags, OMX_TICKS timestamp); + status_t emptyDirectBuffer( + OMX_BUFFERHEADERTYPE *header, + OMX_U32 rangeOffset, OMX_U32 rangeLength, + OMX_U32 flags, OMX_TICKS timestamp); + status_t getExtensionIndex( const char *parameterName, OMX_INDEXTYPE *index); void onMessage(const omx_message &msg); void onObserverDied(OMXMaster *master); void onGetHandleFailed(); + void onEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2); static OMX_CALLBACKTYPE kCallbacks; @@ -100,6 +112,13 @@ private: sp<IOMXObserver> mObserver; bool mDying; + // Lock only covers mGraphicBufferSource. We can't always use mLock + // because of rare instances where we'd end up locking it recursively. + Mutex mGraphicBufferSourceLock; + // Access this through getGraphicBufferSource(). 
+ sp<GraphicBufferSource> mGraphicBufferSource; + + struct ActiveBuffer { OMX_U32 mPortIndex; OMX::buffer_id mID; @@ -132,6 +151,11 @@ private: OMX_IN OMX_PTR pAppData, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer); + status_t storeMetaDataInBuffers_l(OMX_U32 portIndex, OMX_BOOL enable); + + sp<GraphicBufferSource> getGraphicBufferSource(); + void setGraphicBufferSource(const sp<GraphicBufferSource>& bufferSource); + OMXNodeInstance(const OMXNodeInstance &); OMXNodeInstance &operator=(const OMXNodeInstance &); }; diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/include/SDPLoader.h new file mode 100644 index 0000000..ca59dc0 --- /dev/null +++ b/media/libstagefright/include/SDPLoader.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SDP_LOADER_H_ + +#define SDP_LOADER_H_ + +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/foundation/AHandler.h> +#include <utils/String8.h> + +namespace android { + +struct HTTPBase; + +struct SDPLoader : public AHandler { + enum Flags { + // Don't log any URLs. 
+ kFlagIncognito = 1, + }; + enum { + kWhatSDPLoaded = 'sdpl' + }; + SDPLoader(const sp<AMessage> ¬ify, uint32_t flags = 0, bool uidValid = false, uid_t uid = 0); + + void load(const char* url, const KeyedVector<String8, String8> *headers); + + void cancel(); + +protected: + virtual ~SDPLoader() {} + + virtual void onMessageReceived(const sp<AMessage> &msg); + +private: + enum { + kWhatLoad = 'load', + }; + + void onLoad(const sp<AMessage> &msg); + + sp<AMessage> mNotify; + const char* mUrl; + uint32_t mFlags; + bool mUIDValid; + uid_t mUID; + sp<ALooper> mNetLooper; + bool mCancelled; + + sp<HTTPBase> mHTTPDataSource; + + DISALLOW_EVIL_CONSTRUCTORS(SDPLoader); +}; + +} // namespace android + +#endif // SDP_LOADER_H_ diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h index 50cd275..f8c61eb 100644 --- a/media/libstagefright/include/SimpleSoftOMXComponent.h +++ b/media/libstagefright/include/SimpleSoftOMXComponent.h @@ -71,6 +71,7 @@ protected: virtual void onPortFlushCompleted(OMX_U32 portIndex); virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled); + virtual void onReset(); PortInfo *editPortInfo(OMX_U32 portIndex); diff --git a/media/libstagefright/include/ThrottledSource.h b/media/libstagefright/include/ThrottledSource.h index 7fe7c06..673268b 100644 --- a/media/libstagefright/include/ThrottledSource.h +++ b/media/libstagefright/include/ThrottledSource.h @@ -28,18 +28,44 @@ struct ThrottledSource : public DataSource { const sp<DataSource> &source, int32_t bandwidthLimitBytesPerSecond); - virtual status_t initCheck() const; - + // implementation of readAt() that sleeps to achieve the desired max throughput virtual ssize_t readAt(off64_t offset, void *data, size_t size); - virtual status_t getSize(off64_t *size); - virtual uint32_t flags(); + // returns an empty string to prevent callers from using the Uri to construct a new datasource + virtual String8 getUri() { + return 
String8(); + } + + // following methods all call through to the wrapped DataSource's methods + + status_t initCheck() const { + return mSource->initCheck(); + } + + virtual status_t getSize(off64_t *size) { + return mSource->getSize(size); + } + + virtual uint32_t flags() { + return mSource->flags(); + } + + virtual status_t reconnectAtOffset(off64_t offset) { + return mSource->reconnectAtOffset(offset); + } + + virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL) { + return mSource->DrmInitialization(mime); + } + + virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) { + mSource->getDrmInfo(handle, client); + }; virtual String8 getMIMEType() const { return mSource->getMIMEType(); } - private: Mutex mLock; diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h index e418822..d517320 100644 --- a/media/libstagefright/include/avc_utils.h +++ b/media/libstagefright/include/avc_utils.h @@ -36,8 +36,11 @@ enum { kAVCProfileCAVLC444Intra = 0x2c }; +// Optionally returns sample aspect ratio as well. 
void FindAVCDimensions( - const sp<ABuffer> &seqParamSet, int32_t *width, int32_t *height); + const sp<ABuffer> &seqParamSet, + int32_t *width, int32_t *height, + int32_t *sarWidth = NULL, int32_t *sarHeight = NULL); unsigned parseUE(ABitReader *br); diff --git a/media/libstagefright/include/chromium_http_stub.h b/media/libstagefright/include/chromium_http_stub.h index 869d4ac..e0651a4 100644 --- a/media/libstagefright/include/chromium_http_stub.h +++ b/media/libstagefright/include/chromium_http_stub.h @@ -23,6 +23,10 @@ namespace android { extern "C" { HTTPBase *createChromiumHTTPDataSource(uint32_t flags); + +status_t UpdateChromiumHTTPDataSourceProxyConfig( + const char *host, int32_t port, const char *exclusionList); + DataSource *createDataUriSource(const char *uri); } } diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp index 8f7d12b..b304749 100644 --- a/media/libstagefright/matroska/MatroskaExtractor.cpp +++ b/media/libstagefright/matroska/MatroskaExtractor.cpp @@ -263,8 +263,8 @@ void BlockIterator::advance_l() { mCluster, nextCluster, pos, len); ALOGV("ParseNext returned %ld", res); - if (res > 0) { - // EOF + if (res != 0) { + // EOF or error mCluster = NULL; break; @@ -758,31 +758,69 @@ static void addESDSFromCodecPrivate( esds = NULL; } -void addVorbisCodecInfo( +status_t addVorbisCodecInfo( const sp<MetaData> &meta, const void *_codecPrivate, size_t codecPrivateSize) { - // printf("vorbis private data follows:\n"); // hexdump(_codecPrivate, codecPrivateSize); - CHECK(codecPrivateSize >= 3); + if (codecPrivateSize < 1) { + return ERROR_MALFORMED; + } const uint8_t *codecPrivate = (const uint8_t *)_codecPrivate; - CHECK(codecPrivate[0] == 0x02); - size_t len1 = codecPrivate[1]; - size_t len2 = codecPrivate[2]; + if (codecPrivate[0] != 0x02) { + return ERROR_MALFORMED; + } - CHECK(codecPrivateSize > 3 + len1 + len2); + // codecInfo starts with two lengths, len1 and len2, that are + // 
"Xiph-style-lacing encoded"... - CHECK(codecPrivate[3] == 0x01); - meta->setData(kKeyVorbisInfo, 0, &codecPrivate[3], len1); + size_t offset = 1; + size_t len1 = 0; + while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) { + len1 += 0xff; + ++offset; + } + if (offset >= codecPrivateSize) { + return ERROR_MALFORMED; + } + len1 += codecPrivate[offset++]; - CHECK(codecPrivate[len1 + 3] == 0x03); + size_t len2 = 0; + while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) { + len2 += 0xff; + ++offset; + } + if (offset >= codecPrivateSize) { + return ERROR_MALFORMED; + } + len2 += codecPrivate[offset++]; + + if (codecPrivateSize < offset + len1 + len2) { + return ERROR_MALFORMED; + } + + if (codecPrivate[offset] != 0x01) { + return ERROR_MALFORMED; + } + meta->setData(kKeyVorbisInfo, 0, &codecPrivate[offset], len1); + + offset += len1; + if (codecPrivate[offset] != 0x03) { + return ERROR_MALFORMED; + } + + offset += len2; + if (codecPrivate[offset] != 0x05) { + return ERROR_MALFORMED; + } - CHECK(codecPrivate[len1 + len2 + 3] == 0x05); meta->setData( - kKeyVorbisBooks, 0, &codecPrivate[len1 + len2 + 3], - codecPrivateSize - len1 - len2 - 3); + kKeyVorbisBooks, 0, &codecPrivate[offset], + codecPrivateSize - offset); + + return OK; } void MatroskaExtractor::addTracks() { @@ -809,6 +847,8 @@ void MatroskaExtractor::addTracks() { sp<MetaData> meta = new MetaData; + status_t err = OK; + switch (track->GetType()) { case VIDEO_TRACK: { @@ -855,7 +895,8 @@ void MatroskaExtractor::addTracks() { } else if (!strcmp("A_VORBIS", codecID)) { meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS); - addVorbisCodecInfo(meta, codecPrivate, codecPrivateSize); + err = addVorbisCodecInfo( + meta, codecPrivate, codecPrivateSize); } else if (!strcmp("A_MPEG/L3", codecID)) { meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG); } else { @@ -872,6 +913,11 @@ void MatroskaExtractor::addTracks() { continue; } + if (err != OK) { + ALOGE("skipping track, codec 
specific data was malformed."); + continue; + } + long long durationNs = mSegment->GetDuration(); meta->setInt64(kKeyDuration, (durationNs + 500) / 1000); diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp index 451c837..0102656 100644 --- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp +++ b/media/libstagefright/mp4/FragmentedMP4Parser.cpp @@ -18,6 +18,7 @@ #define LOG_TAG "FragmentedMP4Parser" #include <utils/Log.h> +#include "include/avc_utils.h" #include "include/ESDS.h" #include "include/FragmentedMP4Parser.h" #include "TrackFragment.h" @@ -323,8 +324,7 @@ status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) { off_t totalOffset = mFirstMoofOffset; for (int i = 0; i < numSidxEntries; i++) { const SidxEntry *se = &info->mSidx[i]; - totalTime += se->mDurationUs; - if (totalTime > position) { + if (totalTime + se->mDurationUs > position) { mBuffer->setRange(0,0); mBufferPos = totalOffset; if (mFinalResult == ERROR_END_OF_STREAM) { @@ -333,9 +333,10 @@ status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) { resumeIfNecessary(); } info->mFragments.clear(); - info->mDecodingTime = position * info->mMediaTimeScale / 1000000ll; + info->mDecodingTime = totalTime * info->mMediaTimeScale / 1000000ll; return OK; } + totalTime += se->mDurationUs; totalOffset += se->mSize; } } @@ -965,6 +966,10 @@ status_t FragmentedMP4Parser::makeAccessUnit( sample.mSize); (*accessUnit)->meta()->setInt64("timeUs", presentationTimeUs); + if (IsIDR(*accessUnit)) { + (*accessUnit)->meta()->setInt32("is-sync-frame", 1); + } + return OK; } @@ -1007,6 +1012,9 @@ status_t FragmentedMP4Parser::makeAccessUnit( "timeUs", presentationTimeUs); } } + if (IsIDR(*accessUnit)) { + (*accessUnit)->meta()->setInt32("is-sync-frame", 1); + } return OK; } @@ -1975,8 +1983,8 @@ status_t FragmentedMP4Parser::parseTrackFragmentRun( } void FragmentedMP4Parser::copyBuffer( - sp<ABuffer> *dst, size_t offset, 
uint64_t size, size_t extra) const { - sp<ABuffer> buf = new ABuffer(size + extra); + sp<ABuffer> *dst, size_t offset, uint64_t size) const { + sp<ABuffer> buf = new ABuffer(size); memcpy(buf->data(), mBuffer->data() + offset, size); *dst = buf; diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp index 4f6c4b2..9850a46 100644 --- a/media/libstagefright/mpeg2ts/ATSParser.cpp +++ b/media/libstagefright/mpeg2ts/ATSParser.cpp @@ -452,6 +452,10 @@ int64_t ATSParser::Program::convertPTSToTimestamp(uint64_t PTS) { timeUs += mParser->mAbsoluteTimeAnchorUs; } + if (mParser->mTimeOffsetValid) { + timeUs += mParser->mTimeOffsetUs; + } + return timeUs; } @@ -534,6 +538,16 @@ status_t ATSParser::Stream::parse( mBuffer->setRange(0, 0); mExpectedContinuityCounter = -1; +#if 0 + // Uncomment this if you'd rather see no corruption whatsoever on + // screen and suspend updates until we come across another IDR frame. + + if (mStreamType == STREAMTYPE_H264) { + ALOGI("clearing video queue"); + mQueue->clear(true /* clearFormat */); + } +#endif + return OK; } @@ -920,6 +934,8 @@ sp<MediaSource> ATSParser::Stream::getSource(SourceType type) { ATSParser::ATSParser(uint32_t flags) : mFlags(flags), mAbsoluteTimeAnchorUs(-1ll), + mTimeOffsetValid(false), + mTimeOffsetUs(0ll), mNumTSPacketsParsed(0), mNumPCRs(0) { mPSISections.add(0 /* PID */, new PSISection); @@ -950,6 +966,13 @@ void ATSParser::signalDiscontinuity( CHECK(mPrograms.empty()); mAbsoluteTimeAnchorUs = timeUs; return; + } else if (type == DISCONTINUITY_TIME_OFFSET) { + int64_t offset; + CHECK(extra->findInt64("offset", &offset)); + + mTimeOffsetValid = true; + mTimeOffsetUs = offset; + return; } for (size_t i = 0; i < mPrograms.size(); ++i) { @@ -1036,7 +1059,7 @@ status_t ATSParser::parsePID( ssize_t sectionIndex = mPSISections.indexOfKey(PID); if (sectionIndex >= 0) { - const sp<PSISection> §ion = mPSISections.valueAt(sectionIndex); + sp<PSISection> section = 
mPSISections.valueAt(sectionIndex); if (payload_unit_start_indicator) { CHECK(section->isEmpty()); @@ -1045,7 +1068,6 @@ status_t ATSParser::parsePID( br->skipBits(skip * 8); } - CHECK((br->numBitsLeft() % 8) == 0); status_t err = section->append(br->data(), br->numBitsLeft() / 8); @@ -1080,10 +1102,13 @@ status_t ATSParser::parsePID( if (!handled) { mPSISections.removeItem(PID); + section.clear(); } } - section->clear(); + if (section != NULL) { + section->clear(); + } return OK; } diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h index 46edc45..a10edc9 100644 --- a/media/libstagefright/mpeg2ts/ATSParser.h +++ b/media/libstagefright/mpeg2ts/ATSParser.h @@ -39,6 +39,7 @@ struct ATSParser : public RefBase { DISCONTINUITY_AUDIO_FORMAT = 2, DISCONTINUITY_VIDEO_FORMAT = 4, DISCONTINUITY_ABSOLUTE_TIME = 8, + DISCONTINUITY_TIME_OFFSET = 16, DISCONTINUITY_SEEK = DISCONTINUITY_TIME, @@ -106,6 +107,9 @@ private: int64_t mAbsoluteTimeAnchorUs; + bool mTimeOffsetValid; + int64_t mTimeOffsetUs; + size_t mNumTSPacketsParsed; void parseProgramAssociationTable(ABitReader *br); diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp index a605a05..3de3a61 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp @@ -28,9 +28,12 @@ namespace android { +const int64_t kNearEOSMarkUs = 2000000ll; // 2 secs + AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta) : mIsAudio(false), mFormat(meta), + mLastQueuedTimeUs(0), mEOSResult(OK) { const char *mime; CHECK(meta->findCString(kKeyMIMEType, &mime)); @@ -141,9 +144,8 @@ void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) { return; } - int64_t timeUs; - CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); - ALOGV("queueAccessUnit timeUs=%lld us (%.2f secs)", timeUs, timeUs / 1E6); + CHECK(buffer->meta()->findInt64("timeUs", 
&mLastQueuedTimeUs)); + ALOGV("queueAccessUnit timeUs=%lld us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6); Mutex::Autolock autoLock(mLock); mBuffers.push_back(buffer); @@ -171,6 +173,7 @@ void AnotherPacketSource::queueDiscontinuity( } mEOSResult = OK; + mLastQueuedTimeUs = 0; sp<ABuffer> buffer = new ABuffer(0); buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type)); @@ -247,4 +250,15 @@ status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) { return OK; } +bool AnotherPacketSource::isFinished(int64_t duration) const { + if (duration > 0) { + int64_t diff = duration - mLastQueuedTimeUs; + if (diff < kNearEOSMarkUs && diff > -kNearEOSMarkUs) { + ALOGV("Detecting EOS due to near end"); + return true; + } + } + return (mEOSResult != OK); +} + } // namespace android diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h index d685b98..1db4068 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h @@ -58,6 +58,8 @@ struct AnotherPacketSource : public MediaSource { status_t dequeueAccessUnit(sp<ABuffer> *buffer); + bool isFinished(int64_t duration) const; + protected: virtual ~AnotherPacketSource(); @@ -67,6 +69,7 @@ private: bool mIsAudio; sp<MetaData> mFormat; + int64_t mLastQueuedTimeUs; List<sp<ABuffer> > mBuffers; status_t mEOSResult; diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp index 82fb637..9f3b19c 100644 --- a/media/libstagefright/mpeg2ts/ESQueue.cpp +++ b/media/libstagefright/mpeg2ts/ESQueue.cpp @@ -147,9 +147,9 @@ status_t ElementaryStreamQueue::appendData( } if (startOffset > 0) { - ALOGI("found something resembling an H.264/MPEG syncword at " - "offset %ld", - startOffset); + ALOGI("found something resembling an H.264/MPEG syncword " + "at offset %d", + startOffset); } data = &ptr[startOffset]; @@ -180,9 +180,9 @@ status_t 
ElementaryStreamQueue::appendData( } if (startOffset > 0) { - ALOGI("found something resembling an H.264/MPEG syncword at " - "offset %ld", - startOffset); + ALOGI("found something resembling an H.264/MPEG syncword " + "at offset %d", + startOffset); } data = &ptr[startOffset]; @@ -213,8 +213,9 @@ status_t ElementaryStreamQueue::appendData( } if (startOffset > 0) { - ALOGI("found something resembling an AAC syncword at offset %ld", - startOffset); + ALOGI("found something resembling an AAC syncword at " + "offset %d", + startOffset); } data = &ptr[startOffset]; @@ -241,8 +242,8 @@ status_t ElementaryStreamQueue::appendData( if (startOffset > 0) { ALOGI("found something resembling an MPEG audio " - "syncword at offset %ld", - startOffset); + "syncword at offset %d", + startOffset); } data = &ptr[startOffset]; @@ -394,10 +395,30 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() { } sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() { - int64_t timeUs; + if (mBuffer->size() == 0) { + return NULL; + } + + CHECK(!mRangeInfos.empty()); + const RangeInfo &info = *mRangeInfos.begin(); + if (mBuffer->size() < info.mLength) { + return NULL; + } + + CHECK_GE(info.mTimestampUs, 0ll); + + // The idea here is consume all AAC frames starting at offsets before + // info.mLength so we can assign a meaningful timestamp without + // having to interpolate. + // The final AAC frame may well extend into the next RangeInfo but + // that's ok. size_t offset = 0; - while (offset + 7 <= mBuffer->size()) { + while (offset < info.mLength) { + if (offset + 7 > mBuffer->size()) { + return NULL; + } + ABitReader bits(mBuffer->data() + offset, mBuffer->size() - offset); // adts_fixed_header @@ -450,24 +471,15 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() { } if (offset + aac_frame_length > mBuffer->size()) { - break; + return NULL; } size_t headerSize = protection_absent ? 
7 : 9; - int64_t tmpUs = fetchTimestamp(aac_frame_length); - CHECK_GE(tmpUs, 0ll); - - if (offset == 0) { - timeUs = tmpUs; - } - offset += aac_frame_length; } - if (offset == 0) { - return NULL; - } + int64_t timeUs = fetchTimestamp(offset); sp<ABuffer> accessUnit = new ABuffer(offset); memcpy(accessUnit->data(), mBuffer->data(), offset); @@ -492,7 +504,6 @@ int64_t ElementaryStreamQueue::fetchTimestamp(size_t size) { if (first) { timeUs = info->mTimestampUs; - first = false; } if (info->mLength > size) { @@ -509,6 +520,8 @@ int64_t ElementaryStreamQueue::fetchTimestamp(size_t size) { mRangeInfos.erase(mRangeInfos.begin()); info = NULL; } + + first = false; } if (timeUs == 0ll) { @@ -536,7 +549,7 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() { size_t nalSize; bool foundSlice = false; while ((err = getNextNALUnit(&data, &size, &nalStart, &nalSize)) == OK) { - CHECK_GT(nalSize, 0u); + if (nalSize == 0) continue; unsigned nalType = nalStart[0] & 0x1f; bool flush = false; diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk index d7fbbbe..a8b4939 100644 --- a/media/libstagefright/omx/Android.mk +++ b/media/libstagefright/omx/Android.mk @@ -2,6 +2,7 @@ LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ + GraphicBufferSource.cpp \ OMX.cpp \ OMXMaster.cpp \ OMXNodeInstance.cpp \ @@ -18,7 +19,9 @@ LOCAL_SHARED_LIBRARIES := \ libbinder \ libmedia \ libutils \ + liblog \ libui \ + libgui \ libcutils \ libstagefright_foundation \ libdl diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp new file mode 100644 index 0000000..ef27879 --- /dev/null +++ b/media/libstagefright/omx/GraphicBufferSource.cpp @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "GraphicBufferSource" +//#define LOG_NDEBUG 0 +#include <utils/Log.h> + +#include <GraphicBufferSource.h> + +#include <OMX_Core.h> +#include <media/stagefright/foundation/ADebug.h> + +#include <MetadataBufferType.h> +#include <ui/GraphicBuffer.h> + +namespace android { + +static const bool EXTRA_CHECK = true; + + +GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance, + uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount) : + mInitCheck(UNKNOWN_ERROR), + mNodeInstance(nodeInstance), + mExecuting(false), + mNumFramesAvailable(0), + mEndOfStream(false), + mEndOfStreamSent(false) { + + ALOGV("GraphicBufferSource w=%u h=%u c=%u", + bufferWidth, bufferHeight, bufferCount); + + if (bufferWidth == 0 || bufferHeight == 0) { + ALOGE("Invalid dimensions %ux%u", bufferWidth, bufferHeight); + mInitCheck = BAD_VALUE; + return; + } + + String8 name("GraphicBufferSource"); + + mBufferQueue = new BufferQueue(true); + mBufferQueue->setConsumerName(name); + mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight); + mBufferQueue->setSynchronousMode(true); + mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER | + GRALLOC_USAGE_HW_TEXTURE); + + mInitCheck = mBufferQueue->setMaxAcquiredBufferCount(bufferCount); + if (mInitCheck != NO_ERROR) { + ALOGE("Unable to set BQ max acquired buffer count to %u: %d", + bufferCount, mInitCheck); + return; + } + + // Note that we can't create an sp<...>(this) in a ctor that will not keep a + // reference once the ctor ends, as that would cause the refcount of 
'this' + // dropping to 0 at the end of the ctor. Since all we need is a wp<...> + // that's what we create. + wp<BufferQueue::ConsumerListener> listener; + listener = static_cast<BufferQueue::ConsumerListener*>(this); + + sp<BufferQueue::ConsumerListener> proxy; + proxy = new BufferQueue::ProxyConsumerListener(listener); + + mInitCheck = mBufferQueue->consumerConnect(proxy); + if (mInitCheck != NO_ERROR) { + ALOGE("Error connecting to BufferQueue: %s (%d)", + strerror(-mInitCheck), mInitCheck); + return; + } + + CHECK(mInitCheck == NO_ERROR); +} + +GraphicBufferSource::~GraphicBufferSource() { + ALOGV("~GraphicBufferSource"); + if (mBufferQueue != NULL) { + status_t err = mBufferQueue->consumerDisconnect(); + if (err != NO_ERROR) { + ALOGW("consumerDisconnect failed: %d", err); + } + } +} + +void GraphicBufferSource::omxExecuting() { + Mutex::Autolock autoLock(mMutex); + ALOGV("--> executing; avail=%d, codec vec size=%zd", + mNumFramesAvailable, mCodecBuffers.size()); + CHECK(!mExecuting); + mExecuting = true; + + // Start by loading up as many buffers as possible. We want to do this, + // rather than just submit the first buffer, to avoid a degenerate case: + // if all BQ buffers arrive before we start executing, and we only submit + // one here, the other BQ buffers will just sit until we get notified + // that the codec buffer has been released. We'd then acquire and + // submit a single additional buffer, repeatedly, never using more than + // one codec buffer simultaneously. (We could instead try to submit + // all BQ buffers whenever any codec buffer is freed, but if we get the + // initial conditions right that will never be useful.) 
+ while (mNumFramesAvailable) { + if (!fillCodecBuffer_l()) { + ALOGV("stop load with frames available (codecAvail=%d)", + isCodecBufferAvailable_l()); + break; + } + } + + ALOGV("done loading initial frames, avail=%d", mNumFramesAvailable); + + // If EOS has already been signaled, and there are no more frames to + // submit, try to send EOS now as well. + if (mEndOfStream && mNumFramesAvailable == 0) { + submitEndOfInputStream_l(); + } +} + +void GraphicBufferSource::omxLoaded(){ + Mutex::Autolock autoLock(mMutex); + ALOGV("--> loaded"); + CHECK(mExecuting); + + ALOGV("Dropped down to loaded, avail=%d eos=%d eosSent=%d", + mNumFramesAvailable, mEndOfStream, mEndOfStreamSent); + + // Codec is no longer executing. Discard all codec-related state. + mCodecBuffers.clear(); + // TODO: scan mCodecBuffers to verify that all mGraphicBuffer entries + // are null; complain if not + + mExecuting = false; +} + +void GraphicBufferSource::addCodecBuffer(OMX_BUFFERHEADERTYPE* header) { + Mutex::Autolock autoLock(mMutex); + + if (mExecuting) { + // This should never happen -- buffers can only be allocated when + // transitioning from "loaded" to "idle". + ALOGE("addCodecBuffer: buffer added while executing"); + return; + } + + ALOGV("addCodecBuffer h=%p size=%lu p=%p", + header, header->nAllocLen, header->pBuffer); + CodecBuffer codecBuffer; + codecBuffer.mHeader = header; + mCodecBuffers.add(codecBuffer); +} + +void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) { + Mutex::Autolock autoLock(mMutex); + + CHECK(mExecuting); // could this happen if app stop()s early? + + int cbi = findMatchingCodecBuffer_l(header); + if (cbi < 0) { + // This should never happen. 
+ ALOGE("codecBufferEmptied: buffer not recognized (h=%p)", header); + return; + } + + ALOGV("codecBufferEmptied h=%p size=%lu filled=%lu p=%p", + header, header->nAllocLen, header->nFilledLen, + header->pBuffer); + CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi)); + + // header->nFilledLen may not be the original value, so we can't compare + // that to zero to see of this was the EOS buffer. Instead we just + // see if the GraphicBuffer reference was null, which should only ever + // happen for EOS. + if (codecBuffer.mGraphicBuffer == NULL) { + CHECK(mEndOfStream && mEndOfStreamSent); + // No GraphicBuffer to deal with, no additional input or output is + // expected, so just return. + return; + } + + if (EXTRA_CHECK) { + // Pull the graphic buffer handle back out of the buffer, and confirm + // that it matches expectations. + OMX_U8* data = header->pBuffer; + buffer_handle_t bufferHandle; + memcpy(&bufferHandle, data + 4, sizeof(buffer_handle_t)); + if (bufferHandle != codecBuffer.mGraphicBuffer->handle) { + // should never happen + ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p", + bufferHandle, codecBuffer.mGraphicBuffer->handle); + CHECK(!"codecBufferEmptied: mismatched buffer"); + } + } + + // Find matching entry in our cached copy of the BufferQueue slots. + // If we find a match, release that slot. If we don't, the BufferQueue + // has dropped that GraphicBuffer, and there's nothing for us to release. + // + // (We could store "id" in CodecBuffer and avoid the slot search.) 
+ int id; + for (id = 0; id < BufferQueue::NUM_BUFFER_SLOTS; id++) { + if (mBufferSlot[id] == NULL) { + continue; + } + + if (mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) { + ALOGV("cbi %d matches bq slot %d, handle=%p", + cbi, id, mBufferSlot[id]->handle); + + mBufferQueue->releaseBuffer(id, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, + Fence::NO_FENCE); + break; + } + } + if (id == BufferQueue::NUM_BUFFER_SLOTS) { + ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d", + cbi); + } + + // Mark the codec buffer as available by clearing the GraphicBuffer ref. + codecBuffer.mGraphicBuffer = NULL; + + if (mNumFramesAvailable) { + // Fill this codec buffer. + CHECK(!mEndOfStreamSent); + ALOGV("buffer freed, %d frames avail (eos=%d)", + mNumFramesAvailable, mEndOfStream); + fillCodecBuffer_l(); + } else if (mEndOfStream) { + // No frames available, but EOS is pending, so use this buffer to + // send that. + ALOGV("buffer freed, EOS pending"); + submitEndOfInputStream_l(); + } + return; +} + +bool GraphicBufferSource::fillCodecBuffer_l() { + CHECK(mExecuting && mNumFramesAvailable > 0); + + int cbi = findAvailableCodecBuffer_l(); + if (cbi < 0) { + // No buffers available, bail. + ALOGV("fillCodecBuffer_l: no codec buffers, avail now %d", + mNumFramesAvailable); + return false; + } + + ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d", + mNumFramesAvailable); + BufferQueue::BufferItem item; + status_t err = mBufferQueue->acquireBuffer(&item); + if (err == BufferQueue::NO_BUFFER_AVAILABLE) { + // shouldn't happen + ALOGW("fillCodecBuffer_l: frame was not available"); + return false; + } else if (err != OK) { + // now what? fake end-of-stream? + ALOGW("fillCodecBuffer_l: acquireBuffer returned err=%d", err); + return false; + } + + mNumFramesAvailable--; + + // Wait for it to become available. 
+ err = item.mFence->waitForever("GraphicBufferSource::fillCodecBuffer_l"); + if (err != OK) { + ALOGW("failed to wait for buffer fence: %d", err); + // keep going + } + + // If this is the first time we're seeing this buffer, add it to our + // slot table. + if (item.mGraphicBuffer != NULL) { + ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mBuf); + mBufferSlot[item.mBuf] = item.mGraphicBuffer; + } + + err = submitBuffer_l(mBufferSlot[item.mBuf], item.mTimestamp / 1000, cbi); + if (err != OK) { + ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf); + mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY, + EGL_NO_SYNC_KHR, Fence::NO_FENCE); + } else { + ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi); + } + + return true; +} + +status_t GraphicBufferSource::signalEndOfInputStream() { + Mutex::Autolock autoLock(mMutex); + ALOGV("signalEndOfInputStream: exec=%d avail=%d eos=%d", + mExecuting, mNumFramesAvailable, mEndOfStream); + + if (mEndOfStream) { + ALOGE("EOS was already signaled"); + return INVALID_OPERATION; + } + + // Set the end-of-stream flag. If no frames are pending from the + // BufferQueue, and a codec buffer is available, and we're executing, + // we initiate the EOS from here. Otherwise, we'll let + // codecBufferEmptied() (or omxExecuting) do it. + // + // Note: if there are no pending frames and all codec buffers are + // available, we *must* submit the EOS from here or we'll just + // stall since no future events are expected. 
+ mEndOfStream = true; + + if (mExecuting && mNumFramesAvailable == 0) { + submitEndOfInputStream_l(); + } + + return OK; +} + +status_t GraphicBufferSource::submitBuffer_l(sp<GraphicBuffer>& graphicBuffer, + int64_t timestampUsec, int cbi) { + ALOGV("submitBuffer_l cbi=%d", cbi); + CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi)); + codecBuffer.mGraphicBuffer = graphicBuffer; + + OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader; + CHECK(header->nAllocLen >= 4 + sizeof(buffer_handle_t)); + OMX_U8* data = header->pBuffer; + const OMX_U32 type = kMetadataBufferTypeGrallocSource; + buffer_handle_t handle = codecBuffer.mGraphicBuffer->handle; + memcpy(data, &type, 4); + memcpy(data + 4, &handle, sizeof(buffer_handle_t)); + + status_t err = mNodeInstance->emptyDirectBuffer(header, 0, + 4 + sizeof(buffer_handle_t), OMX_BUFFERFLAG_ENDOFFRAME, + timestampUsec); + if (err != OK) { + ALOGW("WARNING: emptyDirectBuffer failed: 0x%x", err); + codecBuffer.mGraphicBuffer = NULL; + return err; + } + + ALOGV("emptyDirectBuffer succeeded, h=%p p=%p bufhandle=%p", + header, header->pBuffer, handle); + return OK; +} + +void GraphicBufferSource::submitEndOfInputStream_l() { + CHECK(mEndOfStream); + if (mEndOfStreamSent) { + ALOGV("EOS already sent"); + return; + } + + int cbi = findAvailableCodecBuffer_l(); + if (cbi < 0) { + ALOGV("submitEndOfInputStream_l: no codec buffers available"); + return; + } + + // We reject any additional incoming graphic buffers, so there's no need + // to stick a placeholder into codecBuffer.mGraphicBuffer to mark it as + // in-use. + CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi)); + + OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader; + if (EXTRA_CHECK) { + // Guard against implementations that don't check nFilledLen. + size_t fillLen = 4 + sizeof(buffer_handle_t); + CHECK(header->nAllocLen >= fillLen); + OMX_U8* data = header->pBuffer; + memset(data, 0xcd, fillLen); + } + + uint64_t timestamp = 0; // does this matter? 
+ + status_t err = mNodeInstance->emptyDirectBuffer(header, /*offset*/ 0, + /*length*/ 0, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS, + timestamp); + if (err != OK) { + ALOGW("emptyDirectBuffer EOS failed: 0x%x", err); + } else { + ALOGV("submitEndOfInputStream_l: buffer submitted, header=%p cbi=%d", + header, cbi); + mEndOfStreamSent = true; + } +} + +int GraphicBufferSource::findAvailableCodecBuffer_l() { + CHECK(mCodecBuffers.size() > 0); + + for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) { + if (mCodecBuffers[i].mGraphicBuffer == NULL) { + return i; + } + } + return -1; +} + +int GraphicBufferSource::findMatchingCodecBuffer_l( + const OMX_BUFFERHEADERTYPE* header) { + for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) { + if (mCodecBuffers[i].mHeader == header) { + return i; + } + } + return -1; +} + +// BufferQueue::ConsumerListener callback +void GraphicBufferSource::onFrameAvailable() { + Mutex::Autolock autoLock(mMutex); + + ALOGV("onFrameAvailable exec=%d avail=%d", + mExecuting, mNumFramesAvailable); + + if (mEndOfStream) { + // This should only be possible if a new buffer was queued after + // EOS was signaled, i.e. the app is misbehaving. 
+ ALOGW("onFrameAvailable: EOS is set, ignoring frame"); + + BufferQueue::BufferItem item; + status_t err = mBufferQueue->acquireBuffer(&item); + if (err == OK) { + mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY, + EGL_NO_SYNC_KHR, item.mFence); + } + return; + } + + mNumFramesAvailable++; + + if (mExecuting) { + fillCodecBuffer_l(); + } +} + +// BufferQueue::ConsumerListener callback +void GraphicBufferSource::onBuffersReleased() { + Mutex::Autolock lock(mMutex); + + uint32_t slotMask; + if (mBufferQueue->getReleasedBuffers(&slotMask) != NO_ERROR) { + ALOGW("onBuffersReleased: unable to get released buffer set"); + slotMask = 0xffffffff; + } + + ALOGV("onBuffersReleased: 0x%08x", slotMask); + + for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) { + if ((slotMask & 0x01) != 0) { + mBufferSlot[i] = NULL; + } + slotMask >>= 1; + } +} + +} // namespace android diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h new file mode 100644 index 0000000..562d342 --- /dev/null +++ b/media/libstagefright/omx/GraphicBufferSource.h @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef GRAPHIC_BUFFER_SOURCE_H_ + +#define GRAPHIC_BUFFER_SOURCE_H_ + +#include <gui/IGraphicBufferProducer.h> +#include <gui/BufferQueue.h> +#include <utils/RefBase.h> + +#include <OMX_Core.h> +#include "../include/OMXNodeInstance.h" +#include <media/stagefright/foundation/ABase.h> + +namespace android { + +/* + * This class is used to feed OMX codecs from a Surface via BufferQueue. + * + * Instances of the class don't run on a dedicated thread. Instead, + * various events trigger data movement: + * + * - Availability of a new frame of data from the BufferQueue (notified + * via the onFrameAvailable callback). + * - The return of a codec buffer (via OnEmptyBufferDone). + * - Application signaling end-of-stream. + * - Transition to or from "executing" state. + * + * Frames of data (and, perhaps, the end-of-stream indication) can arrive + * before the codec is in the "executing" state, so we need to queue + * things up until we're ready to go. + */ +class GraphicBufferSource : public BufferQueue::ConsumerListener { +public: + GraphicBufferSource(OMXNodeInstance* nodeInstance, + uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount); + virtual ~GraphicBufferSource(); + + // We can't throw an exception if the constructor fails, so we just set + // this and require that the caller test the value. + status_t initCheck() const { + return mInitCheck; + } + + // Returns the handle to the producer side of the BufferQueue. Buffers + // queued on this will be received by GraphicBufferSource. + sp<IGraphicBufferProducer> getIGraphicBufferProducer() const { + return mBufferQueue; + } + + // This is called when OMX transitions to OMX_StateExecuting, which means + // we can start handing it buffers. If we already have buffers of data + // sitting in the BufferQueue, this will send them to the codec. + void omxExecuting(); + + // This is called when OMX transitions to OMX_StateLoaded, indicating that + // we are shutting down. 
+ void omxLoaded(); + + // A "codec buffer", i.e. a buffer that can be used to pass data into + // the encoder, has been allocated. (This call does not call back into + // OMXNodeInstance.) + void addCodecBuffer(OMX_BUFFERHEADERTYPE* header); + + // Called from OnEmptyBufferDone. If we have a BQ buffer available, + // fill it with a new frame of data; otherwise, just mark it as available. + void codecBufferEmptied(OMX_BUFFERHEADERTYPE* header); + + // This is called after the last input frame has been submitted. We + // need to submit an empty buffer with the EOS flag set. If we don't + // have a codec buffer ready, we just set the mEndOfStream flag. + status_t signalEndOfInputStream(); + +protected: + // BufferQueue::ConsumerListener interface, called when a new frame of + // data is available. If we're executing and a codec buffer is + // available, we acquire the buffer, copy the GraphicBuffer reference + // into the codec buffer, and call Empty[This]Buffer. If we're not yet + // executing or there's no codec buffer available, we just increment + // mNumFramesAvailable and return. + virtual void onFrameAvailable(); + + // BufferQueue::ConsumerListener interface, called when the client has + // released one or more GraphicBuffers. We clear out the appropriate + // set of mBufferSlot entries. + virtual void onBuffersReleased(); + +private: + // Keep track of codec input buffers. They may either be available + // (mGraphicBuffer == NULL) or in use by the codec. + struct CodecBuffer { + OMX_BUFFERHEADERTYPE* mHeader; + sp<GraphicBuffer> mGraphicBuffer; + }; + + // Returns the index of an available codec buffer. If none are + // available, returns -1. Mutex must be held by caller. + int findAvailableCodecBuffer_l(); + + // Returns true if a codec buffer is available. + bool isCodecBufferAvailable_l() { + return findAvailableCodecBuffer_l() >= 0; + } + + // Finds the mCodecBuffers entry that matches. Returns -1 if not found. 
+ int findMatchingCodecBuffer_l(const OMX_BUFFERHEADERTYPE* header); + + // Fills a codec buffer with a frame from the BufferQueue. This must + // only be called when we know that a frame of data is ready (i.e. we're + // in the onFrameAvailable callback, or if we're in codecBufferEmptied + // and mNumFramesAvailable is nonzero). Returns without doing anything if + // we don't have a codec buffer available. + // + // Returns true if we successfully filled a codec buffer with a BQ buffer. + bool fillCodecBuffer_l(); + + // Marks the mCodecBuffers entry as in-use, copies the GraphicBuffer + // reference into the codec buffer, and submits the data to the codec. + status_t submitBuffer_l(sp<GraphicBuffer>& graphicBuffer, + int64_t timestampUsec, int cbi); + + // Submits an empty buffer, with the EOS flag set. Returns without + // doing anything if we don't have a codec buffer available. + void submitEndOfInputStream_l(); + + // Lock, covers all member variables. + mutable Mutex mMutex; + + // Used to report constructor failure. + status_t mInitCheck; + + // Pointer back to the object that contains us. We send buffers here. + OMXNodeInstance* mNodeInstance; + + // Set by omxExecuting() / omxIdling(). + bool mExecuting; + + // We consume graphic buffers from this. + sp<BufferQueue> mBufferQueue; + + // Number of frames pending in BufferQueue that haven't yet been + // forwarded to the codec. + size_t mNumFramesAvailable; + + // Set to true if we want to send end-of-stream after we run out of + // frames in BufferQueue. + bool mEndOfStream; + bool mEndOfStreamSent; + + // Cache of GraphicBuffers from the buffer queue. When the codec + // is done processing a GraphicBuffer, we can use this to map back + // to a slot number. + sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS]; + + // Tracks codec buffers. 
+ Vector<CodecBuffer> mCodecBuffers; + + DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource); +}; + +} // namespace android + +#endif // GRAPHIC_BUFFER_SOURCE_H_ diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp index 29bc733..3987ead 100644 --- a/media/libstagefright/omx/OMX.cpp +++ b/media/libstagefright/omx/OMX.cpp @@ -345,6 +345,17 @@ status_t OMX::useGraphicBuffer( port_index, graphicBuffer, buffer); } +status_t OMX::createInputSurface( + node_id node, OMX_U32 port_index, + sp<IGraphicBufferProducer> *bufferProducer) { + return findInstance(node)->createInputSurface( + port_index, bufferProducer); +} + +status_t OMX::signalEndOfInputStream(node_id node) { + return findInstance(node)->signalEndOfInputStream(); +} + status_t OMX::allocateBuffer( node_id node, OMX_U32 port_index, size_t size, buffer_id *buffer, void **buffer_data) { @@ -393,6 +404,9 @@ OMX_ERRORTYPE OMX::OnEvent( OMX_IN OMX_PTR pEventData) { ALOGV("OnEvent(%d, %ld, %ld)", eEvent, nData1, nData2); + // Forward to OMXNodeInstance. 
+ findInstance(node)->onEvent(eEvent, nData1, nData2); + omx_message msg; msg.type = omx_message::EVENT; msg.node = node; diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp index bff3def..a9eb94f 100644 --- a/media/libstagefright/omx/OMXNodeInstance.cpp +++ b/media/libstagefright/omx/OMXNodeInstance.cpp @@ -20,14 +20,18 @@ #include "../include/OMXNodeInstance.h" #include "OMXMaster.h" +#include "GraphicBufferSource.h" #include <OMX_Component.h> #include <binder/IMemory.h> +#include <gui/BufferQueue.h> #include <HardwareAPI.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaErrors.h> +static const OMX_U32 kPortIndexInput = 0; + namespace android { struct BufferMeta { @@ -100,6 +104,17 @@ void OMXNodeInstance::setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle) { mHandle = handle; } +sp<GraphicBufferSource> OMXNodeInstance::getGraphicBufferSource() { + Mutex::Autolock autoLock(mGraphicBufferSourceLock); + return mGraphicBufferSource; +} + +void OMXNodeInstance::setGraphicBufferSource( + const sp<GraphicBufferSource>& bufferSource) { + Mutex::Autolock autoLock(mGraphicBufferSourceLock); + mGraphicBufferSource = bufferSource; +} + OMX *OMXNodeInstance::owner() { return mOwner; } @@ -277,15 +292,16 @@ status_t OMXNodeInstance::getState(OMX_STATETYPE* state) { status_t OMXNodeInstance::enableGraphicBuffers( OMX_U32 portIndex, OMX_BOOL enable) { Mutex::Autolock autoLock(mLock); + OMX_STRING name = const_cast<OMX_STRING>( + "OMX.google.android.index.enableAndroidNativeBuffers"); OMX_INDEXTYPE index; - OMX_ERRORTYPE err = OMX_GetExtensionIndex( - mHandle, - const_cast<OMX_STRING>("OMX.google.android.index.enableAndroidNativeBuffers"), - &index); + OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index); if (err != OMX_ErrorNone) { - ALOGE("OMX_GetExtensionIndex failed"); + if (enable) { + ALOGE("OMX_GetExtensionIndex %s failed", name); + } return StatusFromOMXError(err); } @@ 
-316,14 +332,12 @@ status_t OMXNodeInstance::getGraphicBufferUsage( Mutex::Autolock autoLock(mLock); OMX_INDEXTYPE index; - OMX_ERRORTYPE err = OMX_GetExtensionIndex( - mHandle, - const_cast<OMX_STRING>( - "OMX.google.android.index.getAndroidNativeBufferUsage"), - &index); + OMX_STRING name = const_cast<OMX_STRING>( + "OMX.google.android.index.getAndroidNativeBufferUsage"); + OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index); if (err != OMX_ErrorNone) { - ALOGE("OMX_GetExtensionIndex failed"); + ALOGE("OMX_GetExtensionIndex %s failed", name); return StatusFromOMXError(err); } @@ -354,7 +368,12 @@ status_t OMXNodeInstance::storeMetaDataInBuffers( OMX_U32 portIndex, OMX_BOOL enable) { Mutex::Autolock autolock(mLock); + return storeMetaDataInBuffers_l(portIndex, enable); +} +status_t OMXNodeInstance::storeMetaDataInBuffers_l( + OMX_U32 portIndex, + OMX_BOOL enable) { OMX_INDEXTYPE index; OMX_STRING name = const_cast<OMX_STRING>( "OMX.google.android.index.storeMetaDataInBuffers"); @@ -362,6 +381,7 @@ status_t OMXNodeInstance::storeMetaDataInBuffers( OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index); if (err != OMX_ErrorNone) { ALOGE("OMX_GetExtensionIndex %s failed", name); + return StatusFromOMXError(err); } @@ -411,6 +431,11 @@ status_t OMXNodeInstance::useBuffer( addActiveBuffer(portIndex, *buffer); + sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); + if (bufferSource != NULL && portIndex == kPortIndexInput) { + bufferSource->addCodecBuffer(header); + } + return OK; } @@ -482,13 +507,12 @@ status_t OMXNodeInstance::useGraphicBuffer( return useGraphicBuffer2_l(portIndex, graphicBuffer, buffer); } - OMX_ERRORTYPE err = OMX_GetExtensionIndex( - mHandle, - const_cast<OMX_STRING>("OMX.google.android.index.useAndroidNativeBuffer"), - &index); + OMX_STRING name = const_cast<OMX_STRING>( + "OMX.google.android.index.useAndroidNativeBuffer"); + OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index); if (err != 
OMX_ErrorNone) { - ALOGE("OMX_GetExtensionIndex failed"); + ALOGE("OMX_GetExtensionIndex %s failed", name); return StatusFromOMXError(err); } @@ -530,6 +554,65 @@ status_t OMXNodeInstance::useGraphicBuffer( return OK; } +status_t OMXNodeInstance::createInputSurface( + OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer) { + Mutex::Autolock autolock(mLock); + status_t err; + + const sp<GraphicBufferSource>& surfaceCheck = getGraphicBufferSource(); + if (surfaceCheck != NULL) { + return ALREADY_EXISTS; + } + + // Input buffers will hold meta-data (gralloc references). + err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE); + if (err != OK) { + return err; + } + + // Retrieve the width and height of the graphic buffer, set when the + // codec was configured. + OMX_PARAM_PORTDEFINITIONTYPE def; + def.nSize = sizeof(def); + def.nVersion.s.nVersionMajor = 1; + def.nVersion.s.nVersionMinor = 0; + def.nVersion.s.nRevision = 0; + def.nVersion.s.nStep = 0; + def.nPortIndex = portIndex; + OMX_ERRORTYPE oerr = OMX_GetParameter( + mHandle, OMX_IndexParamPortDefinition, &def); + CHECK(oerr == OMX_ErrorNone); + + if (def.format.video.eColorFormat != OMX_COLOR_FormatAndroidOpaque) { + ALOGE("createInputSurface requires AndroidOpaque color format"); + return INVALID_OPERATION; + } + + GraphicBufferSource* bufferSource = new GraphicBufferSource( + this, def.format.video.nFrameWidth, def.format.video.nFrameHeight, + def.nBufferCountActual); + if ((err = bufferSource->initCheck()) != OK) { + delete bufferSource; + return err; + } + setGraphicBufferSource(bufferSource); + + *bufferProducer = bufferSource->getIGraphicBufferProducer(); + return OK; +} + +status_t OMXNodeInstance::signalEndOfInputStream() { + // For non-Surface input, the MediaCodec should convert the call to a + // pair of requests (dequeue input buffer, queue input buffer with EOS + // flag set). Seems easier than doing the equivalent from here. 
+ sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); + if (bufferSource == NULL) { + ALOGW("signalEndOfInputStream can only be used with Surface input"); + return INVALID_OPERATION; + }; + return bufferSource->signalEndOfInputStream(); +} + status_t OMXNodeInstance::allocateBuffer( OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer, void **buffer_data) { @@ -560,6 +643,11 @@ status_t OMXNodeInstance::allocateBuffer( addActiveBuffer(portIndex, *buffer); + sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); + if (bufferSource != NULL && portIndex == kPortIndexInput) { + bufferSource->addCodecBuffer(header); + } + return OK; } @@ -592,6 +680,11 @@ status_t OMXNodeInstance::allocateBufferWithBackup( addActiveBuffer(portIndex, *buffer); + sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); + if (bufferSource != NULL && portIndex == kPortIndexInput) { + bufferSource->addCodecBuffer(header); + } + return OK; } @@ -646,6 +739,26 @@ status_t OMXNodeInstance::emptyBuffer( return StatusFromOMXError(err); } +// like emptyBuffer, but the data is already in header->pBuffer +status_t OMXNodeInstance::emptyDirectBuffer( + OMX_BUFFERHEADERTYPE *header, + OMX_U32 rangeOffset, OMX_U32 rangeLength, + OMX_U32 flags, OMX_TICKS timestamp) { + Mutex::Autolock autoLock(mLock); + + header->nFilledLen = rangeLength; + header->nOffset = rangeOffset; + header->nFlags = flags; + header->nTimeStamp = timestamp; + + OMX_ERRORTYPE err = OMX_EmptyThisBuffer(mHandle, header); + if (err != OMX_ErrorNone) { + ALOGW("emptyDirectBuffer failed, OMX err=0x%x", err); + } + + return StatusFromOMXError(err); +} + status_t OMXNodeInstance::getExtensionIndex( const char *parameterName, OMX_INDEXTYPE *index) { Mutex::Autolock autoLock(mLock); @@ -666,6 +779,23 @@ void OMXNodeInstance::onMessage(const omx_message &msg) { static_cast<BufferMeta *>(buffer->pAppPrivate); buffer_meta->CopyFromOMX(buffer); + } else if (msg.type == omx_message::EMPTY_BUFFER_DONE) { + const 
sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource()); + + if (bufferSource != NULL) { + // This is one of the buffers used exclusively by + // GraphicBufferSource. + // Don't dispatch a message back to ACodec, since it doesn't + // know that anyone asked to have the buffer emptied and will + // be very confused. + + OMX_BUFFERHEADERTYPE *buffer = + static_cast<OMX_BUFFERHEADERTYPE *>( + msg.u.buffer_data.buffer); + + bufferSource->codecBufferEmptied(buffer); + return; + } } mObserver->onMessage(msg); @@ -682,6 +812,25 @@ void OMXNodeInstance::onGetHandleFailed() { delete this; } +// OMXNodeInstance::OnEvent calls OMX::OnEvent, which then calls here. +// Don't try to acquire mLock here -- in rare circumstances this will hang. +void OMXNodeInstance::onEvent( + OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) { + const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource()); + + if (bufferSource != NULL && event == OMX_EventCmdComplete && + arg1 == OMX_CommandStateSet) { + if (arg2 == OMX_StateExecuting) { + bufferSource->omxExecuting(); + } else if (arg2 == OMX_StateLoaded) { + // Must be shutting down -- won't have a GraphicBufferSource + // on the way up. 
+ bufferSource->omxLoaded(); + setGraphicBufferSource(NULL); + } + } +} + // static OMX_ERRORTYPE OMXNodeInstance::OnEvent( OMX_IN OMX_HANDLETYPE hComponent, diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp index c79e01f..4999663 100644 --- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp +++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp @@ -450,6 +450,10 @@ void SimpleSoftOMXComponent::onChangeState(OMX_STATETYPE state) { checkTransitions(); } +void SimpleSoftOMXComponent::onReset() { + // no-op +} + void SimpleSoftOMXComponent::onPortEnable(OMX_U32 portIndex, bool enable) { CHECK_LT(portIndex, mPorts.size()); @@ -581,6 +585,10 @@ void SimpleSoftOMXComponent::checkTransitions() { if (transitionComplete) { mState = mTargetState; + if (mState == OMX_StateLoaded) { + onReset(); + } + notify(OMX_EventCmdComplete, OMX_CommandStateSet, mState, NULL); } } diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp index 3747b3b..b3fe98e 100644 --- a/media/libstagefright/omx/SoftOMXPlugin.cpp +++ b/media/libstagefright/omx/SoftOMXPlugin.cpp @@ -51,8 +51,10 @@ static const struct { { "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" }, { "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" }, { "OMX.google.vpx.decoder", "vpxdec", "video_decoder.vpx" }, + { "OMX.google.vpx.encoder", "vpxenc", "video_encoder.vpx" }, { "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" }, { "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" }, + { "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" }, }; static const size_t kNumComponents = diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk index 04441ca..1061c39 100644 --- a/media/libstagefright/omx/tests/Android.mk +++ b/media/libstagefright/omx/tests/Android.mk @@ -5,7 +5,7 @@ LOCAL_SRC_FILES = \ OMXHarness.cpp \ LOCAL_SHARED_LIBRARIES 
:= \ - libstagefright libbinder libmedia libutils libstagefright_foundation + libstagefright libbinder libmedia libutils liblog libstagefright_foundation LOCAL_C_INCLUDES := \ $(TOP)/frameworks/av/media/libstagefright \ diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp index 161bd4f..3068541 100644 --- a/media/libstagefright/rtsp/ARTSPConnection.cpp +++ b/media/libstagefright/rtsp/ARTSPConnection.cpp @@ -20,13 +20,12 @@ #include "ARTSPConnection.h" -#include <cutils/properties.h> - #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> #include <media/stagefright/foundation/base64.h> #include <media/stagefright/MediaErrors.h> +#include <media/stagefright/Utils.h> #include <arpa/inet.h> #include <fcntl.h> @@ -41,6 +40,10 @@ namespace android { // static const int64_t ARTSPConnection::kSelectTimeoutUs = 1000ll; +// static +const AString ARTSPConnection::sUserAgent = + StringPrintf("User-Agent: %s\r\n", MakeUserAgent().c_str()); + ARTSPConnection::ARTSPConnection(bool uidValid, uid_t uid) : mUIDValid(uidValid), mUID(uid), @@ -50,7 +53,6 @@ ARTSPConnection::ARTSPConnection(bool uidValid, uid_t uid) mConnectionID(0), mNextCSeq(0), mReceiveResponseEventPending(false) { - MakeUserAgent(&mUserAgent); } ARTSPConnection::~ARTSPConnection() { @@ -1032,27 +1034,12 @@ void ARTSPConnection::addAuthentication(AString *request) { #endif } -// static -void ARTSPConnection::MakeUserAgent(AString *userAgent) { - userAgent->clear(); - userAgent->setTo("User-Agent: stagefright/1.1 (Linux;Android "); - -#if (PROPERTY_VALUE_MAX < 8) -#error "PROPERTY_VALUE_MAX must be at least 8" -#endif - - char value[PROPERTY_VALUE_MAX]; - property_get("ro.build.version.release", value, "Unknown"); - userAgent->append(value); - userAgent->append(")\r\n"); -} - void ARTSPConnection::addUserAgent(AString *request) const { // Find the boundary between 
headers and the body. ssize_t i = request->find("\r\n\r\n"); CHECK_GE(i, 0); - request->insert(mUserAgent, i + 2); + request->insert(sUserAgent, i + 2); } } // namespace android diff --git a/media/libstagefright/rtsp/ARTSPConnection.h b/media/libstagefright/rtsp/ARTSPConnection.h index 68f2d59..1fe9c99 100644 --- a/media/libstagefright/rtsp/ARTSPConnection.h +++ b/media/libstagefright/rtsp/ARTSPConnection.h @@ -74,6 +74,8 @@ private: static const int64_t kSelectTimeoutUs; + static const AString sUserAgent; + bool mUIDValid; uid_t mUID; State mState; @@ -89,8 +91,6 @@ private: sp<AMessage> mObserveBinaryMessage; - AString mUserAgent; - void performDisconnect(); void onConnect(const sp<AMessage> &msg); @@ -122,8 +122,6 @@ private: static bool ParseSingleUnsignedLong( const char *from, unsigned long *x); - static void MakeUserAgent(AString *userAgent); - DISALLOW_EVIL_CONSTRUCTORS(ARTSPConnection); }; diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk index 49e2daf..9e2724d 100644 --- a/media/libstagefright/rtsp/Android.mk +++ b/media/libstagefright/rtsp/Android.mk @@ -17,6 +17,7 @@ LOCAL_SRC_FILES:= \ ARTPWriter.cpp \ ARTSPConnection.cpp \ ASessionDescription.cpp \ + SDPLoader.cpp \ LOCAL_C_INCLUDES:= \ $(TOP)/frameworks/av/media/libstagefright/include \ diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h index b2f0e5e..e067e20 100644 --- a/media/libstagefright/rtsp/MyHandler.h +++ b/media/libstagefright/rtsp/MyHandler.h @@ -28,13 +28,13 @@ #include "ASessionDescription.h" #include <ctype.h> -#include <cutils/properties.h> #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/ALooper.h> #include <media/stagefright/foundation/AMessage.h> #include <media/stagefright/MediaErrors.h> +#include <media/stagefright/Utils.h> #include <arpa/inet.h> #include <sys/socket.h> @@ -52,20 +52,9 @@ static int64_t 
kStartupTimeoutUs = 10000000ll; static int64_t kDefaultKeepAliveTimeoutUs = 60000000ll; -namespace android { - -static void MakeUserAgentString(AString *s) { - s->setTo("stagefright/1.1 (Linux;Android "); - -#if (PROPERTY_VALUE_MAX < 8) -#error "PROPERTY_VALUE_MAX must be at least 8" -#endif +static int64_t kPauseDelayUs = 3000000ll; - char value[PROPERTY_VALUE_MAX]; - property_get("ro.build.version.release", value, "Unknown"); - s->append(value); - s->append(")"); -} +namespace android { static bool GetAttribute(const char *s, const char *key, AString *value) { value->clear(); @@ -129,13 +118,16 @@ struct MyHandler : public AHandler { mNumAccessUnitsReceived(0), mCheckPending(false), mCheckGeneration(0), + mCheckTimeoutGeneration(0), mTryTCPInterleaving(false), mTryFakeRTCP(false), mReceivedFirstRTCPPacket(false), mReceivedFirstRTPPacket(false), - mSeekable(false), + mSeekable(true), mKeepAliveTimeoutUs(kDefaultKeepAliveTimeoutUs), - mKeepAliveGeneration(0) { + mKeepAliveGeneration(0), + mPausing(false), + mPauseGeneration(0) { mNetLooper->setName("rtsp net"); mNetLooper->start(false /* runOnCallingThread */, false /* canCallJava */, @@ -173,6 +165,39 @@ struct MyHandler : public AHandler { mConn->connect(mOriginalSessionURL.c_str(), reply); } + void loadSDP(const sp<ASessionDescription>& desc) { + looper()->registerHandler(mConn); + (1 ? 
mNetLooper : looper())->registerHandler(mRTPConn); + + sp<AMessage> notify = new AMessage('biny', id()); + mConn->observeBinaryData(notify); + + sp<AMessage> reply = new AMessage('sdpl', id()); + reply->setObject("description", desc); + mConn->connect(mOriginalSessionURL.c_str(), reply); + } + + AString getControlURL(sp<ASessionDescription> desc) { + AString sessionLevelControlURL; + if (mSessionDesc->findAttribute( + 0, + "a=control", + &sessionLevelControlURL)) { + if (sessionLevelControlURL.compare("*") == 0) { + return mBaseURL; + } else { + AString controlURL; + CHECK(MakeURL( + mBaseURL.c_str(), + sessionLevelControlURL.c_str(), + &controlURL)); + return controlURL; + } + } else { + return mSessionURL; + } + } + void disconnect() { (new AMessage('abor', id()))->post(); } @@ -180,6 +205,24 @@ struct MyHandler : public AHandler { void seek(int64_t timeUs) { sp<AMessage> msg = new AMessage('seek', id()); msg->setInt64("time", timeUs); + mPauseGeneration++; + msg->post(); + } + + bool isSeekable() const { + return mSeekable; + } + + void pause() { + sp<AMessage> msg = new AMessage('paus', id()); + mPauseGeneration++; + msg->setInt32("pausecheck", mPauseGeneration); + msg->post(kPauseDelayUs); + } + + void resume() { + sp<AMessage> msg = new AMessage('resu', id()); + mPauseGeneration++; msg->post(); } @@ -223,8 +266,7 @@ struct MyHandler : public AHandler { data[offset++] = 6; // TOOL - AString tool; - MakeUserAgentString(&tool); + AString tool = MakeUserAgent(); data[offset++] = tool.size(); @@ -348,6 +390,39 @@ struct MyHandler : public AHandler { return true; } + static bool isLiveStream(const sp<ASessionDescription> &desc) { + AString attrLiveStream; + if (desc->findAttribute(0, "a=LiveStream", &attrLiveStream)) { + ssize_t semicolonPos = attrLiveStream.find(";", 2); + + const char* liveStreamValue; + if (semicolonPos < 0) { + liveStreamValue = attrLiveStream.c_str(); + } else { + AString valString; + valString.setTo(attrLiveStream, + semicolonPos + 1, + 
attrLiveStream.size() - semicolonPos - 1); + liveStreamValue = valString.c_str(); + } + + uint32_t value = strtoul(liveStreamValue, NULL, 10); + if (value == 1) { + ALOGV("found live stream"); + return true; + } + } else { + // It is a live stream if no duration is returned + int64_t durationUs; + if (!desc->getDurationUs(&durationUs)) { + ALOGV("No duration found, assume live stream"); + return true; + } + } + + return false; + } + virtual void onMessageReceived(const sp<AMessage> &msg) { switch (msg->what()) { case 'conn': @@ -448,6 +523,8 @@ struct MyHandler : public AHandler { } } + mSeekable = !isLiveStream(mSessionDesc); + if (!mBaseURL.startsWith("rtsp://")) { // Some misbehaving servers specify a relative // URL in one of the locations above, combine @@ -467,6 +544,8 @@ struct MyHandler : public AHandler { mBaseURL = tmp; } + mControlURL = getControlURL(mSessionDesc); + if (mSessionDesc->countTracks() < 2) { // There's no actual tracks in this session. // The first "track" is merely session meta @@ -489,6 +568,51 @@ struct MyHandler : public AHandler { break; } + case 'sdpl': + { + int32_t result; + CHECK(msg->findInt32("result", &result)); + + ALOGI("SDP connection request completed with result %d (%s)", + result, strerror(-result)); + + if (result == OK) { + sp<RefBase> obj; + CHECK(msg->findObject("description", &obj)); + mSessionDesc = + static_cast<ASessionDescription *>(obj.get()); + + if (!mSessionDesc->isValid()) { + ALOGE("Failed to parse session description."); + result = ERROR_MALFORMED; + } else { + mBaseURL = mSessionURL; + + mSeekable = !isLiveStream(mSessionDesc); + + mControlURL = getControlURL(mSessionDesc); + + if (mSessionDesc->countTracks() < 2) { + // There's no actual tracks in this session. + // The first "track" is merely session meta + // data. + + ALOGW("Session doesn't contain any playable " + "tracks. 
Aborting."); + result = ERROR_UNSUPPORTED; + } else { + setupTrack(1); + } + } + } + + if (result != OK) { + sp<AMessage> reply = new AMessage('disc', id()); + mConn->disconnect(reply); + } + break; + } + case 'setu': { size_t index; @@ -606,7 +730,7 @@ struct MyHandler : public AHandler { postKeepAlive(); AString request = "PLAY "; - request.append(mSessionURL); + request.append(mControlURL); request.append(" RTSP/1.0\r\n"); request.append("Session: "); @@ -644,6 +768,8 @@ struct MyHandler : public AHandler { parsePlayResponse(response); sp<AMessage> timeout = new AMessage('tiou', id()); + mCheckTimeoutGeneration++; + timeout->setInt32("tioucheck", mCheckTimeoutGeneration); timeout->post(kStartupTimeoutUs); } } @@ -733,7 +859,8 @@ struct MyHandler : public AHandler { mNumAccessUnitsReceived = 0; mReceivedFirstRTCPPacket = false; mReceivedFirstRTPPacket = false; - mSeekable = false; + mPausing = false; + mSeekable = true; sp<AMessage> reply = new AMessage('tear', id()); @@ -854,9 +981,16 @@ struct MyHandler : public AHandler { int32_t eos; if (msg->findInt32("eos", &eos)) { ALOGI("received BYE on track index %d", trackIndex); -#if 0 - track->mPacketSource->signalEOS(ERROR_END_OF_STREAM); -#endif + if (!mAllTracksHaveTime && dataReceivedOnAllChannels()) { + ALOGI("No time established => fake existing data"); + + track->mEOSReceived = true; + mTryFakeRTCP = true; + mReceivedFirstRTCPPacket = true; + fakeTimestamps(); + } else { + postQueueEOS(trackIndex, ERROR_END_OF_STREAM); + } return; } @@ -884,6 +1018,115 @@ struct MyHandler : public AHandler { break; } + case 'paus': + { + int32_t generation; + CHECK(msg->findInt32("pausecheck", &generation)); + if (generation != mPauseGeneration) { + ALOGV("Ignoring outdated pause message."); + break; + } + + if (!mSeekable) { + ALOGW("This is a live stream, ignoring pause request."); + break; + } + mCheckPending = true; + ++mCheckGeneration; + mPausing = true; + + AString request = "PAUSE "; + request.append(mControlURL); + 
request.append(" RTSP/1.0\r\n"); + + request.append("Session: "); + request.append(mSessionID); + request.append("\r\n"); + + request.append("\r\n"); + + sp<AMessage> reply = new AMessage('pau2', id()); + mConn->sendRequest(request.c_str(), reply); + break; + } + + case 'pau2': + { + int32_t result; + CHECK(msg->findInt32("result", &result)); + mCheckTimeoutGeneration++; + + ALOGI("PAUSE completed with result %d (%s)", + result, strerror(-result)); + break; + } + + case 'resu': + { + if (mPausing && mSeekPending) { + // If seeking, Play will be sent from see1 instead + break; + } + + if (!mPausing) { + // Dont send PLAY if we have not paused + break; + } + AString request = "PLAY "; + request.append(mControlURL); + request.append(" RTSP/1.0\r\n"); + + request.append("Session: "); + request.append(mSessionID); + request.append("\r\n"); + + request.append("\r\n"); + + sp<AMessage> reply = new AMessage('res2', id()); + mConn->sendRequest(request.c_str(), reply); + break; + } + + case 'res2': + { + int32_t result; + CHECK(msg->findInt32("result", &result)); + + ALOGI("PLAY completed with result %d (%s)", + result, strerror(-result)); + + mCheckPending = false; + postAccessUnitTimeoutCheck(); + + if (result == OK) { + sp<RefBase> obj; + CHECK(msg->findObject("response", &obj)); + sp<ARTSPResponse> response = + static_cast<ARTSPResponse *>(obj.get()); + + if (response->mStatusCode != 200) { + result = UNKNOWN_ERROR; + } else { + parsePlayResponse(response); + + // Post new timeout in order to make sure to use + // fake timestamps if no new Sender Reports arrive + sp<AMessage> timeout = new AMessage('tiou', id()); + mCheckTimeoutGeneration++; + timeout->setInt32("tioucheck", mCheckTimeoutGeneration); + timeout->post(kStartupTimeoutUs); + } + } + + if (result != OK) { + ALOGE("resume failed, aborting."); + (new AMessage('abor', id()))->post(); + } + + mPausing = false; + break; + } + case 'seek': { if (!mSeekable) { @@ -905,8 +1148,17 @@ struct MyHandler : public AHandler 
{ mCheckPending = true; ++mCheckGeneration; + sp<AMessage> reply = new AMessage('see1', id()); + reply->setInt64("time", timeUs); + + if (mPausing) { + // PAUSE already sent + ALOGI("Pause already sent"); + reply->post(); + break; + } AString request = "PAUSE "; - request.append(mSessionURL); + request.append(mControlURL); request.append(" RTSP/1.0\r\n"); request.append("Session: "); @@ -915,8 +1167,6 @@ struct MyHandler : public AHandler { request.append("\r\n"); - sp<AMessage> reply = new AMessage('see1', id()); - reply->setInt64("time", timeUs); mConn->sendRequest(request.c_str(), reply); break; } @@ -928,6 +1178,7 @@ struct MyHandler : public AHandler { TrackInfo *info = &mTracks.editItemAt(i); postQueueSeekDiscontinuity(i); + info->mEOSReceived = false; info->mRTPAnchor = 0; info->mNTPAnchorUs = -1; @@ -936,11 +1187,18 @@ struct MyHandler : public AHandler { mAllTracksHaveTime = false; mNTPAnchorUs = -1; + // Start new timeoutgeneration to avoid getting timeout + // before PLAY response arrive + sp<AMessage> timeout = new AMessage('tiou', id()); + mCheckTimeoutGeneration++; + timeout->setInt32("tioucheck", mCheckTimeoutGeneration); + timeout->post(kStartupTimeoutUs); + int64_t timeUs; CHECK(msg->findInt64("time", &timeUs)); AString request = "PLAY "; - request.append(mSessionURL); + request.append(mControlURL); request.append(" RTSP/1.0\r\n"); request.append("Session: "); @@ -960,7 +1218,10 @@ struct MyHandler : public AHandler { case 'see2': { - CHECK(mSeekPending); + if (mTracks.size() == 0) { + // We have already hit abor, break + break; + } int32_t result; CHECK(msg->findInt32("result", &result)); @@ -982,6 +1243,13 @@ struct MyHandler : public AHandler { } else { parsePlayResponse(response); + // Post new timeout in order to make sure to use + // fake timestamps if no new Sender Reports arrive + sp<AMessage> timeout = new AMessage('tiou', id()); + mCheckTimeoutGeneration++; + timeout->setInt32("tioucheck", mCheckTimeoutGeneration); + 
timeout->post(kStartupTimeoutUs); + ssize_t i = response->mHeaders.indexOfKey("rtp-info"); CHECK_GE(i, 0); @@ -996,6 +1264,7 @@ struct MyHandler : public AHandler { (new AMessage('abor', id()))->post(); } + mPausing = false; mSeekPending = false; sp<AMessage> msg = mNotify->dup(); @@ -1018,8 +1287,17 @@ struct MyHandler : public AHandler { case 'tiou': { + int32_t timeoutGenerationCheck; + CHECK(msg->findInt32("tioucheck", &timeoutGenerationCheck)); + if (timeoutGenerationCheck != mCheckTimeoutGeneration) { + // This is an outdated message. Ignore. + // This typically happens if a lot of seeks are + // performed, since new timeout messages now are + // posted at seek as well. + break; + } if (!mReceivedFirstRTCPPacket) { - if (mReceivedFirstRTPPacket && !mTryFakeRTCP) { + if (dataReceivedOnAllChannels() && !mTryFakeRTCP) { ALOGW("We received RTP packets but no RTCP packets, " "using fake timestamps."); @@ -1093,7 +1371,6 @@ struct MyHandler : public AHandler { } void parsePlayResponse(const sp<ARTSPResponse> &response) { - mSeekable = false; if (mTracks.size() == 0) { ALOGV("parsePlayResponse: late packets ignored."); return; @@ -1168,8 +1445,6 @@ struct MyHandler : public AHandler { ++n; } - - mSeekable = true; } sp<MetaData> getTrackFormat(size_t index, int32_t *timeScale) { @@ -1199,6 +1474,7 @@ private: uint32_t mRTPAnchor; int64_t mNTPAnchorUs; int32_t mTimeScale; + bool mEOSReceived; uint32_t mNormalPlayTimeRTP; int64_t mNormalPlayTimeUs; @@ -1221,6 +1497,7 @@ private: AString mSessionURL; AString mSessionHost; AString mBaseURL; + AString mControlURL; AString mSessionID; bool mSetupTracksSuccessful; bool mSeekPending; @@ -1234,6 +1511,7 @@ private: int64_t mNumAccessUnitsReceived; bool mCheckPending; int32_t mCheckGeneration; + int32_t mCheckTimeoutGeneration; bool mTryTCPInterleaving; bool mTryFakeRTCP; bool mReceivedFirstRTCPPacket; @@ -1241,6 +1519,8 @@ private: bool mSeekable; int64_t mKeepAliveTimeoutUs; int32_t mKeepAliveGeneration; + bool mPausing; + 
int32_t mPauseGeneration; Vector<TrackInfo> mTracks; @@ -1287,6 +1567,7 @@ private: formatDesc.c_str(), ×cale, &numChannels); info->mTimeScale = timescale; + info->mEOSReceived = false; ALOGV("track #%d URL=%s", mTracks.size(), trackURL.c_str()); @@ -1379,6 +1660,17 @@ private: } } + bool dataReceivedOnAllChannels() { + TrackInfo *track; + for (size_t i = 0; i < mTracks.size(); ++i) { + track = &mTracks.editItemAt(i); + if (track->mPackets.empty()) { + return false; + } + } + return true; + } + void onTimeUpdate(int32_t trackIndex, uint32_t rtpTime, uint64_t ntpTime) { ALOGV("onTimeUpdate track %d, rtpTime = 0x%08x, ntpTime = 0x%016llx", trackIndex, rtpTime, ntpTime); @@ -1409,6 +1701,27 @@ private: ALOGI("Time now established for all tracks."); } } + if (mAllTracksHaveTime && dataReceivedOnAllChannels()) { + // Time is now established, lets start timestamping immediately + for (size_t i = 0; i < mTracks.size(); ++i) { + TrackInfo *trackInfo = &mTracks.editItemAt(i); + while (!trackInfo->mPackets.empty()) { + sp<ABuffer> accessUnit = *trackInfo->mPackets.begin(); + trackInfo->mPackets.erase(trackInfo->mPackets.begin()); + + if (addMediaTimestamp(i, trackInfo, accessUnit)) { + postQueueAccessUnit(i, accessUnit); + } + } + } + for (size_t i = 0; i < mTracks.size(); ++i) { + TrackInfo *trackInfo = &mTracks.editItemAt(i); + if (trackInfo->mEOSReceived) { + postQueueEOS(i, ERROR_END_OF_STREAM); + trackInfo->mEOSReceived = false; + } + } + } } void onAccessUnitComplete( @@ -1453,6 +1766,11 @@ private: if (addMediaTimestamp(trackIndex, track, accessUnit)) { postQueueAccessUnit(trackIndex, accessUnit); } + + if (track->mEOSReceived) { + postQueueEOS(trackIndex, ERROR_END_OF_STREAM); + track->mEOSReceived = false; + } } bool addMediaTimestamp( diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp new file mode 100644 index 0000000..ed3fa7e --- /dev/null +++ b/media/libstagefright/rtsp/SDPLoader.cpp @@ -0,0 +1,154 @@ +/* + * Copyright 
(C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "SDPLoader" +#include <utils/Log.h> + +#include "SDPLoader.h" + +#include "ASessionDescription.h" +#include "HTTPBase.h" + +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> + +#define DEFAULT_SDP_SIZE 100000 + +namespace android { + +SDPLoader::SDPLoader(const sp<AMessage> ¬ify, uint32_t flags, bool uidValid, uid_t uid) + : mNotify(notify), + mFlags(flags), + mUIDValid(uidValid), + mUID(uid), + mNetLooper(new ALooper), + mCancelled(false), + mHTTPDataSource( + HTTPBase::Create( + (mFlags & kFlagIncognito) + ? 
HTTPBase::kFlagIncognito + : 0)) { + if (mUIDValid) { + mHTTPDataSource->setUID(mUID); + } + + mNetLooper->setName("sdp net"); + mNetLooper->start(false /* runOnCallingThread */, + false /* canCallJava */, + PRIORITY_HIGHEST); +} + +void SDPLoader::load(const char *url, const KeyedVector<String8, String8> *headers) { + mNetLooper->registerHandler(this); + + sp<AMessage> msg = new AMessage(kWhatLoad, id()); + msg->setString("url", url); + + if (headers != NULL) { + msg->setPointer( + "headers", + new KeyedVector<String8, String8>(*headers)); + } + + msg->post(); +} + +void SDPLoader::cancel() { + mCancelled = true; + sp<HTTPBase> HTTPDataSource = mHTTPDataSource; + HTTPDataSource->disconnect(); +} + +void SDPLoader::onMessageReceived(const sp<AMessage> &msg) { + switch (msg->what()) { + case kWhatLoad: + onLoad(msg); + break; + + default: + TRESPASS(); + break; + } +} + +void SDPLoader::onLoad(const sp<AMessage> &msg) { + status_t err = OK; + sp<ASessionDescription> desc = NULL; + AString url; + CHECK(msg->findString("url", &url)); + + KeyedVector<String8, String8> *headers = NULL; + msg->findPointer("headers", (void **)&headers); + + if (!(mFlags & kFlagIncognito)) { + ALOGI("onLoad '%s'", url.c_str()); + } else { + ALOGI("onLoad <URL suppressed>"); + } + + if (!mCancelled) { + err = mHTTPDataSource->connect(url.c_str(), headers); + + if (err != OK) { + ALOGE("connect() returned %d", err); + } + } + + if (headers != NULL) { + delete headers; + headers = NULL; + } + + off64_t sdpSize; + if (err == OK && !mCancelled) { + err = mHTTPDataSource->getSize(&sdpSize); + + if (err != OK) { + //We did not get the size of the sdp file, default to a large value + sdpSize = DEFAULT_SDP_SIZE; + err = OK; + } + } + + sp<ABuffer> buffer = new ABuffer(sdpSize); + + if (err == OK && !mCancelled) { + ssize_t readSize = mHTTPDataSource->readAt(0, buffer->data(), sdpSize); + + if (readSize < 0) { + ALOGE("Failed to read SDP, error code = %ld", readSize); + err = UNKNOWN_ERROR; + } else 
{ + desc = new ASessionDescription; + + if (desc == NULL || !desc->setTo(buffer->data(), (size_t)readSize)) { + err = UNKNOWN_ERROR; + ALOGE("Failed to parse SDP"); + } + } + } + + mHTTPDataSource.clear(); + + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatSDPLoaded); + notify->setInt32("result", err); + notify->setObject("description", desc); + notify->post(); +} + +} // namespace android diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk index 57fff0b..06ce16b 100644 --- a/media/libstagefright/tests/Android.mk +++ b/media/libstagefright/tests/Android.mk @@ -26,6 +26,7 @@ LOCAL_SHARED_LIBRARIES := \ libsync \ libui \ libutils \ + liblog LOCAL_STATIC_LIBRARIES := \ libgtest \ diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp index a61d6a2..a5459fe 100644 --- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp +++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp @@ -27,7 +27,7 @@ #include <media/mediarecorder.h> #include <ui/GraphicBuffer.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <gui/ISurfaceComposer.h> #include <gui/Surface.h> #include <gui/SurfaceComposerClient.h> @@ -107,9 +107,9 @@ protected: window.get(), NULL); } else { ALOGV("No actual display. 
Choosing EGLSurface based on SurfaceMediaSource"); - sp<ISurfaceTexture> sms = (new SurfaceMediaSource( + sp<IGraphicBufferProducer> sms = (new SurfaceMediaSource( getSurfaceWidth(), getSurfaceHeight()))->getBufferQueue(); - sp<SurfaceTextureClient> stc = new SurfaceTextureClient(sms); + sp<Surface> stc = new Surface(sms); sp<ANativeWindow> window = stc; mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig, @@ -361,7 +361,7 @@ protected: mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight); // Manual cast is required to avoid constructor ambiguity - mSTC = new SurfaceTextureClient(static_cast<sp<ISurfaceTexture> >( mSMS->getBufferQueue())); + mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue())); mANW = mSTC; } @@ -375,7 +375,7 @@ protected: const int mYuvTexHeight; sp<SurfaceMediaSource> mSMS; - sp<SurfaceTextureClient> mSTC; + sp<Surface> mSTC; sp<ANativeWindow> mANW; }; @@ -396,7 +396,7 @@ protected: ALOGV("SMS-GLTest::SetUp()"); android::ProcessState::self()->startThreadPool(); mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight); - mSTC = new SurfaceTextureClient(static_cast<sp<ISurfaceTexture> >( mSMS->getBufferQueue())); + mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue())); mANW = mSTC; // Doing the setup related to the GL Side @@ -416,7 +416,7 @@ protected: const int mYuvTexHeight; sp<SurfaceMediaSource> mSMS; - sp<SurfaceTextureClient> mSTC; + sp<Surface> mSTC; sp<ANativeWindow> mANW; }; @@ -482,8 +482,8 @@ sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int video // query the mediarecorder for a surfacemeidasource and create an egl surface with that void SurfaceMediaSourceGLTest::setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr) { - sp<ISurfaceTexture> iST = mr->querySurfaceMediaSourceFromMediaServer(); - mSTC = new SurfaceTextureClient(iST); + sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer(); + mSTC = new 
Surface(iST); mANW = mSTC; if (mEglSurface != EGL_NO_SURFACE) { @@ -749,8 +749,8 @@ TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuYV12BufferNpotWriteMediaS mYuvTexHeight, 30); // get the reference to the surfacemediasource living in // mediaserver that is created by stagefrightrecorder - sp<ISurfaceTexture> iST = mr->querySurfaceMediaSourceFromMediaServer(); - mSTC = new SurfaceTextureClient(iST); + sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer(); + mSTC = new Surface(iST); mANW = mSTC; ASSERT_EQ(NO_ERROR, native_window_api_connect(mANW.get(), NATIVE_WINDOW_API_CPU)); ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(), @@ -781,7 +781,7 @@ TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWriter) { ALOGV("Verify creating a surface w/ right config + dummy writer*********"); mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight); - mSTC = new SurfaceTextureClient(static_cast<sp<ISurfaceTexture> >( mSMS->getBufferQueue())); + mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue())); mANW = mSTC; DummyRecorder writer(mSMS); diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.cpp b/media/libstagefright/timedtext/TimedTextSRTSource.cpp index eac23ba..2ac1e72 100644 --- a/media/libstagefright/timedtext/TimedTextSRTSource.cpp +++ b/media/libstagefright/timedtext/TimedTextSRTSource.cpp @@ -36,6 +36,9 @@ TimedTextSRTSource::TimedTextSRTSource(const sp<DataSource>& dataSource) : mSource(dataSource), mMetaData(new MetaData), mIndex(0) { + // TODO: Need to detect the language, because SRT doesn't give language + // information explicitly. + mMetaData->setCString(kKeyMediaLanguage, "und"); } TimedTextSRTSource::~TimedTextSRTSource() { @@ -46,14 +49,10 @@ status_t TimedTextSRTSource::start() { if (err != OK) { reset(); } - // TODO: Need to detect the language, because SRT doesn't give language - // information explicitly. 
- mMetaData->setCString(kKeyMediaLanguage, ""); return err; } void TimedTextSRTSource::reset() { - mMetaData->clear(); mTextVector.clear(); mIndex = 0; } diff --git a/media/libstagefright/wifi-display/ANetworkSession.cpp b/media/libstagefright/wifi-display/ANetworkSession.cpp index 62a6e7f..938d601 100644 --- a/media/libstagefright/wifi-display/ANetworkSession.cpp +++ b/media/libstagefright/wifi-display/ANetworkSession.cpp @@ -23,9 +23,11 @@ #include <arpa/inet.h> #include <fcntl.h> +#include <linux/tcp.h> #include <net/if.h> #include <netdb.h> #include <netinet/in.h> +#include <sys/ioctl.h> #include <sys/socket.h> #include <media/stagefright/foundation/ABuffer.h> @@ -37,6 +39,7 @@ namespace android { static const size_t kMaxUDPSize = 1500; +static const int32_t kMaxUDPRetries = 200; struct ANetworkSession::NetworkThread : public Thread { NetworkThread(ANetworkSession *session); @@ -79,7 +82,8 @@ struct ANetworkSession::Session : public RefBase { status_t readMore(); status_t writeMore(); - status_t sendRequest(const void *data, ssize_t size); + status_t sendRequest( + const void *data, ssize_t size, bool timeValid, int64_t timeUs); void setIsRTSPConnection(bool yesno); @@ -87,24 +91,34 @@ protected: virtual ~Session(); private: + enum { + FRAGMENT_FLAG_TIME_VALID = 1, + }; + struct Fragment { + uint32_t mFlags; + int64_t mTimeUs; + sp<ABuffer> mBuffer; + }; + int32_t mSessionID; State mState; bool mIsRTSPConnection; int mSocket; sp<AMessage> mNotify; bool mSawReceiveFailure, mSawSendFailure; + int32_t mUDPRetries; - // for TCP / stream data - AString mOutBuffer; - - // for UDP / datagrams - List<sp<ABuffer> > mOutDatagrams; + List<Fragment> mOutFragments; AString mInBuffer; + int64_t mLastStallReportUs; + void notifyError(bool send, status_t err, const char *detail); void notify(NotificationReason reason); + void dumpFragmentStats(const Fragment &frag); + DISALLOW_EVIL_CONSTRUCTORS(Session); }; 
//////////////////////////////////////////////////////////////////////////////// @@ -135,7 +149,9 @@ ANetworkSession::Session::Session( mSocket(s), mNotify(notify), mSawReceiveFailure(false), - mSawSendFailure(false) { + mSawSendFailure(false), + mUDPRetries(kMaxUDPRetries), + mLastStallReportUs(-1ll) { if (mState == CONNECTED) { struct sockaddr_in localAddr; socklen_t localAddrLen = sizeof(localAddr); @@ -216,8 +232,8 @@ bool ANetworkSession::Session::wantsToRead() { bool ANetworkSession::Session::wantsToWrite() { return !mSawSendFailure && (mState == CONNECTING - || (mState == CONNECTED && !mOutBuffer.empty()) - || (mState == DATAGRAM && !mOutDatagrams.empty())); + || (mState == CONNECTED && !mOutFragments.empty()) + || (mState == DATAGRAM && !mOutFragments.empty())); } status_t ANetworkSession::Session::readMore() { @@ -273,8 +289,17 @@ status_t ANetworkSession::Session::readMore() { } if (err != OK) { - notifyError(false /* send */, err, "Recvfrom failed."); - mSawReceiveFailure = true; + if (!mUDPRetries) { + notifyError(false /* send */, err, "Recvfrom failed."); + mSawReceiveFailure = true; + } else { + mUDPRetries--; + ALOGE("Recvfrom failed, %d/%d retries left", + mUDPRetries, kMaxUDPRetries); + err = OK; + } + } else { + mUDPRetries = kMaxUDPRetries; } return err; @@ -314,6 +339,9 @@ status_t ANetworkSession::Session::readMore() { sp<ABuffer> packet = new ABuffer(packetSize); memcpy(packet->data(), mInBuffer.c_str() + 2, packetSize); + int64_t nowUs = ALooper::GetNowUs(); + packet->meta()->setInt64("arrivalTimeUs", nowUs); + sp<AMessage> notify = mNotify->dup(); notify->setInt32("sessionID", mSessionID); notify->setInt32("reason", kWhatDatagram); @@ -399,31 +427,41 @@ status_t ANetworkSession::Session::readMore() { return err; } -status_t ANetworkSession::Session::writeMore() { - if (mState == DATAGRAM) { - CHECK(!mOutDatagrams.empty()); +void ANetworkSession::Session::dumpFragmentStats(const Fragment &frag) { +#if 0 + int64_t nowUs = ALooper::GetNowUs(); 
+ int64_t delayMs = (nowUs - frag.mTimeUs) / 1000ll; - status_t err; - do { - const sp<ABuffer> &datagram = *mOutDatagrams.begin(); + static const int64_t kMinDelayMs = 0; + static const int64_t kMaxDelayMs = 300; - uint8_t *data = datagram->data(); - if (data[0] == 0x80 && (data[1] & 0x7f) == 33) { - int64_t nowUs = ALooper::GetNowUs(); + const char *kPattern = "########################################"; + size_t kPatternSize = strlen(kPattern); - uint32_t prevRtpTime = U32_AT(&data[4]); + int n = (kPatternSize * (delayMs - kMinDelayMs)) + / (kMaxDelayMs - kMinDelayMs); - // 90kHz time scale - uint32_t rtpTime = (nowUs * 9ll) / 100ll; - int32_t diffTime = (int32_t)rtpTime - (int32_t)prevRtpTime; + if (n < 0) { + n = 0; + } else if ((size_t)n > kPatternSize) { + n = kPatternSize; + } - ALOGV("correcting rtpTime by %.0f ms", diffTime / 90.0); + ALOGI("[%lld]: (%4lld ms) %s\n", + frag.mTimeUs / 1000, + delayMs, + kPattern + kPatternSize - n); +#endif +} - data[4] = rtpTime >> 24; - data[5] = (rtpTime >> 16) & 0xff; - data[6] = (rtpTime >> 8) & 0xff; - data[7] = rtpTime & 0xff; - } +status_t ANetworkSession::Session::writeMore() { + if (mState == DATAGRAM) { + CHECK(!mOutFragments.empty()); + + status_t err; + do { + const Fragment &frag = *mOutFragments.begin(); + const sp<ABuffer> &datagram = frag.mBuffer; int n; do { @@ -433,24 +471,37 @@ status_t ANetworkSession::Session::writeMore() { err = OK; if (n > 0) { - mOutDatagrams.erase(mOutDatagrams.begin()); + if (frag.mFlags & FRAGMENT_FLAG_TIME_VALID) { + dumpFragmentStats(frag); + } + + mOutFragments.erase(mOutFragments.begin()); } else if (n < 0) { err = -errno; } else if (n == 0) { err = -ECONNRESET; } - } while (err == OK && !mOutDatagrams.empty()); + } while (err == OK && !mOutFragments.empty()); if (err == -EAGAIN) { - if (!mOutDatagrams.empty()) { - ALOGI("%d datagrams remain queued.", mOutDatagrams.size()); + if (!mOutFragments.empty()) { + ALOGI("%d datagrams remain queued.", mOutFragments.size()); } err = 
OK; } if (err != OK) { - notifyError(true /* send */, err, "Send datagram failed."); - mSawSendFailure = true; + if (!mUDPRetries) { + notifyError(true /* send */, err, "Send datagram failed."); + mSawSendFailure = true; + } else { + mUDPRetries--; + ALOGE("Send datagram failed, %d/%d retries left", + mUDPRetries, kMaxUDPRetries); + err = OK; + } + } else { + mUDPRetries = kMaxUDPRetries; } return err; @@ -476,23 +527,37 @@ status_t ANetworkSession::Session::writeMore() { } CHECK_EQ(mState, CONNECTED); - CHECK(!mOutBuffer.empty()); + CHECK(!mOutFragments.empty()); ssize_t n; - do { - n = send(mSocket, mOutBuffer.c_str(), mOutBuffer.size(), 0); - } while (n < 0 && errno == EINTR); + while (!mOutFragments.empty()) { + const Fragment &frag = *mOutFragments.begin(); - status_t err = OK; + do { + n = send(mSocket, frag.mBuffer->data(), frag.mBuffer->size(), 0); + } while (n < 0 && errno == EINTR); - if (n > 0) { -#if 0 - ALOGI("out:"); - hexdump(mOutBuffer.c_str(), n); -#endif + if (n <= 0) { + break; + } - mOutBuffer.erase(0, n); - } else if (n < 0) { + frag.mBuffer->setRange( + frag.mBuffer->offset() + n, frag.mBuffer->size() - n); + + if (frag.mBuffer->size() > 0) { + break; + } + + if (frag.mFlags & FRAGMENT_FLAG_TIME_VALID) { + dumpFragmentStats(frag); + } + + mOutFragments.erase(mOutFragments.begin()); + } + + status_t err = OK; + + if (n < 0) { err = -errno; } else if (n == 0) { err = -ECONNRESET; @@ -503,35 +568,69 @@ status_t ANetworkSession::Session::writeMore() { mSawSendFailure = true; } +#if 0 + int numBytesQueued; + int res = ioctl(mSocket, SIOCOUTQ, &numBytesQueued); + if (res == 0 && numBytesQueued > 50 * 1024) { + if (numBytesQueued > 409600) { + ALOGW("!!! 
numBytesQueued = %d", numBytesQueued); + } + + int64_t nowUs = ALooper::GetNowUs(); + + if (mLastStallReportUs < 0ll + || nowUs > mLastStallReportUs + 100000ll) { + sp<AMessage> msg = mNotify->dup(); + msg->setInt32("sessionID", mSessionID); + msg->setInt32("reason", kWhatNetworkStall); + msg->setSize("numBytesQueued", numBytesQueued); + msg->post(); + + mLastStallReportUs = nowUs; + } + } +#endif + return err; } -status_t ANetworkSession::Session::sendRequest(const void *data, ssize_t size) { +status_t ANetworkSession::Session::sendRequest( + const void *data, ssize_t size, bool timeValid, int64_t timeUs) { CHECK(mState == CONNECTED || mState == DATAGRAM); - if (mState == DATAGRAM) { - CHECK_GE(size, 0); - - sp<ABuffer> datagram = new ABuffer(size); - memcpy(datagram->data(), data, size); + if (size < 0) { + size = strlen((const char *)data); + } - mOutDatagrams.push_back(datagram); + if (size == 0) { return OK; } + sp<ABuffer> buffer; + if (mState == CONNECTED && !mIsRTSPConnection) { CHECK_LE(size, 65535); - uint8_t prefix[2]; - prefix[0] = size >> 8; - prefix[1] = size & 0xff; + buffer = new ABuffer(size + 2); + buffer->data()[0] = size >> 8; + buffer->data()[1] = size & 0xff; + memcpy(buffer->data() + 2, data, size); + } else { + buffer = new ABuffer(size); + memcpy(buffer->data(), data, size); + } + + Fragment frag; - mOutBuffer.append((const char *)prefix, sizeof(prefix)); + frag.mFlags = 0; + if (timeValid) { + frag.mFlags = FRAGMENT_FLAG_TIME_VALID; + frag.mTimeUs = timeUs; } - mOutBuffer.append( - (const char *)data, - (size >= 0) ? 
size : strlen((const char *)data)); + frag.mBuffer = buffer; + + mOutFragments.push_back(frag); return OK; } @@ -770,6 +869,22 @@ status_t ANetworkSession::createClientOrServer( err = -errno; goto bail2; } + } else if (mode == kModeCreateTCPDatagramSessionActive) { + int flag = 1; + res = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag)); + + if (res < 0) { + err = -errno; + goto bail2; + } + + int tos = 224; // VOICE + res = setsockopt(s, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)); + + if (res < 0) { + err = -errno; + goto bail2; + } } err = MakeSocketNonBlocking(s); @@ -946,7 +1061,8 @@ status_t ANetworkSession::connectUDPSession( } status_t ANetworkSession::sendRequest( - int32_t sessionID, const void *data, ssize_t size) { + int32_t sessionID, const void *data, ssize_t size, + bool timeValid, int64_t timeUs) { Mutex::Autolock autoLock(mLock); ssize_t index = mSessions.indexOfKey(sessionID); @@ -957,7 +1073,7 @@ status_t ANetworkSession::sendRequest( const sp<Session> session = mSessions.valueAt(index); - status_t err = session->sendRequest(data, size); + status_t err = session->sendRequest(data, size, timeValid, timeUs); interrupt(); @@ -1091,7 +1207,6 @@ void ANetworkSession::threadLoop() { clientSocket); sp<Session> clientSession = - // using socket sd as sessionID new Session( mNextSessionID++, Session::CONNECTED, diff --git a/media/libstagefright/wifi-display/ANetworkSession.h b/media/libstagefright/wifi-display/ANetworkSession.h index c1acdcc..7c62b29 100644 --- a/media/libstagefright/wifi-display/ANetworkSession.h +++ b/media/libstagefright/wifi-display/ANetworkSession.h @@ -74,7 +74,8 @@ struct ANetworkSession : public RefBase { status_t destroySession(int32_t sessionID); status_t sendRequest( - int32_t sessionID, const void *data, ssize_t size = -1); + int32_t sessionID, const void *data, ssize_t size = -1, + bool timeValid = false, int64_t timeUs = -1ll); enum NotificationReason { kWhatError, @@ -83,6 +84,7 @@ struct ANetworkSession : public 
RefBase { kWhatData, kWhatDatagram, kWhatBinaryData, + kWhatNetworkStall, }; protected: diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk index 75098f1..061ae89 100644 --- a/media/libstagefright/wifi-display/Android.mk +++ b/media/libstagefright/wifi-display/Android.mk @@ -4,20 +4,17 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ ANetworkSession.cpp \ + MediaSender.cpp \ Parameters.cpp \ ParsedMessage.cpp \ - sink/LinearRegression.cpp \ - sink/RTPSink.cpp \ - sink/TunnelRenderer.cpp \ - sink/WifiDisplaySink.cpp \ + rtp/RTPSender.cpp \ source/Converter.cpp \ source/MediaPuller.cpp \ source/PlaybackSession.cpp \ source/RepeaterSource.cpp \ - source/Sender.cpp \ source/TSPacketizer.cpp \ source/WifiDisplaySource.cpp \ - TimeSeries.cpp \ + VideoFormats.cpp \ LOCAL_C_INCLUDES:= \ $(TOP)/frameworks/av/media/libstagefright \ @@ -27,6 +24,7 @@ LOCAL_C_INCLUDES:= \ LOCAL_SHARED_LIBRARIES:= \ libbinder \ libcutils \ + liblog \ libgui \ libmedia \ libstagefright \ @@ -55,31 +53,10 @@ LOCAL_SHARED_LIBRARIES:= \ libstagefright_foundation \ libstagefright_wfd \ libutils \ + liblog \ LOCAL_MODULE:= wfd LOCAL_MODULE_TAGS := debug include $(BUILD_EXECUTABLE) - -################################################################################ - -include $(CLEAR_VARS) - -LOCAL_SRC_FILES:= \ - udptest.cpp \ - -LOCAL_SHARED_LIBRARIES:= \ - libbinder \ - libgui \ - libmedia \ - libstagefright \ - libstagefright_foundation \ - libstagefright_wfd \ - libutils \ - -LOCAL_MODULE:= udptest - -LOCAL_MODULE_TAGS := debug - -include $(BUILD_EXECUTABLE) diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp new file mode 100644 index 0000000..8a3566f --- /dev/null +++ b/media/libstagefright/wifi-display/MediaSender.cpp @@ -0,0 +1,474 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "MediaSender" +#include <utils/Log.h> + +#include "MediaSender.h" + +#include "ANetworkSession.h" +#include "rtp/RTPSender.h" +#include "source/TSPacketizer.h" + +#include "include/avc_utils.h" + +#include <media/IHDCP.h> +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> + +namespace android { + +MediaSender::MediaSender( + const sp<ANetworkSession> &netSession, + const sp<AMessage> ¬ify) + : mNetSession(netSession), + mNotify(notify), + mMode(MODE_UNDEFINED), + mGeneration(0), + mPrevTimeUs(-1ll), + mInitDoneCount(0), + mLogFile(NULL) { + // mLogFile = fopen("/data/misc/log.ts", "wb"); +} + +MediaSender::~MediaSender() { + if (mLogFile != NULL) { + fclose(mLogFile); + mLogFile = NULL; + } +} + +status_t MediaSender::setHDCP(const sp<IHDCP> &hdcp) { + if (mMode != MODE_UNDEFINED) { + return INVALID_OPERATION; + } + + mHDCP = hdcp; + + return OK; +} + +ssize_t MediaSender::addTrack(const sp<AMessage> &format, uint32_t flags) { + if (mMode != MODE_UNDEFINED) { + return INVALID_OPERATION; + } + + TrackInfo info; + info.mFormat = format; + info.mFlags = flags; + info.mPacketizerTrackIndex = -1; + + AString mime; + CHECK(format->findString("mime", &mime)); + info.mIsAudio = !strncasecmp("audio/", mime.c_str(), 6); + + size_t index = mTrackInfos.size(); + mTrackInfos.push_back(info); + + return index; +} + +status_t MediaSender::initAsync( + ssize_t 
trackIndex, + const char *remoteHost, + int32_t remoteRTPPort, + RTPSender::TransportMode rtpMode, + int32_t remoteRTCPPort, + RTPSender::TransportMode rtcpMode, + int32_t *localRTPPort) { + if (trackIndex < 0) { + if (mMode != MODE_UNDEFINED) { + return INVALID_OPERATION; + } + + uint32_t flags = 0; + if (mHDCP != NULL) { + // XXX Determine proper HDCP version. + flags |= TSPacketizer::EMIT_HDCP20_DESCRIPTOR; + } + mTSPacketizer = new TSPacketizer(flags); + + status_t err = OK; + for (size_t i = 0; i < mTrackInfos.size(); ++i) { + TrackInfo *info = &mTrackInfos.editItemAt(i); + + ssize_t packetizerTrackIndex = + mTSPacketizer->addTrack(info->mFormat); + + if (packetizerTrackIndex < 0) { + err = packetizerTrackIndex; + break; + } + + info->mPacketizerTrackIndex = packetizerTrackIndex; + } + + if (err == OK) { + sp<AMessage> notify = new AMessage(kWhatSenderNotify, id()); + notify->setInt32("generation", mGeneration); + mTSSender = new RTPSender(mNetSession, notify); + looper()->registerHandler(mTSSender); + + err = mTSSender->initAsync( + remoteHost, + remoteRTPPort, + rtpMode, + remoteRTCPPort, + rtcpMode, + localRTPPort); + + if (err != OK) { + looper()->unregisterHandler(mTSSender->id()); + mTSSender.clear(); + } + } + + if (err != OK) { + for (size_t i = 0; i < mTrackInfos.size(); ++i) { + TrackInfo *info = &mTrackInfos.editItemAt(i); + info->mPacketizerTrackIndex = -1; + } + + mTSPacketizer.clear(); + return err; + } + + mMode = MODE_TRANSPORT_STREAM; + mInitDoneCount = 1; + + return OK; + } + + if (mMode == MODE_TRANSPORT_STREAM) { + return INVALID_OPERATION; + } + + if ((size_t)trackIndex >= mTrackInfos.size()) { + return -ERANGE; + } + + TrackInfo *info = &mTrackInfos.editItemAt(trackIndex); + + if (info->mSender != NULL) { + return INVALID_OPERATION; + } + + sp<AMessage> notify = new AMessage(kWhatSenderNotify, id()); + notify->setInt32("generation", mGeneration); + notify->setSize("trackIndex", trackIndex); + + info->mSender = new RTPSender(mNetSession, 
notify); + looper()->registerHandler(info->mSender); + + status_t err = info->mSender->initAsync( + remoteHost, + remoteRTPPort, + rtpMode, + remoteRTCPPort, + rtcpMode, + localRTPPort); + + if (err != OK) { + looper()->unregisterHandler(info->mSender->id()); + info->mSender.clear(); + + return err; + } + + if (mMode == MODE_UNDEFINED) { + mInitDoneCount = mTrackInfos.size(); + } + + mMode = MODE_ELEMENTARY_STREAMS; + + return OK; +} + +status_t MediaSender::queueAccessUnit( + size_t trackIndex, const sp<ABuffer> &accessUnit) { + if (mMode == MODE_UNDEFINED) { + return INVALID_OPERATION; + } + + if (trackIndex >= mTrackInfos.size()) { + return -ERANGE; + } + + if (mMode == MODE_TRANSPORT_STREAM) { + TrackInfo *info = &mTrackInfos.editItemAt(trackIndex); + info->mAccessUnits.push_back(accessUnit); + + mTSPacketizer->extractCSDIfNecessary(info->mPacketizerTrackIndex); + + for (;;) { + ssize_t minTrackIndex = -1; + int64_t minTimeUs = -1ll; + + for (size_t i = 0; i < mTrackInfos.size(); ++i) { + const TrackInfo &info = mTrackInfos.itemAt(i); + + if (info.mAccessUnits.empty()) { + minTrackIndex = -1; + minTimeUs = -1ll; + break; + } + + int64_t timeUs; + const sp<ABuffer> &accessUnit = *info.mAccessUnits.begin(); + CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs)); + + if (minTrackIndex < 0 || timeUs < minTimeUs) { + minTrackIndex = i; + minTimeUs = timeUs; + } + } + + if (minTrackIndex < 0) { + return OK; + } + + TrackInfo *info = &mTrackInfos.editItemAt(minTrackIndex); + sp<ABuffer> accessUnit = *info->mAccessUnits.begin(); + info->mAccessUnits.erase(info->mAccessUnits.begin()); + + sp<ABuffer> tsPackets; + status_t err = packetizeAccessUnit( + minTrackIndex, accessUnit, &tsPackets); + + if (err == OK) { + if (mLogFile != NULL) { + fwrite(tsPackets->data(), 1, tsPackets->size(), mLogFile); + } + + int64_t timeUs; + CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs)); + tsPackets->meta()->setInt64("timeUs", timeUs); + + err = mTSSender->queueBuffer( + 
tsPackets, + 33 /* packetType */, + RTPSender::PACKETIZATION_TRANSPORT_STREAM); + } + + if (err != OK) { + return err; + } + } + } + + TrackInfo *info = &mTrackInfos.editItemAt(trackIndex); + + return info->mSender->queueBuffer( + accessUnit, + info->mIsAudio ? 96 : 97 /* packetType */, + info->mIsAudio + ? RTPSender::PACKETIZATION_AAC : RTPSender::PACKETIZATION_H264); +} + +void MediaSender::onMessageReceived(const sp<AMessage> &msg) { + switch (msg->what()) { + case kWhatSenderNotify: + { + int32_t generation; + CHECK(msg->findInt32("generation", &generation)); + if (generation != mGeneration) { + break; + } + + onSenderNotify(msg); + break; + } + + default: + TRESPASS(); + } +} + +void MediaSender::onSenderNotify(const sp<AMessage> &msg) { + int32_t what; + CHECK(msg->findInt32("what", &what)); + + switch (what) { + case RTPSender::kWhatInitDone: + { + --mInitDoneCount; + + int32_t err; + CHECK(msg->findInt32("err", &err)); + + if (err != OK) { + notifyInitDone(err); + ++mGeneration; + break; + } + + if (mInitDoneCount == 0) { + notifyInitDone(OK); + } + break; + } + + case RTPSender::kWhatError: + { + int32_t err; + CHECK(msg->findInt32("err", &err)); + + notifyError(err); + break; + } + + case kWhatNetworkStall: + { + size_t numBytesQueued; + CHECK(msg->findSize("numBytesQueued", &numBytesQueued)); + + notifyNetworkStall(numBytesQueued); + break; + } + + default: + TRESPASS(); + } +} + +void MediaSender::notifyInitDone(status_t err) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatInitDone); + notify->setInt32("err", err); + notify->post(); +} + +void MediaSender::notifyError(status_t err) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatError); + notify->setInt32("err", err); + notify->post(); +} + +void MediaSender::notifyNetworkStall(size_t numBytesQueued) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatNetworkStall); + notify->setSize("numBytesQueued", numBytesQueued); + 
notify->post(); +} + +status_t MediaSender::packetizeAccessUnit( + size_t trackIndex, + sp<ABuffer> accessUnit, + sp<ABuffer> *tsPackets) { + const TrackInfo &info = mTrackInfos.itemAt(trackIndex); + + uint32_t flags = 0; + + bool isHDCPEncrypted = false; + uint64_t inputCTR; + uint8_t HDCP_private_data[16]; + + bool manuallyPrependSPSPPS = + !info.mIsAudio + && (info.mFlags & FLAG_MANUALLY_PREPEND_SPS_PPS) + && IsIDR(accessUnit); + + if (mHDCP != NULL && !info.mIsAudio) { + isHDCPEncrypted = true; + + if (manuallyPrependSPSPPS) { + accessUnit = mTSPacketizer->prependCSD( + info.mPacketizerTrackIndex, accessUnit); + } + + status_t err = mHDCP->encrypt( + accessUnit->data(), accessUnit->size(), + trackIndex /* streamCTR */, + &inputCTR, + accessUnit->data()); + + if (err != OK) { + ALOGE("Failed to HDCP-encrypt media data (err %d)", + err); + + return err; + } + + HDCP_private_data[0] = 0x00; + + HDCP_private_data[1] = + (((trackIndex >> 30) & 3) << 1) | 1; + + HDCP_private_data[2] = (trackIndex >> 22) & 0xff; + + HDCP_private_data[3] = + (((trackIndex >> 15) & 0x7f) << 1) | 1; + + HDCP_private_data[4] = (trackIndex >> 7) & 0xff; + + HDCP_private_data[5] = + ((trackIndex & 0x7f) << 1) | 1; + + HDCP_private_data[6] = 0x00; + + HDCP_private_data[7] = + (((inputCTR >> 60) & 0x0f) << 1) | 1; + + HDCP_private_data[8] = (inputCTR >> 52) & 0xff; + + HDCP_private_data[9] = + (((inputCTR >> 45) & 0x7f) << 1) | 1; + + HDCP_private_data[10] = (inputCTR >> 37) & 0xff; + + HDCP_private_data[11] = + (((inputCTR >> 30) & 0x7f) << 1) | 1; + + HDCP_private_data[12] = (inputCTR >> 22) & 0xff; + + HDCP_private_data[13] = + (((inputCTR >> 15) & 0x7f) << 1) | 1; + + HDCP_private_data[14] = (inputCTR >> 7) & 0xff; + + HDCP_private_data[15] = + ((inputCTR & 0x7f) << 1) | 1; + + flags |= TSPacketizer::IS_ENCRYPTED; + } else if (manuallyPrependSPSPPS) { + flags |= TSPacketizer::PREPEND_SPS_PPS_TO_IDR_FRAMES; + } + + int64_t timeUs = ALooper::GetNowUs(); + if (mPrevTimeUs < 0ll || 
mPrevTimeUs + 100000ll <= timeUs) { + flags |= TSPacketizer::EMIT_PCR; + flags |= TSPacketizer::EMIT_PAT_AND_PMT; + + mPrevTimeUs = timeUs; + } + + mTSPacketizer->packetize( + info.mPacketizerTrackIndex, + accessUnit, + tsPackets, + flags, + !isHDCPEncrypted ? NULL : HDCP_private_data, + !isHDCPEncrypted ? 0 : sizeof(HDCP_private_data), + info.mIsAudio ? 2 : 0 /* numStuffingBytes */); + + return OK; +} + +} // namespace android + diff --git a/media/libstagefright/wifi-display/MediaSender.h b/media/libstagefright/wifi-display/MediaSender.h new file mode 100644 index 0000000..64722c5 --- /dev/null +++ b/media/libstagefright/wifi-display/MediaSender.h @@ -0,0 +1,131 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MEDIA_SENDER_H_ + +#define MEDIA_SENDER_H_ + +#include "rtp/RTPSender.h" + +#include <media/stagefright/foundation/ABase.h> +#include <media/stagefright/foundation/AHandler.h> +#include <utils/Errors.h> +#include <utils/Vector.h> + +namespace android { + +struct ABuffer; +struct ANetworkSession; +struct AMessage; +struct IHDCP; +struct TSPacketizer; + +// This class facilitates sending of data from one or more media tracks +// through one or more RTP channels, either providing a 1:1 mapping from +// track to RTP channel or muxing all tracks into a single RTP channel and +// using transport stream encapsulation. 
+// Optionally the (video) data is encrypted using the provided hdcp object. +struct MediaSender : public AHandler { + enum { + kWhatInitDone, + kWhatError, + kWhatNetworkStall, + }; + + MediaSender( + const sp<ANetworkSession> &netSession, + const sp<AMessage> ¬ify); + + status_t setHDCP(const sp<IHDCP> &hdcp); + + enum FlagBits { + FLAG_MANUALLY_PREPEND_SPS_PPS = 1, + }; + ssize_t addTrack(const sp<AMessage> &format, uint32_t flags); + + // If trackIndex == -1, initialize for transport stream muxing. + status_t initAsync( + ssize_t trackIndex, + const char *remoteHost, + int32_t remoteRTPPort, + RTPSender::TransportMode rtpMode, + int32_t remoteRTCPPort, + RTPSender::TransportMode rtcpMode, + int32_t *localRTPPort); + + status_t queueAccessUnit( + size_t trackIndex, const sp<ABuffer> &accessUnit); + +protected: + virtual void onMessageReceived(const sp<AMessage> &msg); + virtual ~MediaSender(); + +private: + enum { + kWhatSenderNotify, + }; + + enum Mode { + MODE_UNDEFINED, + MODE_TRANSPORT_STREAM, + MODE_ELEMENTARY_STREAMS, + }; + + struct TrackInfo { + sp<AMessage> mFormat; + uint32_t mFlags; + sp<RTPSender> mSender; + List<sp<ABuffer> > mAccessUnits; + ssize_t mPacketizerTrackIndex; + bool mIsAudio; + }; + + sp<ANetworkSession> mNetSession; + sp<AMessage> mNotify; + + sp<IHDCP> mHDCP; + + Mode mMode; + int32_t mGeneration; + + Vector<TrackInfo> mTrackInfos; + + sp<TSPacketizer> mTSPacketizer; + sp<RTPSender> mTSSender; + int64_t mPrevTimeUs; + + size_t mInitDoneCount; + + FILE *mLogFile; + + void onSenderNotify(const sp<AMessage> &msg); + + void notifyInitDone(status_t err); + void notifyError(status_t err); + void notifyNetworkStall(size_t numBytesQueued); + + status_t packetizeAccessUnit( + size_t trackIndex, + sp<ABuffer> accessUnit, + sp<ABuffer> *tsPackets); + + DISALLOW_EVIL_CONSTRUCTORS(MediaSender); +}; + +} // namespace android + +#endif // MEDIA_SENDER_H_ + diff --git a/media/libstagefright/wifi-display/Parameters.cpp 
b/media/libstagefright/wifi-display/Parameters.cpp index f7118b3..d2a61ea 100644 --- a/media/libstagefright/wifi-display/Parameters.cpp +++ b/media/libstagefright/wifi-display/Parameters.cpp @@ -65,7 +65,9 @@ status_t Parameters::parse(const char *data, size_t size) { mDict.add(name, value); - i += 2; + while (i + 1 < size && data[i] == '\r' && data[i + 1] == '\n') { + i += 2; + } } return OK; diff --git a/media/libstagefright/wifi-display/TimeSeries.cpp b/media/libstagefright/wifi-display/TimeSeries.cpp deleted file mode 100644 index d882d98..0000000 --- a/media/libstagefright/wifi-display/TimeSeries.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "TimeSeries.h" - -#include <math.h> -#include <string.h> - -namespace android { - -TimeSeries::TimeSeries() - : mCount(0), - mSum(0.0) { -} - -void TimeSeries::add(double val) { - if (mCount < kHistorySize) { - mValues[mCount++] = val; - mSum += val; - } else { - mSum -= mValues[0]; - memmove(&mValues[0], &mValues[1], (kHistorySize - 1) * sizeof(double)); - mValues[kHistorySize - 1] = val; - mSum += val; - } -} - -double TimeSeries::mean() const { - if (mCount < 1) { - return 0.0; - } - - return mSum / mCount; -} - -double TimeSeries::sdev() const { - if (mCount < 1) { - return 0.0; - } - - double m = mean(); - - double sum = 0.0; - for (size_t i = 0; i < mCount; ++i) { - double tmp = mValues[i] - m; - tmp *= tmp; - - sum += tmp; - } - - return sqrt(sum / mCount); -} - -} // namespace android diff --git a/media/libstagefright/wifi-display/VideoFormats.cpp b/media/libstagefright/wifi-display/VideoFormats.cpp new file mode 100644 index 0000000..458b163 --- /dev/null +++ b/media/libstagefright/wifi-display/VideoFormats.cpp @@ -0,0 +1,419 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "VideoFormats" +#include <utils/Log.h> + +#include "VideoFormats.h" + +#include <media/stagefright/foundation/ADebug.h> + +namespace android { + +VideoFormats::config_t VideoFormats::mConfigs[][32] = { + { + // CEA Resolutions + { 640, 480, 60, false, 0, 0}, + { 720, 480, 60, false, 0, 0}, + { 720, 480, 60, true, 0, 0}, + { 720, 576, 50, false, 0, 0}, + { 720, 576, 50, true, 0, 0}, + { 1280, 720, 30, false, 0, 0}, + { 1280, 720, 60, false, 0, 0}, + { 1920, 1080, 30, false, 0, 0}, + { 1920, 1080, 60, false, 0, 0}, + { 1920, 1080, 60, true, 0, 0}, + { 1280, 720, 25, false, 0, 0}, + { 1280, 720, 50, false, 0, 0}, + { 1920, 1080, 25, false, 0, 0}, + { 1920, 1080, 50, false, 0, 0}, + { 1920, 1080, 50, true, 0, 0}, + { 1280, 720, 24, false, 0, 0}, + { 1920, 1080, 24, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + }, + { + // VESA Resolutions + { 800, 600, 30, false, 0, 0}, + { 800, 600, 60, false, 0, 0}, + { 1024, 768, 30, false, 0, 0}, + { 1024, 768, 60, false, 0, 0}, + { 1152, 864, 30, false, 0, 0}, + { 1152, 864, 60, false, 0, 0}, + { 1280, 768, 30, false, 0, 0}, + { 1280, 768, 60, false, 0, 0}, + { 1280, 800, 30, false, 0, 0}, + { 1280, 800, 60, false, 0, 0}, + { 1360, 768, 30, false, 0, 0}, + { 1360, 768, 60, false, 0, 0}, + { 1366, 768, 30, false, 0, 0}, + { 1366, 768, 60, false, 0, 0}, + { 1280, 1024, 30, false, 0, 0}, + { 1280, 1024, 60, false, 0, 0}, + { 1400, 1050, 30, false, 0, 0}, + { 1400, 1050, 60, false, 0, 0}, + { 1440, 900, 30, false, 0, 0}, + { 1440, 900, 60, false, 0, 0}, + { 1600, 900, 30, false, 0, 0}, + { 1600, 900, 60, false, 0, 0}, + 
{ 1600, 1200, 30, false, 0, 0}, + { 1600, 1200, 60, false, 0, 0}, + { 1680, 1024, 30, false, 0, 0}, + { 1680, 1024, 60, false, 0, 0}, + { 1680, 1050, 30, false, 0, 0}, + { 1680, 1050, 60, false, 0, 0}, + { 1920, 1200, 30, false, 0, 0}, + { 1920, 1200, 60, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + }, + { + // HH Resolutions + { 800, 480, 30, false, 0, 0}, + { 800, 480, 60, false, 0, 0}, + { 854, 480, 30, false, 0, 0}, + { 854, 480, 60, false, 0, 0}, + { 864, 480, 30, false, 0, 0}, + { 864, 480, 60, false, 0, 0}, + { 640, 360, 30, false, 0, 0}, + { 640, 360, 60, false, 0, 0}, + { 960, 540, 30, false, 0, 0}, + { 960, 540, 60, false, 0, 0}, + { 848, 480, 30, false, 0, 0}, + { 848, 480, 60, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + { 0, 0, 0, false, 0, 0}, + } +}; + +VideoFormats::VideoFormats() { + for (size_t i = 0; i < kNumResolutionTypes; ++i) { + mResolutionEnabled[i] = 0; + } + + setNativeResolution(RESOLUTION_CEA, 0); // default to 640x480 p60 +} + +void VideoFormats::setNativeResolution(ResolutionType type, size_t index) { + CHECK_LT(type, kNumResolutionTypes); + CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL)); + + mNativeType = type; + mNativeIndex = index; + + setResolutionEnabled(type, index); +} + +void VideoFormats::getNativeResolution( + ResolutionType *type, size_t *index) const { + *type = mNativeType; + *index = mNativeIndex; +} + +void VideoFormats::disableAll() { + for (size_t i = 0; i < kNumResolutionTypes; ++i) { + 
mResolutionEnabled[i] = 0; + for (size_t j = 0; j < 32; j++) { + mConfigs[i][j].profile = mConfigs[i][j].level = 0; + } + } +} + +void VideoFormats::enableAll() { + for (size_t i = 0; i < kNumResolutionTypes; ++i) { + mResolutionEnabled[i] = 0xffffffff; + for (size_t j = 0; j < 32; j++) { + mConfigs[i][j].profile = (1ul << PROFILE_CBP); + mConfigs[i][j].level = (1ul << LEVEL_31); + } + } +} + +void VideoFormats::setResolutionEnabled( + ResolutionType type, size_t index, bool enabled) { + CHECK_LT(type, kNumResolutionTypes); + CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL)); + + if (enabled) { + mResolutionEnabled[type] |= (1ul << index); + } else { + mResolutionEnabled[type] &= ~(1ul << index); + } +} + +bool VideoFormats::isResolutionEnabled( + ResolutionType type, size_t index) const { + CHECK_LT(type, kNumResolutionTypes); + CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL)); + + return mResolutionEnabled[type] & (1ul << index); +} + +// static +bool VideoFormats::GetConfiguration( + ResolutionType type, + size_t index, + size_t *width, size_t *height, size_t *framesPerSecond, + bool *interlaced) { + CHECK_LT(type, kNumResolutionTypes); + + if (index >= 32) { + return false; + } + + const config_t *config = &mConfigs[type][index]; + + if (config->width == 0) { + return false; + } + + if (width) { + *width = config->width; + } + + if (height) { + *height = config->height; + } + + if (framesPerSecond) { + *framesPerSecond = config->framesPerSecond; + } + + if (interlaced) { + *interlaced = config->interlaced; + } + + return true; +} + +bool VideoFormats::parseH264Codec(const char *spec) { + unsigned profile, level, res[3]; + + if (sscanf( + spec, + "%02x %02x %08X %08X %08X", + &profile, + &level, + &res[0], + &res[1], + &res[2]) != 5) { + return false; + } + + for (size_t i = 0; i < kNumResolutionTypes; ++i) { + for (size_t j = 0; j < 32; ++j) { + if (res[i] & (1ul << j)){ + mResolutionEnabled[i] |= (1ul << j); + if (profile > 
mConfigs[i][j].profile) { + mConfigs[i][j].profile = profile; + if (level > mConfigs[i][j].level) + mConfigs[i][j].level = level; + } + } + } + } + + return true; +} + +bool VideoFormats::parseFormatSpec(const char *spec) { + CHECK_EQ(kNumResolutionTypes, 3); + + unsigned native, dummy; + unsigned res[3]; + size_t size = strlen(spec); + size_t offset = 0; + + if (sscanf(spec, "%02x %02x ", &native, &dummy) != 2) { + return false; + } + + offset += 6; // skip native and preferred-display-mode-supported + CHECK_LE(offset + 58, size); + while (offset < size) { + parseH264Codec(spec + offset); + offset += 60; // skip H.264-codec + ", " + } + + mNativeIndex = native >> 3; + mNativeType = (ResolutionType)(native & 7); + + bool success; + if (mNativeType >= kNumResolutionTypes) { + success = false; + } else { + success = GetConfiguration( + mNativeType, mNativeIndex, NULL, NULL, NULL, NULL); + } + + if (!success) { + ALOGW("sink advertised an illegal native resolution, fortunately " + "this value is ignored for the time being..."); + } + + return true; +} + +AString VideoFormats::getFormatSpec(bool forM4Message) const { + CHECK_EQ(kNumResolutionTypes, 3); + + // wfd_video_formats: + // 1 byte "native" + // 1 byte "preferred-display-mode-supported" 0 or 1 + // one or more avc codec structures + // 1 byte profile + // 1 byte level + // 4 byte CEA mask + // 4 byte VESA mask + // 4 byte HH mask + // 1 byte latency + // 2 byte min-slice-slice + // 2 byte slice-enc-params + // 1 byte framerate-control-support + // max-hres (none or 2 byte) + // max-vres (none or 2 byte) + + return StringPrintf( + "%02x 00 02 02 %08x %08x %08x 00 0000 0000 00 none none", + forM4Message ? 
0x00 : ((mNativeIndex << 3) | mNativeType), + mResolutionEnabled[0], + mResolutionEnabled[1], + mResolutionEnabled[2]); +} + +// static +bool VideoFormats::PickBestFormat( + const VideoFormats &sinkSupported, + const VideoFormats &sourceSupported, + ResolutionType *chosenType, + size_t *chosenIndex) { +#if 0 + // Support for the native format is a great idea, the spec includes + // these features, but nobody supports it and the tests don't validate it. + + ResolutionType nativeType; + size_t nativeIndex; + sinkSupported.getNativeResolution(&nativeType, &nativeIndex); + if (sinkSupported.isResolutionEnabled(nativeType, nativeIndex)) { + if (sourceSupported.isResolutionEnabled(nativeType, nativeIndex)) { + ALOGI("Choosing sink's native resolution"); + *chosenType = nativeType; + *chosenIndex = nativeIndex; + return true; + } + } else { + ALOGW("Sink advertised native resolution that it doesn't " + "actually support... ignoring"); + } + + sourceSupported.getNativeResolution(&nativeType, &nativeIndex); + if (sourceSupported.isResolutionEnabled(nativeType, nativeIndex)) { + if (sinkSupported.isResolutionEnabled(nativeType, nativeIndex)) { + ALOGI("Choosing source's native resolution"); + *chosenType = nativeType; + *chosenIndex = nativeIndex; + return true; + } + } else { + ALOGW("Source advertised native resolution that it doesn't " + "actually support... 
ignoring"); + } +#endif + + bool first = true; + uint32_t bestScore = 0; + size_t bestType = 0; + size_t bestIndex = 0; + for (size_t i = 0; i < kNumResolutionTypes; ++i) { + for (size_t j = 0; j < 32; ++j) { + size_t width, height, framesPerSecond; + bool interlaced; + if (!GetConfiguration( + (ResolutionType)i, + j, + &width, &height, &framesPerSecond, &interlaced)) { + break; + } + + if (!sinkSupported.isResolutionEnabled((ResolutionType)i, j) + || !sourceSupported.isResolutionEnabled( + (ResolutionType)i, j)) { + continue; + } + + ALOGV("type %u, index %u, %u x %u %c%u supported", + i, j, width, height, interlaced ? 'i' : 'p', framesPerSecond); + + uint32_t score = width * height * framesPerSecond; + if (!interlaced) { + score *= 2; + } + + if (first || score > bestScore) { + bestScore = score; + bestType = i; + bestIndex = j; + + first = false; + } + } + } + + if (first) { + return false; + } + + *chosenType = (ResolutionType)bestType; + *chosenIndex = bestIndex; + + return true; +} + +} // namespace android + diff --git a/media/libstagefright/wifi-display/VideoFormats.h b/media/libstagefright/wifi-display/VideoFormats.h new file mode 100644 index 0000000..01de246 --- /dev/null +++ b/media/libstagefright/wifi-display/VideoFormats.h @@ -0,0 +1,106 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef VIDEO_FORMATS_H_ + +#define VIDEO_FORMATS_H_ + +#include <media/stagefright/foundation/ABase.h> + +#include <stdint.h> + +namespace android { + +struct AString; + +// This class encapsulates that video resolution capabilities of a wfd source +// or sink as outlined in the wfd specs. Currently three sets of resolutions +// are specified, each of which supports up to 32 resolutions. +// In addition to its capabilities each sink/source also publishes its +// "native" resolution, presumably one that is preferred among all others +// because it wouldn't require any scaling and directly corresponds to the +// display capabilities/pixels. +struct VideoFormats { + VideoFormats(); + + struct config_t { + size_t width, height, framesPerSecond; + bool interlaced; + unsigned char profile, level; + }; + + enum ProfileType { + PROFILE_CBP = 0, + PROFILE_CHP, + kNumProfileTypes, + }; + + enum LevelType { + LEVEL_31 = 0, + LEVEL_32, + LEVEL_40, + LEVEL_41, + LEVEL_42, + kNumLevelTypes, + }; + + enum ResolutionType { + RESOLUTION_CEA, + RESOLUTION_VESA, + RESOLUTION_HH, + kNumResolutionTypes, + }; + + void setNativeResolution(ResolutionType type, size_t index); + void getNativeResolution(ResolutionType *type, size_t *index) const; + + void disableAll(); + void enableAll(); + + void setResolutionEnabled( + ResolutionType type, size_t index, bool enabled = true); + + bool isResolutionEnabled(ResolutionType type, size_t index) const; + + static bool GetConfiguration( + ResolutionType type, size_t index, + size_t *width, size_t *height, size_t *framesPerSecond, + bool *interlaced); + + bool parseFormatSpec(const char *spec); + AString getFormatSpec(bool forM4Message = false) const; + + static bool PickBestFormat( + const VideoFormats &sinkSupported, + const VideoFormats &sourceSupported, + ResolutionType *chosenType, + size_t *chosenIndex); + +private: + bool parseH264Codec(const char *spec); + ResolutionType mNativeType; + size_t mNativeIndex; + + uint32_t 
mResolutionEnabled[kNumResolutionTypes]; + static config_t mConfigs[kNumResolutionTypes][32]; + + DISALLOW_EVIL_CONSTRUCTORS(VideoFormats); +}; + +} // namespace android + +#endif // VIDEO_FORMATS_H_ + diff --git a/media/libstagefright/wifi-display/rtp/RTPBase.h b/media/libstagefright/wifi-display/rtp/RTPBase.h new file mode 100644 index 0000000..6178f00 --- /dev/null +++ b/media/libstagefright/wifi-display/rtp/RTPBase.h @@ -0,0 +1,51 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef RTP_BASE_H_ + +#define RTP_BASE_H_ + +namespace android { + +struct RTPBase { + enum PacketizationMode { + PACKETIZATION_TRANSPORT_STREAM, + PACKETIZATION_H264, + PACKETIZATION_AAC, + PACKETIZATION_NONE, + }; + + enum TransportMode { + TRANSPORT_UNDEFINED, + TRANSPORT_NONE, + TRANSPORT_UDP, + TRANSPORT_TCP, + TRANSPORT_TCP_INTERLEAVED, + }; + + enum { + // Really UDP _payload_ size + kMaxUDPPacketSize = 1472, // 1472 good, 1473 bad on Android@Home + }; + + static int32_t PickRandomRTPPort(); +}; + +} // namespace android + +#endif // RTP_BASE_H_ + + diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp new file mode 100644 index 0000000..095fd97 --- /dev/null +++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp @@ -0,0 +1,795 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "RTPSender" +#include <utils/Log.h> + +#include "RTPSender.h" + +#include "ANetworkSession.h" + +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/foundation/hexdump.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/Utils.h> + +#include "include/avc_utils.h" + +namespace android { + +RTPSender::RTPSender( + const sp<ANetworkSession> &netSession, + const sp<AMessage> ¬ify) + : mNetSession(netSession), + mNotify(notify), + mRTPMode(TRANSPORT_UNDEFINED), + mRTCPMode(TRANSPORT_UNDEFINED), + mRTPSessionID(0), + mRTCPSessionID(0), + mRTPConnected(false), + mRTCPConnected(false), + mLastNTPTime(0), + mLastRTPTime(0), + mNumRTPSent(0), + mNumRTPOctetsSent(0), + mNumSRsSent(0), + mRTPSeqNo(0), + mHistorySize(0) { +} + +RTPSender::~RTPSender() { + if (mRTCPSessionID != 0) { + mNetSession->destroySession(mRTCPSessionID); + mRTCPSessionID = 0; + } + + if (mRTPSessionID != 0) { + mNetSession->destroySession(mRTPSessionID); + mRTPSessionID = 0; + } +} + +// static +int32_t RTPBase::PickRandomRTPPort() { + // Pick an even integer in range [1024, 65534) + + static const size_t kRange = (65534 - 1024) / 2; + + return (int32_t)(((float)(kRange + 1) * rand()) / RAND_MAX) * 2 + 1024; +} + +status_t RTPSender::initAsync( + const char *remoteHost, + int32_t remoteRTPPort, + TransportMode rtpMode, + int32_t remoteRTCPPort, + TransportMode rtcpMode, + int32_t *outLocalRTPPort) { + if (mRTPMode != TRANSPORT_UNDEFINED + || rtpMode == TRANSPORT_UNDEFINED + || rtpMode == TRANSPORT_NONE + || rtcpMode == TRANSPORT_UNDEFINED) { + return INVALID_OPERATION; + } + + CHECK_NE(rtpMode, TRANSPORT_TCP_INTERLEAVED); + CHECK_NE(rtcpMode, TRANSPORT_TCP_INTERLEAVED); + + if ((rtcpMode == TRANSPORT_NONE && remoteRTCPPort >= 0) + || (rtcpMode != TRANSPORT_NONE && remoteRTCPPort < 0)) { + return 
INVALID_OPERATION; + } + + sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id()); + + sp<AMessage> rtcpNotify; + if (remoteRTCPPort >= 0) { + rtcpNotify = new AMessage(kWhatRTCPNotify, id()); + } + + CHECK_EQ(mRTPSessionID, 0); + CHECK_EQ(mRTCPSessionID, 0); + + int32_t localRTPPort; + + for (;;) { + localRTPPort = PickRandomRTPPort(); + + status_t err; + if (rtpMode == TRANSPORT_UDP) { + err = mNetSession->createUDPSession( + localRTPPort, + remoteHost, + remoteRTPPort, + rtpNotify, + &mRTPSessionID); + } else { + CHECK_EQ(rtpMode, TRANSPORT_TCP); + err = mNetSession->createTCPDatagramSession( + localRTPPort, + remoteHost, + remoteRTPPort, + rtpNotify, + &mRTPSessionID); + } + + if (err != OK) { + continue; + } + + if (remoteRTCPPort < 0) { + break; + } + + if (rtcpMode == TRANSPORT_UDP) { + err = mNetSession->createUDPSession( + localRTPPort + 1, + remoteHost, + remoteRTCPPort, + rtcpNotify, + &mRTCPSessionID); + } else { + CHECK_EQ(rtcpMode, TRANSPORT_TCP); + err = mNetSession->createTCPDatagramSession( + localRTPPort + 1, + remoteHost, + remoteRTCPPort, + rtcpNotify, + &mRTCPSessionID); + } + + if (err == OK) { + break; + } + + mNetSession->destroySession(mRTPSessionID); + mRTPSessionID = 0; + } + + if (rtpMode == TRANSPORT_UDP) { + mRTPConnected = true; + } + + if (rtcpMode == TRANSPORT_UDP) { + mRTCPConnected = true; + } + + mRTPMode = rtpMode; + mRTCPMode = rtcpMode; + *outLocalRTPPort = localRTPPort; + + if (mRTPMode == TRANSPORT_UDP + && (mRTCPMode == TRANSPORT_UDP || mRTCPMode == TRANSPORT_NONE)) { + notifyInitDone(OK); + } + + return OK; +} + +status_t RTPSender::queueBuffer( + const sp<ABuffer> &buffer, uint8_t packetType, PacketizationMode mode) { + status_t err; + + switch (mode) { + case PACKETIZATION_NONE: + err = queueRawPacket(buffer, packetType); + break; + + case PACKETIZATION_TRANSPORT_STREAM: + err = queueTSPackets(buffer, packetType); + break; + + case PACKETIZATION_H264: + err = queueAVCBuffer(buffer, packetType); + break; + + default: 
+ TRESPASS(); + } + + return err; +} + +status_t RTPSender::queueRawPacket( + const sp<ABuffer> &packet, uint8_t packetType) { + CHECK_LE(packet->size(), kMaxUDPPacketSize - 12); + + int64_t timeUs; + CHECK(packet->meta()->findInt64("timeUs", &timeUs)); + + sp<ABuffer> udpPacket = new ABuffer(12 + packet->size()); + + udpPacket->setInt32Data(mRTPSeqNo); + + uint8_t *rtp = udpPacket->data(); + rtp[0] = 0x80; + rtp[1] = packetType; + + rtp[2] = (mRTPSeqNo >> 8) & 0xff; + rtp[3] = mRTPSeqNo & 0xff; + ++mRTPSeqNo; + + uint32_t rtpTime = (timeUs * 9) / 100ll; + + rtp[4] = rtpTime >> 24; + rtp[5] = (rtpTime >> 16) & 0xff; + rtp[6] = (rtpTime >> 8) & 0xff; + rtp[7] = rtpTime & 0xff; + + rtp[8] = kSourceID >> 24; + rtp[9] = (kSourceID >> 16) & 0xff; + rtp[10] = (kSourceID >> 8) & 0xff; + rtp[11] = kSourceID & 0xff; + + memcpy(&rtp[12], packet->data(), packet->size()); + + return sendRTPPacket( + udpPacket, + true /* storeInHistory */, + true /* timeValid */, + ALooper::GetNowUs()); +} + +status_t RTPSender::queueTSPackets( + const sp<ABuffer> &tsPackets, uint8_t packetType) { + CHECK_EQ(0, tsPackets->size() % 188); + + int64_t timeUs; + CHECK(tsPackets->meta()->findInt64("timeUs", &timeUs)); + + const size_t numTSPackets = tsPackets->size() / 188; + + size_t srcOffset = 0; + while (srcOffset < tsPackets->size()) { + sp<ABuffer> udpPacket = + new ABuffer(12 + kMaxNumTSPacketsPerRTPPacket * 188); + + udpPacket->setInt32Data(mRTPSeqNo); + + uint8_t *rtp = udpPacket->data(); + rtp[0] = 0x80; + rtp[1] = packetType; + + rtp[2] = (mRTPSeqNo >> 8) & 0xff; + rtp[3] = mRTPSeqNo & 0xff; + ++mRTPSeqNo; + + int64_t nowUs = ALooper::GetNowUs(); + uint32_t rtpTime = (nowUs * 9) / 100ll; + + rtp[4] = rtpTime >> 24; + rtp[5] = (rtpTime >> 16) & 0xff; + rtp[6] = (rtpTime >> 8) & 0xff; + rtp[7] = rtpTime & 0xff; + + rtp[8] = kSourceID >> 24; + rtp[9] = (kSourceID >> 16) & 0xff; + rtp[10] = (kSourceID >> 8) & 0xff; + rtp[11] = kSourceID & 0xff; + + size_t numTSPackets = (tsPackets->size() - 
srcOffset) / 188; + if (numTSPackets > kMaxNumTSPacketsPerRTPPacket) { + numTSPackets = kMaxNumTSPacketsPerRTPPacket; + } + + memcpy(&rtp[12], tsPackets->data() + srcOffset, numTSPackets * 188); + + udpPacket->setRange(0, 12 + numTSPackets * 188); + + srcOffset += numTSPackets * 188; + bool isLastPacket = (srcOffset == tsPackets->size()); + + status_t err = sendRTPPacket( + udpPacket, + true /* storeInHistory */, + isLastPacket /* timeValid */, + timeUs); + + if (err != OK) { + return err; + } + } + + return OK; +} + +status_t RTPSender::queueAVCBuffer( + const sp<ABuffer> &accessUnit, uint8_t packetType) { + int64_t timeUs; + CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs)); + + uint32_t rtpTime = (timeUs * 9 / 100ll); + + List<sp<ABuffer> > packets; + + sp<ABuffer> out = new ABuffer(kMaxUDPPacketSize); + size_t outBytesUsed = 12; // Placeholder for RTP header. + + const uint8_t *data = accessUnit->data(); + size_t size = accessUnit->size(); + const uint8_t *nalStart; + size_t nalSize; + while (getNextNALUnit( + &data, &size, &nalStart, &nalSize, + true /* startCodeFollows */) == OK) { + size_t bytesNeeded = nalSize + 2; + if (outBytesUsed == 12) { + ++bytesNeeded; + } + + if (outBytesUsed + bytesNeeded > out->capacity()) { + bool emitSingleNALPacket = false; + + if (outBytesUsed == 12 + && outBytesUsed + nalSize <= out->capacity()) { + // We haven't emitted anything into the current packet yet and + // this NAL unit fits into a single-NAL-unit-packet while + // it wouldn't have fit as part of a STAP-A packet. 
+ + memcpy(out->data() + outBytesUsed, nalStart, nalSize); + outBytesUsed += nalSize; + + emitSingleNALPacket = true; + } + + if (outBytesUsed > 12) { + out->setRange(0, outBytesUsed); + packets.push_back(out); + out = new ABuffer(kMaxUDPPacketSize); + outBytesUsed = 12; // Placeholder for RTP header + } + + if (emitSingleNALPacket) { + continue; + } + } + + if (outBytesUsed + bytesNeeded <= out->capacity()) { + uint8_t *dst = out->data() + outBytesUsed; + + if (outBytesUsed == 12) { + *dst++ = 24; // STAP-A header + } + + *dst++ = (nalSize >> 8) & 0xff; + *dst++ = nalSize & 0xff; + memcpy(dst, nalStart, nalSize); + + outBytesUsed += bytesNeeded; + continue; + } + + // This single NAL unit does not fit into a single RTP packet, + // we need to emit an FU-A. + + CHECK_EQ(outBytesUsed, 12u); + + uint8_t nalType = nalStart[0] & 0x1f; + uint8_t nri = (nalStart[0] >> 5) & 3; + + size_t srcOffset = 1; + while (srcOffset < nalSize) { + size_t copy = out->capacity() - outBytesUsed - 2; + if (copy > nalSize - srcOffset) { + copy = nalSize - srcOffset; + } + + uint8_t *dst = out->data() + outBytesUsed; + dst[0] = (nri << 5) | 28; + + dst[1] = nalType; + + if (srcOffset == 1) { + dst[1] |= 0x80; + } + + if (srcOffset + copy == nalSize) { + dst[1] |= 0x40; + } + + memcpy(&dst[2], nalStart + srcOffset, copy); + srcOffset += copy; + + out->setRange(0, outBytesUsed + copy + 2); + + packets.push_back(out); + out = new ABuffer(kMaxUDPPacketSize); + outBytesUsed = 12; // Placeholder for RTP header + } + } + + if (outBytesUsed > 12) { + out->setRange(0, outBytesUsed); + packets.push_back(out); + } + + while (!packets.empty()) { + sp<ABuffer> out = *packets.begin(); + packets.erase(packets.begin()); + + out->setInt32Data(mRTPSeqNo); + + bool last = packets.empty(); + + uint8_t *dst = out->data(); + + dst[0] = 0x80; + + dst[1] = packetType; + if (last) { + dst[1] |= 1 << 7; // M-bit + } + + dst[2] = (mRTPSeqNo >> 8) & 0xff; + dst[3] = mRTPSeqNo & 0xff; + ++mRTPSeqNo; + + dst[4] = 
rtpTime >> 24; + dst[5] = (rtpTime >> 16) & 0xff; + dst[6] = (rtpTime >> 8) & 0xff; + dst[7] = rtpTime & 0xff; + dst[8] = kSourceID >> 24; + dst[9] = (kSourceID >> 16) & 0xff; + dst[10] = (kSourceID >> 8) & 0xff; + dst[11] = kSourceID & 0xff; + + status_t err = sendRTPPacket(out, true /* storeInHistory */); + + if (err != OK) { + return err; + } + } + + return OK; +} + +status_t RTPSender::sendRTPPacket( + const sp<ABuffer> &buffer, bool storeInHistory, + bool timeValid, int64_t timeUs) { + CHECK(mRTPConnected); + + status_t err = mNetSession->sendRequest( + mRTPSessionID, buffer->data(), buffer->size(), + timeValid, timeUs); + + if (err != OK) { + return err; + } + + mLastNTPTime = GetNowNTP(); + mLastRTPTime = U32_AT(buffer->data() + 4); + + ++mNumRTPSent; + mNumRTPOctetsSent += buffer->size() - 12; + + if (storeInHistory) { + if (mHistorySize == kMaxHistorySize) { + mHistory.erase(mHistory.begin()); + } else { + ++mHistorySize; + } + mHistory.push_back(buffer); + } + + return OK; +} + +// static +uint64_t RTPSender::GetNowNTP() { + struct timeval tv; + gettimeofday(&tv, NULL /* timezone */); + + uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec; + + nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll; + + uint64_t hi = nowUs / 1000000ll; + uint64_t lo = ((1ll << 32) * (nowUs % 1000000ll)) / 1000000ll; + + return (hi << 32) | lo; +} + +void RTPSender::onMessageReceived(const sp<AMessage> &msg) { + switch (msg->what()) { + case kWhatRTPNotify: + case kWhatRTCPNotify: + onNetNotify(msg->what() == kWhatRTPNotify, msg); + break; + + default: + TRESPASS(); + } +} + +void RTPSender::onNetNotify(bool isRTP, const sp<AMessage> &msg) { + int32_t reason; + CHECK(msg->findInt32("reason", &reason)); + + switch (reason) { + case ANetworkSession::kWhatError: + { + int32_t sessionID; + CHECK(msg->findInt32("sessionID", &sessionID)); + + int32_t err; + CHECK(msg->findInt32("err", &err)); + + int32_t errorOccuredDuringSend; + CHECK(msg->findInt32("send", 
&errorOccuredDuringSend)); + + AString detail; + CHECK(msg->findString("detail", &detail)); + + ALOGE("An error occurred during %s in session %d " + "(%d, '%s' (%s)).", + errorOccuredDuringSend ? "send" : "receive", + sessionID, + err, + detail.c_str(), + strerror(-err)); + + mNetSession->destroySession(sessionID); + + if (sessionID == mRTPSessionID) { + mRTPSessionID = 0; + } else if (sessionID == mRTCPSessionID) { + mRTCPSessionID = 0; + } + + if (!mRTPConnected + || (mRTPMode != TRANSPORT_NONE && !mRTCPConnected)) { + // We haven't completed initialization, attach the error + // to the notification instead. + notifyInitDone(err); + break; + } + + notifyError(err); + break; + } + + case ANetworkSession::kWhatDatagram: + { + sp<ABuffer> data; + CHECK(msg->findBuffer("data", &data)); + + if (isRTP) { + ALOGW("Huh? Received data on RTP connection..."); + } else { + onRTCPData(data); + } + break; + } + + case ANetworkSession::kWhatConnected: + { + int32_t sessionID; + CHECK(msg->findInt32("sessionID", &sessionID)); + + if (isRTP) { + CHECK_EQ(mRTPMode, TRANSPORT_TCP); + CHECK_EQ(sessionID, mRTPSessionID); + mRTPConnected = true; + } else { + CHECK_EQ(mRTCPMode, TRANSPORT_TCP); + CHECK_EQ(sessionID, mRTCPSessionID); + mRTCPConnected = true; + } + + if (mRTPConnected + && (mRTCPMode == TRANSPORT_NONE || mRTCPConnected)) { + notifyInitDone(OK); + } + break; + } + + case ANetworkSession::kWhatNetworkStall: + { + size_t numBytesQueued; + CHECK(msg->findSize("numBytesQueued", &numBytesQueued)); + + notifyNetworkStall(numBytesQueued); + break; + } + + default: + TRESPASS(); + } +} + +status_t RTPSender::onRTCPData(const sp<ABuffer> &buffer) { + const uint8_t *data = buffer->data(); + size_t size = buffer->size(); + + while (size > 0) { + if (size < 8) { + // Too short to be a valid RTCP header + return ERROR_MALFORMED; + } + + if ((data[0] >> 6) != 2) { + // Unsupported version. + return ERROR_UNSUPPORTED; + } + + if (data[0] & 0x20) { + // Padding present. 
+ + size_t paddingLength = data[size - 1]; + + if (paddingLength + 12 > size) { + // If we removed this much padding we'd end up with something + // that's too short to be a valid RTP header. + return ERROR_MALFORMED; + } + + size -= paddingLength; + } + + size_t headerLength = 4 * (data[2] << 8 | data[3]) + 4; + + if (size < headerLength) { + // Only received a partial packet? + return ERROR_MALFORMED; + } + + switch (data[1]) { + case 200: + case 201: // RR + parseReceiverReport(data, headerLength); + break; + + case 202: // SDES + case 203: + break; + + case 204: // APP + parseAPP(data, headerLength); + break; + + case 205: // TSFB (transport layer specific feedback) + parseTSFB(data, headerLength); + break; + + case 206: // PSFB (payload specific feedback) + // hexdump(data, headerLength); + break; + + default: + { + ALOGW("Unknown RTCP packet type %u of size %d", + (unsigned)data[1], headerLength); + break; + } + } + + data += headerLength; + size -= headerLength; + } + + return OK; +} + +status_t RTPSender::parseReceiverReport(const uint8_t *data, size_t size) { + // hexdump(data, size); + + float fractionLost = data[12] / 256.0f; + + ALOGI("lost %.2f %% of packets during report interval.", + 100.0f * fractionLost); + + return OK; +} + +status_t RTPSender::parseTSFB(const uint8_t *data, size_t size) { + if ((data[0] & 0x1f) != 1) { + return ERROR_UNSUPPORTED; // We only support NACK for now. 
+ } + + uint32_t srcId = U32_AT(&data[8]); + if (srcId != kSourceID) { + return ERROR_MALFORMED; + } + + for (size_t i = 12; i < size; i += 4) { + uint16_t seqNo = U16_AT(&data[i]); + uint16_t blp = U16_AT(&data[i + 2]); + + List<sp<ABuffer> >::iterator it = mHistory.begin(); + bool foundSeqNo = false; + while (it != mHistory.end()) { + const sp<ABuffer> &buffer = *it; + + uint16_t bufferSeqNo = buffer->int32Data() & 0xffff; + + bool retransmit = false; + if (bufferSeqNo == seqNo) { + retransmit = true; + } else if (blp != 0) { + for (size_t i = 0; i < 16; ++i) { + if ((blp & (1 << i)) + && (bufferSeqNo == ((seqNo + i + 1) & 0xffff))) { + blp &= ~(1 << i); + retransmit = true; + } + } + } + + if (retransmit) { + ALOGV("retransmitting seqNo %d", bufferSeqNo); + + CHECK_EQ((status_t)OK, + sendRTPPacket(buffer, false /* storeInHistory */)); + + if (bufferSeqNo == seqNo) { + foundSeqNo = true; + } + + if (foundSeqNo && blp == 0) { + break; + } + } + + ++it; + } + + if (!foundSeqNo || blp != 0) { + ALOGI("Some sequence numbers were no longer available for " + "retransmission (seqNo = %d, foundSeqNo = %d, blp = 0x%04x)", + seqNo, foundSeqNo, blp); + + if (!mHistory.empty()) { + int32_t earliest = (*mHistory.begin())->int32Data() & 0xffff; + int32_t latest = (*--mHistory.end())->int32Data() & 0xffff; + + ALOGI("have seq numbers from %d - %d", earliest, latest); + } + } + } + + return OK; +} + +status_t RTPSender::parseAPP(const uint8_t *data, size_t size) { + return OK; +} + +void RTPSender::notifyInitDone(status_t err) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatInitDone); + notify->setInt32("err", err); + notify->post(); +} + +void RTPSender::notifyError(status_t err) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", kWhatError); + notify->setInt32("err", err); + notify->post(); +} + +void RTPSender::notifyNetworkStall(size_t numBytesQueued) { + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", 
kWhatNetworkStall); + notify->setSize("numBytesQueued", numBytesQueued); + notify->post(); +} + +} // namespace android + diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.h b/media/libstagefright/wifi-display/rtp/RTPSender.h new file mode 100644 index 0000000..7dc138a --- /dev/null +++ b/media/libstagefright/wifi-display/rtp/RTPSender.h @@ -0,0 +1,120 @@ +/* + * Copyright 2013, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef RTP_SENDER_H_ + +#define RTP_SENDER_H_ + +#include "RTPBase.h" + +#include <media/stagefright/foundation/AHandler.h> + +namespace android { + +struct ABuffer; +struct ANetworkSession; + +// An object of this class facilitates sending of media data over an RTP +// channel. The channel is established over a UDP or TCP connection depending +// on which "TransportMode" was chosen. 
In addition different RTP packetization +// schemes are supported such as "Transport Stream Packets over RTP", +// or "AVC/H.264 encapsulation as specified in RFC 3984 (non-interleaved mode)" +struct RTPSender : public RTPBase, public AHandler { + enum { + kWhatInitDone, + kWhatError, + kWhatNetworkStall, + }; + RTPSender( + const sp<ANetworkSession> &netSession, + const sp<AMessage> ¬ify); + + status_t initAsync( + const char *remoteHost, + int32_t remoteRTPPort, + TransportMode rtpMode, + int32_t remoteRTCPPort, + TransportMode rtcpMode, + int32_t *outLocalRTPPort); + + status_t queueBuffer( + const sp<ABuffer> &buffer, + uint8_t packetType, + PacketizationMode mode); + +protected: + virtual ~RTPSender(); + virtual void onMessageReceived(const sp<AMessage> &msg); + +private: + enum { + kWhatRTPNotify, + kWhatRTCPNotify, + }; + + enum { + kMaxNumTSPacketsPerRTPPacket = (kMaxUDPPacketSize - 12) / 188, + kMaxHistorySize = 1024, + kSourceID = 0xdeadbeef, + }; + + sp<ANetworkSession> mNetSession; + sp<AMessage> mNotify; + TransportMode mRTPMode; + TransportMode mRTCPMode; + int32_t mRTPSessionID; + int32_t mRTCPSessionID; + bool mRTPConnected; + bool mRTCPConnected; + + uint64_t mLastNTPTime; + uint32_t mLastRTPTime; + uint32_t mNumRTPSent; + uint32_t mNumRTPOctetsSent; + uint32_t mNumSRsSent; + + uint32_t mRTPSeqNo; + + List<sp<ABuffer> > mHistory; + size_t mHistorySize; + + static uint64_t GetNowNTP(); + + status_t queueRawPacket(const sp<ABuffer> &tsPackets, uint8_t packetType); + status_t queueTSPackets(const sp<ABuffer> &tsPackets, uint8_t packetType); + status_t queueAVCBuffer(const sp<ABuffer> &accessUnit, uint8_t packetType); + + status_t sendRTPPacket( + const sp<ABuffer> &packet, bool storeInHistory, + bool timeValid = false, int64_t timeUs = -1ll); + + void onNetNotify(bool isRTP, const sp<AMessage> &msg); + + status_t onRTCPData(const sp<ABuffer> &data); + status_t parseReceiverReport(const uint8_t *data, size_t size); + status_t parseTSFB(const uint8_t 
*data, size_t size); + status_t parseAPP(const uint8_t *data, size_t size); + + void notifyInitDone(status_t err); + void notifyError(status_t err); + void notifyNetworkStall(size_t numBytesQueued); + + DISALLOW_EVIL_CONSTRUCTORS(RTPSender); +}; + +} // namespace android + +#endif // RTP_SENDER_H_ diff --git a/media/libstagefright/wifi-display/sink/LinearRegression.cpp b/media/libstagefright/wifi-display/sink/LinearRegression.cpp deleted file mode 100644 index 8cfce37..0000000 --- a/media/libstagefright/wifi-display/sink/LinearRegression.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "LinearRegression" -#include <utils/Log.h> - -#include "LinearRegression.h" - -#include <math.h> -#include <string.h> - -namespace android { - -LinearRegression::LinearRegression(size_t historySize) - : mHistorySize(historySize), - mCount(0), - mHistory(new Point[mHistorySize]), - mSumX(0.0), - mSumY(0.0) { -} - -LinearRegression::~LinearRegression() { - delete[] mHistory; - mHistory = NULL; -} - -void LinearRegression::addPoint(float x, float y) { - if (mCount == mHistorySize) { - const Point &oldest = mHistory[0]; - - mSumX -= oldest.mX; - mSumY -= oldest.mY; - - memmove(&mHistory[0], &mHistory[1], (mHistorySize - 1) * sizeof(Point)); - --mCount; - } - - Point *newest = &mHistory[mCount++]; - newest->mX = x; - newest->mY = y; - - mSumX += x; - mSumY += y; -} - -bool LinearRegression::approxLine(float *n1, float *n2, float *b) const { - static const float kEpsilon = 1.0E-4; - - if (mCount < 2) { - return false; - } - - float sumX2 = 0.0f; - float sumY2 = 0.0f; - float sumXY = 0.0f; - - float meanX = mSumX / (float)mCount; - float meanY = mSumY / (float)mCount; - - for (size_t i = 0; i < mCount; ++i) { - const Point &p = mHistory[i]; - - float x = p.mX - meanX; - float y = p.mY - meanY; - - sumX2 += x * x; - sumY2 += y * y; - sumXY += x * y; - } - - float T = sumX2 + sumY2; - float D = sumX2 * sumY2 - sumXY * sumXY; - float root = sqrt(T * T * 0.25 - D); - - float L1 = T * 0.5 - root; - - if (fabs(sumXY) > kEpsilon) { - *n1 = 1.0; - *n2 = (2.0 * L1 - sumX2) / sumXY; - - float mag = sqrt((*n1) * (*n1) + (*n2) * (*n2)); - - *n1 /= mag; - *n2 /= mag; - } else { - *n1 = 0.0; - *n2 = 1.0; - } - - *b = (*n1) * meanX + (*n2) * meanY; - - return true; -} - -} // namespace android - diff --git a/media/libstagefright/wifi-display/sink/LinearRegression.h b/media/libstagefright/wifi-display/sink/LinearRegression.h deleted file mode 100644 index ca6f5a1..0000000 --- a/media/libstagefright/wifi-display/sink/LinearRegression.h +++ 
/dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LINEAR_REGRESSION_H_ - -#define LINEAR_REGRESSION_H_ - -#include <sys/types.h> -#include <media/stagefright/foundation/ABase.h> - -namespace android { - -// Helper class to fit a line to a set of points minimizing the sum of -// squared (orthogonal) distances from line to individual points. -struct LinearRegression { - LinearRegression(size_t historySize); - ~LinearRegression(); - - void addPoint(float x, float y); - - bool approxLine(float *n1, float *n2, float *b) const; - -private: - struct Point { - float mX, mY; - }; - - size_t mHistorySize; - size_t mCount; - Point *mHistory; - - float mSumX, mSumY; - - DISALLOW_EVIL_CONSTRUCTORS(LinearRegression); -}; - -} // namespace android - -#endif // LINEAR_REGRESSION_H_ diff --git a/media/libstagefright/wifi-display/sink/RTPSink.cpp b/media/libstagefright/wifi-display/sink/RTPSink.cpp deleted file mode 100644 index 0918034..0000000 --- a/media/libstagefright/wifi-display/sink/RTPSink.cpp +++ /dev/null @@ -1,806 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "RTPSink" -#include <utils/Log.h> - -#include "RTPSink.h" - -#include "ANetworkSession.h" -#include "TunnelRenderer.h" - -#include <media/stagefright/foundation/ABuffer.h> -#include <media/stagefright/foundation/ADebug.h> -#include <media/stagefright/foundation/AMessage.h> -#include <media/stagefright/foundation/hexdump.h> -#include <media/stagefright/MediaErrors.h> -#include <media/stagefright/Utils.h> - -namespace android { - -struct RTPSink::Source : public RefBase { - Source(uint16_t seq, const sp<ABuffer> &buffer, - const sp<AMessage> queueBufferMsg); - - bool updateSeq(uint16_t seq, const sp<ABuffer> &buffer); - - void addReportBlock(uint32_t ssrc, const sp<ABuffer> &buf); - -protected: - virtual ~Source(); - -private: - static const uint32_t kMinSequential = 2; - static const uint32_t kMaxDropout = 3000; - static const uint32_t kMaxMisorder = 100; - static const uint32_t kRTPSeqMod = 1u << 16; - - sp<AMessage> mQueueBufferMsg; - - uint16_t mMaxSeq; - uint32_t mCycles; - uint32_t mBaseSeq; - uint32_t mBadSeq; - uint32_t mProbation; - uint32_t mReceived; - uint32_t mExpectedPrior; - uint32_t mReceivedPrior; - - void initSeq(uint16_t seq); - void queuePacket(const sp<ABuffer> &buffer); - - DISALLOW_EVIL_CONSTRUCTORS(Source); -}; - -//////////////////////////////////////////////////////////////////////////////// - -RTPSink::Source::Source( - uint16_t seq, const sp<ABuffer> &buffer, - const sp<AMessage> queueBufferMsg) - : mQueueBufferMsg(queueBufferMsg), - mProbation(kMinSequential) { - 
initSeq(seq); - mMaxSeq = seq - 1; - - buffer->setInt32Data(mCycles | seq); - queuePacket(buffer); -} - -RTPSink::Source::~Source() { -} - -void RTPSink::Source::initSeq(uint16_t seq) { - mMaxSeq = seq; - mCycles = 0; - mBaseSeq = seq; - mBadSeq = kRTPSeqMod + 1; - mReceived = 0; - mExpectedPrior = 0; - mReceivedPrior = 0; -} - -bool RTPSink::Source::updateSeq(uint16_t seq, const sp<ABuffer> &buffer) { - uint16_t udelta = seq - mMaxSeq; - - if (mProbation) { - // Startup phase - - if (seq == mMaxSeq + 1) { - buffer->setInt32Data(mCycles | seq); - queuePacket(buffer); - - --mProbation; - mMaxSeq = seq; - if (mProbation == 0) { - initSeq(seq); - ++mReceived; - - return true; - } - } else { - // Packet out of sequence, restart startup phase - - mProbation = kMinSequential - 1; - mMaxSeq = seq; - -#if 0 - mPackets.clear(); - mTotalBytesQueued = 0; - ALOGI("XXX cleared packets"); -#endif - - buffer->setInt32Data(mCycles | seq); - queuePacket(buffer); - } - - return false; - } - - if (udelta < kMaxDropout) { - // In order, with permissible gap. - - if (seq < mMaxSeq) { - // Sequence number wrapped - count another 64K cycle - mCycles += kRTPSeqMod; - } - - mMaxSeq = seq; - } else if (udelta <= kRTPSeqMod - kMaxMisorder) { - // The sequence number made a very large jump - - if (seq == mBadSeq) { - // Two sequential packets -- assume that the other side - // restarted without telling us so just re-sync - // (i.e. pretend this was the first packet) - - initSeq(seq); - } else { - mBadSeq = (seq + 1) & (kRTPSeqMod - 1); - - return false; - } - } else { - // Duplicate or reordered packet. 
- } - - ++mReceived; - - buffer->setInt32Data(mCycles | seq); - queuePacket(buffer); - - return true; -} - -void RTPSink::Source::queuePacket(const sp<ABuffer> &buffer) { - sp<AMessage> msg = mQueueBufferMsg->dup(); - msg->setBuffer("buffer", buffer); - msg->post(); -} - -void RTPSink::Source::addReportBlock( - uint32_t ssrc, const sp<ABuffer> &buf) { - uint32_t extMaxSeq = mMaxSeq | mCycles; - uint32_t expected = extMaxSeq - mBaseSeq + 1; - - int64_t lost = (int64_t)expected - (int64_t)mReceived; - if (lost > 0x7fffff) { - lost = 0x7fffff; - } else if (lost < -0x800000) { - lost = -0x800000; - } - - uint32_t expectedInterval = expected - mExpectedPrior; - mExpectedPrior = expected; - - uint32_t receivedInterval = mReceived - mReceivedPrior; - mReceivedPrior = mReceived; - - int64_t lostInterval = expectedInterval - receivedInterval; - - uint8_t fractionLost; - if (expectedInterval == 0 || lostInterval <=0) { - fractionLost = 0; - } else { - fractionLost = (lostInterval << 8) / expectedInterval; - } - - uint8_t *ptr = buf->data() + buf->size(); - - ptr[0] = ssrc >> 24; - ptr[1] = (ssrc >> 16) & 0xff; - ptr[2] = (ssrc >> 8) & 0xff; - ptr[3] = ssrc & 0xff; - - ptr[4] = fractionLost; - - ptr[5] = (lost >> 16) & 0xff; - ptr[6] = (lost >> 8) & 0xff; - ptr[7] = lost & 0xff; - - ptr[8] = extMaxSeq >> 24; - ptr[9] = (extMaxSeq >> 16) & 0xff; - ptr[10] = (extMaxSeq >> 8) & 0xff; - ptr[11] = extMaxSeq & 0xff; - - // XXX TODO: - - ptr[12] = 0x00; // interarrival jitter - ptr[13] = 0x00; - ptr[14] = 0x00; - ptr[15] = 0x00; - - ptr[16] = 0x00; // last SR - ptr[17] = 0x00; - ptr[18] = 0x00; - ptr[19] = 0x00; - - ptr[20] = 0x00; // delay since last SR - ptr[21] = 0x00; - ptr[22] = 0x00; - ptr[23] = 0x00; -} - -//////////////////////////////////////////////////////////////////////////////// - -RTPSink::RTPSink( - const sp<ANetworkSession> &netSession, - const sp<ISurfaceTexture> &surfaceTex) - : mNetSession(netSession), - mSurfaceTex(surfaceTex), - mRTPPort(0), - mRTPSessionID(0), 
- mRTCPSessionID(0), - mFirstArrivalTimeUs(-1ll), - mNumPacketsReceived(0ll), - mRegression(1000), - mMaxDelayMs(-1ll) { -} - -RTPSink::~RTPSink() { - if (mRTCPSessionID != 0) { - mNetSession->destroySession(mRTCPSessionID); - } - - if (mRTPSessionID != 0) { - mNetSession->destroySession(mRTPSessionID); - } -} - -status_t RTPSink::init(bool useTCPInterleaving) { - if (useTCPInterleaving) { - return OK; - } - - int clientRtp; - - sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id()); - sp<AMessage> rtcpNotify = new AMessage(kWhatRTCPNotify, id()); - for (clientRtp = 15550;; clientRtp += 2) { - int32_t rtpSession; - status_t err = mNetSession->createUDPSession( - clientRtp, rtpNotify, &rtpSession); - - if (err != OK) { - ALOGI("failed to create RTP socket on port %d", clientRtp); - continue; - } - - int32_t rtcpSession; - err = mNetSession->createUDPSession( - clientRtp + 1, rtcpNotify, &rtcpSession); - - if (err == OK) { - mRTPPort = clientRtp; - mRTPSessionID = rtpSession; - mRTCPSessionID = rtcpSession; - break; - } - - ALOGI("failed to create RTCP socket on port %d", clientRtp + 1); - mNetSession->destroySession(rtpSession); - } - - if (mRTPPort == 0) { - return UNKNOWN_ERROR; - } - - return OK; -} - -int32_t RTPSink::getRTPPort() const { - return mRTPPort; -} - -void RTPSink::onMessageReceived(const sp<AMessage> &msg) { - switch (msg->what()) { - case kWhatRTPNotify: - case kWhatRTCPNotify: - { - int32_t reason; - CHECK(msg->findInt32("reason", &reason)); - - switch (reason) { - case ANetworkSession::kWhatError: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - int32_t err; - CHECK(msg->findInt32("err", &err)); - - AString detail; - CHECK(msg->findString("detail", &detail)); - - ALOGE("An error occurred in session %d (%d, '%s/%s').", - sessionID, - err, - detail.c_str(), - strerror(-err)); - - mNetSession->destroySession(sessionID); - - if (sessionID == mRTPSessionID) { - mRTPSessionID = 0; - } else if (sessionID == 
mRTCPSessionID) { - mRTCPSessionID = 0; - } - break; - } - - case ANetworkSession::kWhatDatagram: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - sp<ABuffer> data; - CHECK(msg->findBuffer("data", &data)); - - status_t err; - if (msg->what() == kWhatRTPNotify) { - err = parseRTP(data); - } else { - err = parseRTCP(data); - } - break; - } - - default: - TRESPASS(); - } - break; - } - - case kWhatSendRR: - { - onSendRR(); - break; - } - - case kWhatPacketLost: - { - onPacketLost(msg); - break; - } - - case kWhatInject: - { - int32_t isRTP; - CHECK(msg->findInt32("isRTP", &isRTP)); - - sp<ABuffer> buffer; - CHECK(msg->findBuffer("buffer", &buffer)); - - status_t err; - if (isRTP) { - err = parseRTP(buffer); - } else { - err = parseRTCP(buffer); - } - break; - } - - default: - TRESPASS(); - } -} - -status_t RTPSink::injectPacket(bool isRTP, const sp<ABuffer> &buffer) { - sp<AMessage> msg = new AMessage(kWhatInject, id()); - msg->setInt32("isRTP", isRTP); - msg->setBuffer("buffer", buffer); - msg->post(); - - return OK; -} - -status_t RTPSink::parseRTP(const sp<ABuffer> &buffer) { - size_t size = buffer->size(); - if (size < 12) { - // Too short to be a valid RTP header. - return ERROR_MALFORMED; - } - - const uint8_t *data = buffer->data(); - - if ((data[0] >> 6) != 2) { - // Unsupported version. - return ERROR_UNSUPPORTED; - } - - if (data[0] & 0x20) { - // Padding present. - - size_t paddingLength = data[size - 1]; - - if (paddingLength + 12 > size) { - // If we removed this much padding we'd end up with something - // that's too short to be a valid RTP header. - return ERROR_MALFORMED; - } - - size -= paddingLength; - } - - int numCSRCs = data[0] & 0x0f; - - size_t payloadOffset = 12 + 4 * numCSRCs; - - if (size < payloadOffset) { - // Not enough data to fit the basic header and all the CSRC entries. - return ERROR_MALFORMED; - } - - if (data[0] & 0x10) { - // Header eXtension present. 
- - if (size < payloadOffset + 4) { - // Not enough data to fit the basic header, all CSRC entries - // and the first 4 bytes of the extension header. - - return ERROR_MALFORMED; - } - - const uint8_t *extensionData = &data[payloadOffset]; - - size_t extensionLength = - 4 * (extensionData[2] << 8 | extensionData[3]); - - if (size < payloadOffset + 4 + extensionLength) { - return ERROR_MALFORMED; - } - - payloadOffset += 4 + extensionLength; - } - - uint32_t srcId = U32_AT(&data[8]); - uint32_t rtpTime = U32_AT(&data[4]); - uint16_t seqNo = U16_AT(&data[2]); - - int64_t arrivalTimeUs; - CHECK(buffer->meta()->findInt64("arrivalTimeUs", &arrivalTimeUs)); - - if (mFirstArrivalTimeUs < 0ll) { - mFirstArrivalTimeUs = arrivalTimeUs; - } - arrivalTimeUs -= mFirstArrivalTimeUs; - - int64_t arrivalTimeMedia = (arrivalTimeUs * 9ll) / 100ll; - - ALOGV("seqNo: %d, SSRC 0x%08x, diff %lld", - seqNo, srcId, rtpTime - arrivalTimeMedia); - - mRegression.addPoint((float)rtpTime, (float)arrivalTimeMedia); - - ++mNumPacketsReceived; - - float n1, n2, b; - if (mRegression.approxLine(&n1, &n2, &b)) { - ALOGV("Line %lld: %.2f %.2f %.2f, slope %.2f", - mNumPacketsReceived, n1, n2, b, -n1 / n2); - - float expectedArrivalTimeMedia = (b - n1 * (float)rtpTime) / n2; - float latenessMs = (arrivalTimeMedia - expectedArrivalTimeMedia) / 90.0; - - if (mMaxDelayMs < 0ll || latenessMs > mMaxDelayMs) { - mMaxDelayMs = latenessMs; - ALOGI("packet was %.2f ms late", latenessMs); - } - } - - sp<AMessage> meta = buffer->meta(); - meta->setInt32("ssrc", srcId); - meta->setInt32("rtp-time", rtpTime); - meta->setInt32("PT", data[1] & 0x7f); - meta->setInt32("M", data[1] >> 7); - - buffer->setRange(payloadOffset, size - payloadOffset); - - ssize_t index = mSources.indexOfKey(srcId); - if (index < 0) { - if (mRenderer == NULL) { - sp<AMessage> notifyLost = new AMessage(kWhatPacketLost, id()); - notifyLost->setInt32("ssrc", srcId); - - mRenderer = new TunnelRenderer(notifyLost, mSurfaceTex); - 
looper()->registerHandler(mRenderer); - } - - sp<AMessage> queueBufferMsg = - new AMessage(TunnelRenderer::kWhatQueueBuffer, mRenderer->id()); - - sp<Source> source = new Source(seqNo, buffer, queueBufferMsg); - mSources.add(srcId, source); - } else { - mSources.valueAt(index)->updateSeq(seqNo, buffer); - } - - return OK; -} - -status_t RTPSink::parseRTCP(const sp<ABuffer> &buffer) { - const uint8_t *data = buffer->data(); - size_t size = buffer->size(); - - while (size > 0) { - if (size < 8) { - // Too short to be a valid RTCP header - return ERROR_MALFORMED; - } - - if ((data[0] >> 6) != 2) { - // Unsupported version. - return ERROR_UNSUPPORTED; - } - - if (data[0] & 0x20) { - // Padding present. - - size_t paddingLength = data[size - 1]; - - if (paddingLength + 12 > size) { - // If we removed this much padding we'd end up with something - // that's too short to be a valid RTP header. - return ERROR_MALFORMED; - } - - size -= paddingLength; - } - - size_t headerLength = 4 * (data[2] << 8 | data[3]) + 4; - - if (size < headerLength) { - // Only received a partial packet? - return ERROR_MALFORMED; - } - - switch (data[1]) { - case 200: - { - parseSR(data, headerLength); - break; - } - - case 201: // RR - case 202: // SDES - case 204: // APP - break; - - case 205: // TSFB (transport layer specific feedback) - case 206: // PSFB (payload specific feedback) - // hexdump(data, headerLength); - break; - - case 203: - { - parseBYE(data, headerLength); - break; - } - - default: - { - ALOGW("Unknown RTCP packet type %u of size %d", - (unsigned)data[1], headerLength); - break; - } - } - - data += headerLength; - size -= headerLength; - } - - return OK; -} - -status_t RTPSink::parseBYE(const uint8_t *data, size_t size) { - size_t SC = data[0] & 0x3f; - - if (SC == 0 || size < (4 + SC * 4)) { - // Packet too short for the minimal BYE header. 
- return ERROR_MALFORMED; - } - - uint32_t id = U32_AT(&data[4]); - - return OK; -} - -status_t RTPSink::parseSR(const uint8_t *data, size_t size) { - size_t RC = data[0] & 0x1f; - - if (size < (7 + RC * 6) * 4) { - // Packet too short for the minimal SR header. - return ERROR_MALFORMED; - } - - uint32_t id = U32_AT(&data[4]); - uint64_t ntpTime = U64_AT(&data[8]); - uint32_t rtpTime = U32_AT(&data[16]); - - ALOGV("SR: ssrc 0x%08x, ntpTime 0x%016llx, rtpTime 0x%08x", - id, ntpTime, rtpTime); - - return OK; -} - -status_t RTPSink::connect( - const char *host, int32_t remoteRtpPort, int32_t remoteRtcpPort) { - ALOGI("connecting RTP/RTCP sockets to %s:{%d,%d}", - host, remoteRtpPort, remoteRtcpPort); - - status_t err = - mNetSession->connectUDPSession(mRTPSessionID, host, remoteRtpPort); - - if (err != OK) { - return err; - } - - err = mNetSession->connectUDPSession(mRTCPSessionID, host, remoteRtcpPort); - - if (err != OK) { - return err; - } - -#if 0 - sp<ABuffer> buf = new ABuffer(1500); - memset(buf->data(), 0, buf->size()); - - mNetSession->sendRequest( - mRTPSessionID, buf->data(), buf->size()); - - mNetSession->sendRequest( - mRTCPSessionID, buf->data(), buf->size()); -#endif - - scheduleSendRR(); - - return OK; -} - -void RTPSink::scheduleSendRR() { - (new AMessage(kWhatSendRR, id()))->post(2000000ll); -} - -void RTPSink::addSDES(const sp<ABuffer> &buffer) { - uint8_t *data = buffer->data() + buffer->size(); - data[0] = 0x80 | 1; - data[1] = 202; // SDES - data[4] = 0xde; // SSRC - data[5] = 0xad; - data[6] = 0xbe; - data[7] = 0xef; - - size_t offset = 8; - - data[offset++] = 1; // CNAME - - AString cname = "stagefright@somewhere"; - data[offset++] = cname.size(); - - memcpy(&data[offset], cname.c_str(), cname.size()); - offset += cname.size(); - - data[offset++] = 6; // TOOL - - AString tool = "stagefright/1.0"; - data[offset++] = tool.size(); - - memcpy(&data[offset], tool.c_str(), tool.size()); - offset += tool.size(); - - data[offset++] = 0; - - if ((offset 
% 4) > 0) { - size_t count = 4 - (offset % 4); - switch (count) { - case 3: - data[offset++] = 0; - case 2: - data[offset++] = 0; - case 1: - data[offset++] = 0; - } - } - - size_t numWords = (offset / 4) - 1; - data[2] = numWords >> 8; - data[3] = numWords & 0xff; - - buffer->setRange(buffer->offset(), buffer->size() + offset); -} - -void RTPSink::onSendRR() { - sp<ABuffer> buf = new ABuffer(1500); - buf->setRange(0, 0); - - uint8_t *ptr = buf->data(); - ptr[0] = 0x80 | 0; - ptr[1] = 201; // RR - ptr[2] = 0; - ptr[3] = 1; - ptr[4] = 0xde; // SSRC - ptr[5] = 0xad; - ptr[6] = 0xbe; - ptr[7] = 0xef; - - buf->setRange(0, 8); - - size_t numReportBlocks = 0; - for (size_t i = 0; i < mSources.size(); ++i) { - uint32_t ssrc = mSources.keyAt(i); - sp<Source> source = mSources.valueAt(i); - - if (numReportBlocks > 31 || buf->size() + 24 > buf->capacity()) { - // Cannot fit another report block. - break; - } - - source->addReportBlock(ssrc, buf); - ++numReportBlocks; - } - - ptr[0] |= numReportBlocks; // 5 bit - - size_t sizeInWordsMinus1 = 1 + 6 * numReportBlocks; - ptr[2] = sizeInWordsMinus1 >> 8; - ptr[3] = sizeInWordsMinus1 & 0xff; - - buf->setRange(0, (sizeInWordsMinus1 + 1) * 4); - - addSDES(buf); - - mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size()); - - scheduleSendRR(); -} - -void RTPSink::onPacketLost(const sp<AMessage> &msg) { - uint32_t srcId; - CHECK(msg->findInt32("ssrc", (int32_t *)&srcId)); - - int32_t seqNo; - CHECK(msg->findInt32("seqNo", &seqNo)); - - int32_t blp = 0; - - sp<ABuffer> buf = new ABuffer(1500); - buf->setRange(0, 0); - - uint8_t *ptr = buf->data(); - ptr[0] = 0x80 | 1; // generic NACK - ptr[1] = 205; // RTPFB - ptr[2] = 0; - ptr[3] = 3; - ptr[4] = 0xde; // sender SSRC - ptr[5] = 0xad; - ptr[6] = 0xbe; - ptr[7] = 0xef; - ptr[8] = (srcId >> 24) & 0xff; - ptr[9] = (srcId >> 16) & 0xff; - ptr[10] = (srcId >> 8) & 0xff; - ptr[11] = (srcId & 0xff); - ptr[12] = (seqNo >> 8) & 0xff; - ptr[13] = (seqNo & 0xff); - ptr[14] = (blp >> 8) 
& 0xff; - ptr[15] = (blp & 0xff); - - buf->setRange(0, 16); - - mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size()); -} - -} // namespace android - diff --git a/media/libstagefright/wifi-display/sink/RTPSink.h b/media/libstagefright/wifi-display/sink/RTPSink.h deleted file mode 100644 index a1d127d..0000000 --- a/media/libstagefright/wifi-display/sink/RTPSink.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef RTP_SINK_H_ - -#define RTP_SINK_H_ - -#include <media/stagefright/foundation/AHandler.h> - -#include "LinearRegression.h" - -#include <gui/Surface.h> - -namespace android { - -struct ABuffer; -struct ANetworkSession; -struct TunnelRenderer; - -// Creates a pair of sockets for RTP/RTCP traffic, instantiates a renderer -// for incoming transport stream data and occasionally sends statistics over -// the RTCP channel. -struct RTPSink : public AHandler { - RTPSink(const sp<ANetworkSession> &netSession, - const sp<ISurfaceTexture> &surfaceTex); - - // If TCP interleaving is used, no UDP sockets are created, instead - // incoming RTP/RTCP packets (arriving on the RTSP control connection) - // are manually injected by WifiDisplaySink. 
- status_t init(bool useTCPInterleaving); - - status_t connect( - const char *host, int32_t remoteRtpPort, int32_t remoteRtcpPort); - - int32_t getRTPPort() const; - - status_t injectPacket(bool isRTP, const sp<ABuffer> &buffer); - -protected: - virtual void onMessageReceived(const sp<AMessage> &msg); - virtual ~RTPSink(); - -private: - enum { - kWhatRTPNotify, - kWhatRTCPNotify, - kWhatSendRR, - kWhatPacketLost, - kWhatInject, - }; - - struct Source; - struct StreamSource; - - sp<ANetworkSession> mNetSession; - sp<ISurfaceTexture> mSurfaceTex; - KeyedVector<uint32_t, sp<Source> > mSources; - - int32_t mRTPPort; - int32_t mRTPSessionID; - int32_t mRTCPSessionID; - - int64_t mFirstArrivalTimeUs; - int64_t mNumPacketsReceived; - LinearRegression mRegression; - int64_t mMaxDelayMs; - - sp<TunnelRenderer> mRenderer; - - status_t parseRTP(const sp<ABuffer> &buffer); - status_t parseRTCP(const sp<ABuffer> &buffer); - status_t parseBYE(const uint8_t *data, size_t size); - status_t parseSR(const uint8_t *data, size_t size); - - void addSDES(const sp<ABuffer> &buffer); - void onSendRR(); - void onPacketLost(const sp<AMessage> &msg); - void scheduleSendRR(); - - DISALLOW_EVIL_CONSTRUCTORS(RTPSink); -}; - -} // namespace android - -#endif // RTP_SINK_H_ diff --git a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp deleted file mode 100644 index bc35aef..0000000 --- a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp +++ /dev/null @@ -1,396 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "TunnelRenderer" -#include <utils/Log.h> - -#include "TunnelRenderer.h" - -#include "ATSParser.h" - -#include <binder/IMemory.h> -#include <binder/IServiceManager.h> -#include <gui/SurfaceComposerClient.h> -#include <media/IMediaPlayerService.h> -#include <media/IStreamSource.h> -#include <media/stagefright/foundation/ABuffer.h> -#include <media/stagefright/foundation/ADebug.h> -#include <media/stagefright/foundation/AMessage.h> -#include <ui/DisplayInfo.h> - -namespace android { - -struct TunnelRenderer::PlayerClient : public BnMediaPlayerClient { - PlayerClient() {} - - virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) { - ALOGI("notify %d, %d, %d", msg, ext1, ext2); - } - -protected: - virtual ~PlayerClient() {} - -private: - DISALLOW_EVIL_CONSTRUCTORS(PlayerClient); -}; - -struct TunnelRenderer::StreamSource : public BnStreamSource { - StreamSource(TunnelRenderer *owner); - - virtual void setListener(const sp<IStreamListener> &listener); - virtual void setBuffers(const Vector<sp<IMemory> > &buffers); - - virtual void onBufferAvailable(size_t index); - - virtual uint32_t flags() const; - - void doSomeWork(); - -protected: - virtual ~StreamSource(); - -private: - mutable Mutex mLock; - - TunnelRenderer *mOwner; - - sp<IStreamListener> mListener; - - Vector<sp<IMemory> > mBuffers; - List<size_t> mIndicesAvailable; - - size_t mNumDeqeued; - - DISALLOW_EVIL_CONSTRUCTORS(StreamSource); -}; - -//////////////////////////////////////////////////////////////////////////////// - 
-TunnelRenderer::StreamSource::StreamSource(TunnelRenderer *owner) - : mOwner(owner), - mNumDeqeued(0) { -} - -TunnelRenderer::StreamSource::~StreamSource() { -} - -void TunnelRenderer::StreamSource::setListener( - const sp<IStreamListener> &listener) { - mListener = listener; -} - -void TunnelRenderer::StreamSource::setBuffers( - const Vector<sp<IMemory> > &buffers) { - mBuffers = buffers; -} - -void TunnelRenderer::StreamSource::onBufferAvailable(size_t index) { - CHECK_LT(index, mBuffers.size()); - - { - Mutex::Autolock autoLock(mLock); - mIndicesAvailable.push_back(index); - } - - doSomeWork(); -} - -uint32_t TunnelRenderer::StreamSource::flags() const { - return kFlagAlignedVideoData; -} - -void TunnelRenderer::StreamSource::doSomeWork() { - Mutex::Autolock autoLock(mLock); - - while (!mIndicesAvailable.empty()) { - sp<ABuffer> srcBuffer = mOwner->dequeueBuffer(); - if (srcBuffer == NULL) { - break; - } - - ++mNumDeqeued; - - if (mNumDeqeued == 1) { - ALOGI("fixing real time now."); - - sp<AMessage> extra = new AMessage; - - extra->setInt32( - IStreamListener::kKeyDiscontinuityMask, - ATSParser::DISCONTINUITY_ABSOLUTE_TIME); - - extra->setInt64("timeUs", ALooper::GetNowUs()); - - mListener->issueCommand( - IStreamListener::DISCONTINUITY, - false /* synchronous */, - extra); - } - - ALOGV("dequeue TS packet of size %d", srcBuffer->size()); - - size_t index = *mIndicesAvailable.begin(); - mIndicesAvailable.erase(mIndicesAvailable.begin()); - - sp<IMemory> mem = mBuffers.itemAt(index); - CHECK_LE(srcBuffer->size(), mem->size()); - CHECK_EQ((srcBuffer->size() % 188), 0u); - - memcpy(mem->pointer(), srcBuffer->data(), srcBuffer->size()); - mListener->queueBuffer(index, srcBuffer->size()); - } -} - -//////////////////////////////////////////////////////////////////////////////// - -TunnelRenderer::TunnelRenderer( - const sp<AMessage> ¬ifyLost, - const sp<ISurfaceTexture> &surfaceTex) - : mNotifyLost(notifyLost), - mSurfaceTex(surfaceTex), - mTotalBytesQueued(0ll), - 
mLastDequeuedExtSeqNo(-1), - mFirstFailedAttemptUs(-1ll), - mRequestedRetransmission(false) { -} - -TunnelRenderer::~TunnelRenderer() { - destroyPlayer(); -} - -void TunnelRenderer::queueBuffer(const sp<ABuffer> &buffer) { - Mutex::Autolock autoLock(mLock); - - mTotalBytesQueued += buffer->size(); - - if (mPackets.empty()) { - mPackets.push_back(buffer); - return; - } - - int32_t newExtendedSeqNo = buffer->int32Data(); - - List<sp<ABuffer> >::iterator firstIt = mPackets.begin(); - List<sp<ABuffer> >::iterator it = --mPackets.end(); - for (;;) { - int32_t extendedSeqNo = (*it)->int32Data(); - - if (extendedSeqNo == newExtendedSeqNo) { - // Duplicate packet. - return; - } - - if (extendedSeqNo < newExtendedSeqNo) { - // Insert new packet after the one at "it". - mPackets.insert(++it, buffer); - return; - } - - if (it == firstIt) { - // Insert new packet before the first existing one. - mPackets.insert(it, buffer); - return; - } - - --it; - } -} - -sp<ABuffer> TunnelRenderer::dequeueBuffer() { - Mutex::Autolock autoLock(mLock); - - sp<ABuffer> buffer; - int32_t extSeqNo; - while (!mPackets.empty()) { - buffer = *mPackets.begin(); - extSeqNo = buffer->int32Data(); - - if (mLastDequeuedExtSeqNo < 0 || extSeqNo > mLastDequeuedExtSeqNo) { - break; - } - - // This is a retransmission of a packet we've already returned. 
- - mTotalBytesQueued -= buffer->size(); - buffer.clear(); - extSeqNo = -1; - - mPackets.erase(mPackets.begin()); - } - - if (mPackets.empty()) { - if (mFirstFailedAttemptUs < 0ll) { - mFirstFailedAttemptUs = ALooper::GetNowUs(); - mRequestedRetransmission = false; - } else { - ALOGV("no packets available for %.2f secs", - (ALooper::GetNowUs() - mFirstFailedAttemptUs) / 1E6); - } - - return NULL; - } - - if (mLastDequeuedExtSeqNo < 0 || extSeqNo == mLastDequeuedExtSeqNo + 1) { - if (mRequestedRetransmission) { - ALOGI("Recovered after requesting retransmission of %d", - extSeqNo); - } - - mLastDequeuedExtSeqNo = extSeqNo; - mFirstFailedAttemptUs = -1ll; - mRequestedRetransmission = false; - - mPackets.erase(mPackets.begin()); - - mTotalBytesQueued -= buffer->size(); - - return buffer; - } - - if (mFirstFailedAttemptUs < 0ll) { - mFirstFailedAttemptUs = ALooper::GetNowUs(); - - ALOGI("failed to get the correct packet the first time."); - return NULL; - } - - if (mFirstFailedAttemptUs + 50000ll > ALooper::GetNowUs()) { - // We're willing to wait a little while to get the right packet. - - if (!mRequestedRetransmission) { - ALOGI("requesting retransmission of seqNo %d", - (mLastDequeuedExtSeqNo + 1) & 0xffff); - - sp<AMessage> notify = mNotifyLost->dup(); - notify->setInt32("seqNo", (mLastDequeuedExtSeqNo + 1) & 0xffff); - notify->post(); - - mRequestedRetransmission = true; - } else { - ALOGI("still waiting for the correct packet to arrive."); - } - - return NULL; - } - - ALOGI("dropping packet. extSeqNo %d didn't arrive in time", - mLastDequeuedExtSeqNo + 1); - - // Permanent failure, we never received the packet. 
- mLastDequeuedExtSeqNo = extSeqNo; - mFirstFailedAttemptUs = -1ll; - mRequestedRetransmission = false; - - mTotalBytesQueued -= buffer->size(); - - mPackets.erase(mPackets.begin()); - - return buffer; -} - -void TunnelRenderer::onMessageReceived(const sp<AMessage> &msg) { - switch (msg->what()) { - case kWhatQueueBuffer: - { - sp<ABuffer> buffer; - CHECK(msg->findBuffer("buffer", &buffer)); - - queueBuffer(buffer); - - if (mStreamSource == NULL) { - if (mTotalBytesQueued > 0ll) { - initPlayer(); - } else { - ALOGI("Have %lld bytes queued...", mTotalBytesQueued); - } - } else { - mStreamSource->doSomeWork(); - } - break; - } - - default: - TRESPASS(); - } -} - -void TunnelRenderer::initPlayer() { - if (mSurfaceTex == NULL) { - mComposerClient = new SurfaceComposerClient; - CHECK_EQ(mComposerClient->initCheck(), (status_t)OK); - - DisplayInfo info; - SurfaceComposerClient::getDisplayInfo(0, &info); - ssize_t displayWidth = info.w; - ssize_t displayHeight = info.h; - - mSurfaceControl = - mComposerClient->createSurface( - String8("A Surface"), - displayWidth, - displayHeight, - PIXEL_FORMAT_RGB_565, - 0); - - CHECK(mSurfaceControl != NULL); - CHECK(mSurfaceControl->isValid()); - - SurfaceComposerClient::openGlobalTransaction(); - CHECK_EQ(mSurfaceControl->setLayer(INT_MAX), (status_t)OK); - CHECK_EQ(mSurfaceControl->show(), (status_t)OK); - SurfaceComposerClient::closeGlobalTransaction(); - - mSurface = mSurfaceControl->getSurface(); - CHECK(mSurface != NULL); - } - - sp<IServiceManager> sm = defaultServiceManager(); - sp<IBinder> binder = sm->getService(String16("media.player")); - sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder); - CHECK(service.get() != NULL); - - mStreamSource = new StreamSource(this); - - mPlayerClient = new PlayerClient; - - mPlayer = service->create(getpid(), mPlayerClient, 0); - CHECK(mPlayer != NULL); - CHECK_EQ(mPlayer->setDataSource(mStreamSource), (status_t)OK); - - mPlayer->setVideoSurfaceTexture( - 
mSurfaceTex != NULL ? mSurfaceTex : mSurface->getSurfaceTexture()); - - mPlayer->start(); -} - -void TunnelRenderer::destroyPlayer() { - mStreamSource.clear(); - - mPlayer->stop(); - mPlayer.clear(); - - if (mSurfaceTex == NULL) { - mSurface.clear(); - mSurfaceControl.clear(); - - mComposerClient->dispose(); - mComposerClient.clear(); - } -} - -} // namespace android - diff --git a/media/libstagefright/wifi-display/sink/TunnelRenderer.h b/media/libstagefright/wifi-display/sink/TunnelRenderer.h deleted file mode 100644 index c9597e0..0000000 --- a/media/libstagefright/wifi-display/sink/TunnelRenderer.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TUNNEL_RENDERER_H_ - -#define TUNNEL_RENDERER_H_ - -#include <gui/Surface.h> -#include <media/stagefright/foundation/AHandler.h> - -namespace android { - -struct ABuffer; -struct SurfaceComposerClient; -struct SurfaceControl; -struct Surface; -struct IMediaPlayer; -struct IStreamListener; - -// This class reassembles incoming RTP packets into the correct order -// and sends the resulting transport stream to a mediaplayer instance -// for playback. 
-struct TunnelRenderer : public AHandler { - TunnelRenderer( - const sp<AMessage> ¬ifyLost, - const sp<ISurfaceTexture> &surfaceTex); - - sp<ABuffer> dequeueBuffer(); - - enum { - kWhatQueueBuffer, - }; - -protected: - virtual void onMessageReceived(const sp<AMessage> &msg); - virtual ~TunnelRenderer(); - -private: - struct PlayerClient; - struct StreamSource; - - mutable Mutex mLock; - - sp<AMessage> mNotifyLost; - sp<ISurfaceTexture> mSurfaceTex; - - List<sp<ABuffer> > mPackets; - int64_t mTotalBytesQueued; - - sp<SurfaceComposerClient> mComposerClient; - sp<SurfaceControl> mSurfaceControl; - sp<Surface> mSurface; - sp<PlayerClient> mPlayerClient; - sp<IMediaPlayer> mPlayer; - sp<StreamSource> mStreamSource; - - int32_t mLastDequeuedExtSeqNo; - int64_t mFirstFailedAttemptUs; - bool mRequestedRetransmission; - - void initPlayer(); - void destroyPlayer(); - - void queueBuffer(const sp<ABuffer> &buffer); - - DISALLOW_EVIL_CONSTRUCTORS(TunnelRenderer); -}; - -} // namespace android - -#endif // TUNNEL_RENDERER_H_ diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp deleted file mode 100644 index fcd20d4..0000000 --- a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp +++ /dev/null @@ -1,644 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "WifiDisplaySink" -#include <utils/Log.h> - -#include "WifiDisplaySink.h" -#include "ParsedMessage.h" -#include "RTPSink.h" - -#include <media/stagefright/foundation/ABuffer.h> -#include <media/stagefright/foundation/ADebug.h> -#include <media/stagefright/foundation/AMessage.h> -#include <media/stagefright/MediaErrors.h> - -namespace android { - -WifiDisplaySink::WifiDisplaySink( - const sp<ANetworkSession> &netSession, - const sp<ISurfaceTexture> &surfaceTex) - : mState(UNDEFINED), - mNetSession(netSession), - mSurfaceTex(surfaceTex), - mSessionID(0), - mNextCSeq(1) { -} - -WifiDisplaySink::~WifiDisplaySink() { -} - -void WifiDisplaySink::start(const char *sourceHost, int32_t sourcePort) { - sp<AMessage> msg = new AMessage(kWhatStart, id()); - msg->setString("sourceHost", sourceHost); - msg->setInt32("sourcePort", sourcePort); - msg->post(); -} - -void WifiDisplaySink::start(const char *uri) { - sp<AMessage> msg = new AMessage(kWhatStart, id()); - msg->setString("setupURI", uri); - msg->post(); -} - -// static -bool WifiDisplaySink::ParseURL( - const char *url, AString *host, int32_t *port, AString *path, - AString *user, AString *pass) { - host->clear(); - *port = 0; - path->clear(); - user->clear(); - pass->clear(); - - if (strncasecmp("rtsp://", url, 7)) { - return false; - } - - const char *slashPos = strchr(&url[7], '/'); - - if (slashPos == NULL) { - host->setTo(&url[7]); - path->setTo("/"); - } else { - host->setTo(&url[7], slashPos - &url[7]); - path->setTo(slashPos); - } - - ssize_t atPos = host->find("@"); - - if (atPos >= 0) { - // Split of user:pass@ from hostname. 
- - AString userPass(*host, 0, atPos); - host->erase(0, atPos + 1); - - ssize_t colonPos = userPass.find(":"); - - if (colonPos < 0) { - *user = userPass; - } else { - user->setTo(userPass, 0, colonPos); - pass->setTo(userPass, colonPos + 1, userPass.size() - colonPos - 1); - } - } - - const char *colonPos = strchr(host->c_str(), ':'); - - if (colonPos != NULL) { - char *end; - unsigned long x = strtoul(colonPos + 1, &end, 10); - - if (end == colonPos + 1 || *end != '\0' || x >= 65536) { - return false; - } - - *port = x; - - size_t colonOffset = colonPos - host->c_str(); - size_t trailing = host->size() - colonOffset; - host->erase(colonOffset, trailing); - } else { - *port = 554; - } - - return true; -} - -void WifiDisplaySink::onMessageReceived(const sp<AMessage> &msg) { - switch (msg->what()) { - case kWhatStart: - { - int32_t sourcePort; - - if (msg->findString("setupURI", &mSetupURI)) { - AString path, user, pass; - CHECK(ParseURL( - mSetupURI.c_str(), - &mRTSPHost, &sourcePort, &path, &user, &pass) - && user.empty() && pass.empty()); - } else { - CHECK(msg->findString("sourceHost", &mRTSPHost)); - CHECK(msg->findInt32("sourcePort", &sourcePort)); - } - - sp<AMessage> notify = new AMessage(kWhatRTSPNotify, id()); - - status_t err = mNetSession->createRTSPClient( - mRTSPHost.c_str(), sourcePort, notify, &mSessionID); - CHECK_EQ(err, (status_t)OK); - - mState = CONNECTING; - break; - } - - case kWhatRTSPNotify: - { - int32_t reason; - CHECK(msg->findInt32("reason", &reason)); - - switch (reason) { - case ANetworkSession::kWhatError: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - int32_t err; - CHECK(msg->findInt32("err", &err)); - - AString detail; - CHECK(msg->findString("detail", &detail)); - - ALOGE("An error occurred in session %d (%d, '%s/%s').", - sessionID, - err, - detail.c_str(), - strerror(-err)); - - if (sessionID == mSessionID) { - ALOGI("Lost control connection."); - - // The control connection is dead now. 
- mNetSession->destroySession(mSessionID); - mSessionID = 0; - - looper()->stop(); - } - break; - } - - case ANetworkSession::kWhatConnected: - { - ALOGI("We're now connected."); - mState = CONNECTED; - - if (!mSetupURI.empty()) { - status_t err = - sendDescribe(mSessionID, mSetupURI.c_str()); - - CHECK_EQ(err, (status_t)OK); - } - break; - } - - case ANetworkSession::kWhatData: - { - onReceiveClientData(msg); - break; - } - - case ANetworkSession::kWhatBinaryData: - { - CHECK(sUseTCPInterleaving); - - int32_t channel; - CHECK(msg->findInt32("channel", &channel)); - - sp<ABuffer> data; - CHECK(msg->findBuffer("data", &data)); - - mRTPSink->injectPacket(channel == 0 /* isRTP */, data); - break; - } - - default: - TRESPASS(); - } - break; - } - - case kWhatStop: - { - looper()->stop(); - break; - } - - default: - TRESPASS(); - } -} - -void WifiDisplaySink::registerResponseHandler( - int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func) { - ResponseID id; - id.mSessionID = sessionID; - id.mCSeq = cseq; - mResponseHandlers.add(id, func); -} - -status_t WifiDisplaySink::sendM2(int32_t sessionID) { - AString request = "OPTIONS * RTSP/1.0\r\n"; - AppendCommonResponse(&request, mNextCSeq); - - request.append( - "Require: org.wfa.wfd1.0\r\n" - "\r\n"); - - status_t err = - mNetSession->sendRequest(sessionID, request.c_str(), request.size()); - - if (err != OK) { - return err; - } - - registerResponseHandler( - sessionID, mNextCSeq, &WifiDisplaySink::onReceiveM2Response); - - ++mNextCSeq; - - return OK; -} - -status_t WifiDisplaySink::onReceiveM2Response( - int32_t sessionID, const sp<ParsedMessage> &msg) { - int32_t statusCode; - if (!msg->getStatusCode(&statusCode)) { - return ERROR_MALFORMED; - } - - if (statusCode != 200) { - return ERROR_UNSUPPORTED; - } - - return OK; -} - -status_t WifiDisplaySink::onReceiveDescribeResponse( - int32_t sessionID, const sp<ParsedMessage> &msg) { - int32_t statusCode; - if (!msg->getStatusCode(&statusCode)) { - return 
ERROR_MALFORMED; - } - - if (statusCode != 200) { - return ERROR_UNSUPPORTED; - } - - return sendSetup(sessionID, mSetupURI.c_str()); -} - -status_t WifiDisplaySink::onReceiveSetupResponse( - int32_t sessionID, const sp<ParsedMessage> &msg) { - int32_t statusCode; - if (!msg->getStatusCode(&statusCode)) { - return ERROR_MALFORMED; - } - - if (statusCode != 200) { - return ERROR_UNSUPPORTED; - } - - if (!msg->findString("session", &mPlaybackSessionID)) { - return ERROR_MALFORMED; - } - - if (!ParsedMessage::GetInt32Attribute( - mPlaybackSessionID.c_str(), - "timeout", - &mPlaybackSessionTimeoutSecs)) { - mPlaybackSessionTimeoutSecs = -1; - } - - ssize_t colonPos = mPlaybackSessionID.find(";"); - if (colonPos >= 0) { - // Strip any options from the returned session id. - mPlaybackSessionID.erase( - colonPos, mPlaybackSessionID.size() - colonPos); - } - - status_t err = configureTransport(msg); - - if (err != OK) { - return err; - } - - mState = PAUSED; - - return sendPlay( - sessionID, - !mSetupURI.empty() - ? 
mSetupURI.c_str() : "rtsp://x.x.x.x:x/wfd1.0/streamid=0"); -} - -status_t WifiDisplaySink::configureTransport(const sp<ParsedMessage> &msg) { - if (sUseTCPInterleaving) { - return OK; - } - - AString transport; - if (!msg->findString("transport", &transport)) { - ALOGE("Missing 'transport' field in SETUP response."); - return ERROR_MALFORMED; - } - - AString sourceHost; - if (!ParsedMessage::GetAttribute( - transport.c_str(), "source", &sourceHost)) { - sourceHost = mRTSPHost; - } - - AString serverPortStr; - if (!ParsedMessage::GetAttribute( - transport.c_str(), "server_port", &serverPortStr)) { - ALOGE("Missing 'server_port' in Transport field."); - return ERROR_MALFORMED; - } - - int rtpPort, rtcpPort; - if (sscanf(serverPortStr.c_str(), "%d-%d", &rtpPort, &rtcpPort) != 2 - || rtpPort <= 0 || rtpPort > 65535 - || rtcpPort <=0 || rtcpPort > 65535 - || rtcpPort != rtpPort + 1) { - ALOGE("Invalid server_port description '%s'.", - serverPortStr.c_str()); - - return ERROR_MALFORMED; - } - - if (rtpPort & 1) { - ALOGW("Server picked an odd numbered RTP port."); - } - - return mRTPSink->connect(sourceHost.c_str(), rtpPort, rtcpPort); -} - -status_t WifiDisplaySink::onReceivePlayResponse( - int32_t sessionID, const sp<ParsedMessage> &msg) { - int32_t statusCode; - if (!msg->getStatusCode(&statusCode)) { - return ERROR_MALFORMED; - } - - if (statusCode != 200) { - return ERROR_UNSUPPORTED; - } - - mState = PLAYING; - - return OK; -} - -void WifiDisplaySink::onReceiveClientData(const sp<AMessage> &msg) { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - sp<RefBase> obj; - CHECK(msg->findObject("data", &obj)); - - sp<ParsedMessage> data = - static_cast<ParsedMessage *>(obj.get()); - - ALOGV("session %d received '%s'", - sessionID, data->debugString().c_str()); - - AString method; - AString uri; - data->getRequestField(0, &method); - - int32_t cseq; - if (!data->findInt32("cseq", &cseq)) { - sendErrorResponse(sessionID, "400 Bad Request", -1 /* cseq 
*/); - return; - } - - if (method.startsWith("RTSP/")) { - // This is a response. - - ResponseID id; - id.mSessionID = sessionID; - id.mCSeq = cseq; - - ssize_t index = mResponseHandlers.indexOfKey(id); - - if (index < 0) { - ALOGW("Received unsolicited server response, cseq %d", cseq); - return; - } - - HandleRTSPResponseFunc func = mResponseHandlers.valueAt(index); - mResponseHandlers.removeItemsAt(index); - - status_t err = (this->*func)(sessionID, data); - CHECK_EQ(err, (status_t)OK); - } else { - AString version; - data->getRequestField(2, &version); - if (!(version == AString("RTSP/1.0"))) { - sendErrorResponse(sessionID, "505 RTSP Version not supported", cseq); - return; - } - - if (method == "OPTIONS") { - onOptionsRequest(sessionID, cseq, data); - } else if (method == "GET_PARAMETER") { - onGetParameterRequest(sessionID, cseq, data); - } else if (method == "SET_PARAMETER") { - onSetParameterRequest(sessionID, cseq, data); - } else { - sendErrorResponse(sessionID, "405 Method Not Allowed", cseq); - } - } -} - -void WifiDisplaySink::onOptionsRequest( - int32_t sessionID, - int32_t cseq, - const sp<ParsedMessage> &data) { - AString response = "RTSP/1.0 200 OK\r\n"; - AppendCommonResponse(&response, cseq); - response.append("Public: org.wfa.wfd1.0, GET_PARAMETER, SET_PARAMETER\r\n"); - response.append("\r\n"); - - status_t err = mNetSession->sendRequest(sessionID, response.c_str()); - CHECK_EQ(err, (status_t)OK); - - err = sendM2(sessionID); - CHECK_EQ(err, (status_t)OK); -} - -void WifiDisplaySink::onGetParameterRequest( - int32_t sessionID, - int32_t cseq, - const sp<ParsedMessage> &data) { - AString body = - "wfd_video_formats: xxx\r\n" - "wfd_audio_codecs: xxx\r\n" - "wfd_client_rtp_ports: RTP/AVP/UDP;unicast xxx 0 mode=play\r\n"; - - AString response = "RTSP/1.0 200 OK\r\n"; - AppendCommonResponse(&response, cseq); - response.append("Content-Type: text/parameters\r\n"); - response.append(StringPrintf("Content-Length: %d\r\n", body.size())); - 
response.append("\r\n"); - response.append(body); - - status_t err = mNetSession->sendRequest(sessionID, response.c_str()); - CHECK_EQ(err, (status_t)OK); -} - -status_t WifiDisplaySink::sendDescribe(int32_t sessionID, const char *uri) { - uri = "rtsp://xwgntvx.is.livestream-api.com/livestreamiphone/wgntv"; - uri = "rtsp://v2.cache6.c.youtube.com/video.3gp?cid=e101d4bf280055f9&fmt=18"; - - AString request = StringPrintf("DESCRIBE %s RTSP/1.0\r\n", uri); - AppendCommonResponse(&request, mNextCSeq); - - request.append("Accept: application/sdp\r\n"); - request.append("\r\n"); - - status_t err = mNetSession->sendRequest( - sessionID, request.c_str(), request.size()); - - if (err != OK) { - return err; - } - - registerResponseHandler( - sessionID, mNextCSeq, &WifiDisplaySink::onReceiveDescribeResponse); - - ++mNextCSeq; - - return OK; -} - -status_t WifiDisplaySink::sendSetup(int32_t sessionID, const char *uri) { - mRTPSink = new RTPSink(mNetSession, mSurfaceTex); - looper()->registerHandler(mRTPSink); - - status_t err = mRTPSink->init(sUseTCPInterleaving); - - if (err != OK) { - looper()->unregisterHandler(mRTPSink->id()); - mRTPSink.clear(); - return err; - } - - AString request = StringPrintf("SETUP %s RTSP/1.0\r\n", uri); - - AppendCommonResponse(&request, mNextCSeq); - - if (sUseTCPInterleaving) { - request.append("Transport: RTP/AVP/TCP;interleaved=0-1\r\n"); - } else { - int32_t rtpPort = mRTPSink->getRTPPort(); - - request.append( - StringPrintf( - "Transport: RTP/AVP/UDP;unicast;client_port=%d-%d\r\n", - rtpPort, rtpPort + 1)); - } - - request.append("\r\n"); - - ALOGV("request = '%s'", request.c_str()); - - err = mNetSession->sendRequest(sessionID, request.c_str(), request.size()); - - if (err != OK) { - return err; - } - - registerResponseHandler( - sessionID, mNextCSeq, &WifiDisplaySink::onReceiveSetupResponse); - - ++mNextCSeq; - - return OK; -} - -status_t WifiDisplaySink::sendPlay(int32_t sessionID, const char *uri) { - AString request = 
StringPrintf("PLAY %s RTSP/1.0\r\n", uri); - - AppendCommonResponse(&request, mNextCSeq); - - request.append(StringPrintf("Session: %s\r\n", mPlaybackSessionID.c_str())); - request.append("\r\n"); - - status_t err = - mNetSession->sendRequest(sessionID, request.c_str(), request.size()); - - if (err != OK) { - return err; - } - - registerResponseHandler( - sessionID, mNextCSeq, &WifiDisplaySink::onReceivePlayResponse); - - ++mNextCSeq; - - return OK; -} - -void WifiDisplaySink::onSetParameterRequest( - int32_t sessionID, - int32_t cseq, - const sp<ParsedMessage> &data) { - const char *content = data->getContent(); - - if (strstr(content, "wfd_trigger_method: SETUP\r\n") != NULL) { - status_t err = - sendSetup( - sessionID, - "rtsp://x.x.x.x:x/wfd1.0/streamid=0"); - - CHECK_EQ(err, (status_t)OK); - } - - AString response = "RTSP/1.0 200 OK\r\n"; - AppendCommonResponse(&response, cseq); - response.append("\r\n"); - - status_t err = mNetSession->sendRequest(sessionID, response.c_str()); - CHECK_EQ(err, (status_t)OK); -} - -void WifiDisplaySink::sendErrorResponse( - int32_t sessionID, - const char *errorDetail, - int32_t cseq) { - AString response; - response.append("RTSP/1.0 "); - response.append(errorDetail); - response.append("\r\n"); - - AppendCommonResponse(&response, cseq); - - response.append("\r\n"); - - status_t err = mNetSession->sendRequest(sessionID, response.c_str()); - CHECK_EQ(err, (status_t)OK); -} - -// static -void WifiDisplaySink::AppendCommonResponse(AString *response, int32_t cseq) { - time_t now = time(NULL); - struct tm *now2 = gmtime(&now); - char buf[128]; - strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S %z", now2); - - response->append("Date: "); - response->append(buf); - response->append("\r\n"); - - response->append("User-Agent: stagefright/1.1 (Linux;Android 4.1)\r\n"); - - if (cseq >= 0) { - response->append(StringPrintf("CSeq: %d\r\n", cseq)); - } -} - -} // namespace android diff --git 
a/media/libstagefright/wifi-display/sink/WifiDisplaySink.h b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h deleted file mode 100644 index f886ee5..0000000 --- a/media/libstagefright/wifi-display/sink/WifiDisplaySink.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef WIFI_DISPLAY_SINK_H_ - -#define WIFI_DISPLAY_SINK_H_ - -#include "ANetworkSession.h" - -#include <gui/Surface.h> -#include <media/stagefright/foundation/AHandler.h> - -namespace android { - -struct ParsedMessage; -struct RTPSink; - -// Represents the RTSP client acting as a wifi display sink. -// Connects to a wifi display source and renders the incoming -// transport stream using a MediaPlayer instance. 
-struct WifiDisplaySink : public AHandler { - WifiDisplaySink( - const sp<ANetworkSession> &netSession, - const sp<ISurfaceTexture> &surfaceTex = NULL); - - void start(const char *sourceHost, int32_t sourcePort); - void start(const char *uri); - -protected: - virtual ~WifiDisplaySink(); - virtual void onMessageReceived(const sp<AMessage> &msg); - -private: - enum State { - UNDEFINED, - CONNECTING, - CONNECTED, - PAUSED, - PLAYING, - }; - - enum { - kWhatStart, - kWhatRTSPNotify, - kWhatStop, - }; - - struct ResponseID { - int32_t mSessionID; - int32_t mCSeq; - - bool operator<(const ResponseID &other) const { - return mSessionID < other.mSessionID - || (mSessionID == other.mSessionID - && mCSeq < other.mCSeq); - } - }; - - typedef status_t (WifiDisplaySink::*HandleRTSPResponseFunc)( - int32_t sessionID, const sp<ParsedMessage> &msg); - - static const bool sUseTCPInterleaving = false; - - State mState; - sp<ANetworkSession> mNetSession; - sp<ISurfaceTexture> mSurfaceTex; - AString mSetupURI; - AString mRTSPHost; - int32_t mSessionID; - - int32_t mNextCSeq; - - KeyedVector<ResponseID, HandleRTSPResponseFunc> mResponseHandlers; - - sp<RTPSink> mRTPSink; - AString mPlaybackSessionID; - int32_t mPlaybackSessionTimeoutSecs; - - status_t sendM2(int32_t sessionID); - status_t sendDescribe(int32_t sessionID, const char *uri); - status_t sendSetup(int32_t sessionID, const char *uri); - status_t sendPlay(int32_t sessionID, const char *uri); - - status_t onReceiveM2Response( - int32_t sessionID, const sp<ParsedMessage> &msg); - - status_t onReceiveDescribeResponse( - int32_t sessionID, const sp<ParsedMessage> &msg); - - status_t onReceiveSetupResponse( - int32_t sessionID, const sp<ParsedMessage> &msg); - - status_t configureTransport(const sp<ParsedMessage> &msg); - - status_t onReceivePlayResponse( - int32_t sessionID, const sp<ParsedMessage> &msg); - - void registerResponseHandler( - int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func); - - void 
onReceiveClientData(const sp<AMessage> &msg); - - void onOptionsRequest( - int32_t sessionID, - int32_t cseq, - const sp<ParsedMessage> &data); - - void onGetParameterRequest( - int32_t sessionID, - int32_t cseq, - const sp<ParsedMessage> &data); - - void onSetParameterRequest( - int32_t sessionID, - int32_t cseq, - const sp<ParsedMessage> &data); - - void sendErrorResponse( - int32_t sessionID, - const char *errorDetail, - int32_t cseq); - - static void AppendCommonResponse(AString *response, int32_t cseq); - - bool ParseURL( - const char *url, AString *host, int32_t *port, AString *path, - AString *user, AString *pass); - - DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySink); -}; - -} // namespace android - -#endif // WIFI_DISPLAY_SINK_H_ diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp index 7a87444..5344623 100644 --- a/media/libstagefright/wifi-display/source/Converter.cpp +++ b/media/libstagefright/wifi-display/source/Converter.cpp @@ -23,7 +23,7 @@ #include "MediaPuller.h" #include <cutils/properties.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <media/ICrypto.h> #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> @@ -54,6 +54,8 @@ Converter::Converter( ,mFirstSilentFrameUs(-1ll) ,mInSilentMode(false) #endif + ,mPrevVideoBitrate(-1) + ,mNumFramesToDrop(0) { AString mime; CHECK(mInputFormat->findString("mime", &mime)); @@ -67,11 +69,45 @@ Converter::Converter( mInitCheck = initEncoder(); if (mInitCheck != OK) { - if (mEncoder != NULL) { - mEncoder->release(); - mEncoder.clear(); - } + releaseEncoder(); + } +} + +static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) { + void *mbuf; + if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf) + && mbuf != NULL) { + ALOGV("releasing mbuf %p", mbuf); + + accessUnit->meta()->setPointer("mediaBuffer", NULL); + + static_cast<MediaBuffer *>(mbuf)->release(); 
+ mbuf = NULL; + } +} + +void Converter::releaseEncoder() { + if (mEncoder == NULL) { + return; + } + + mEncoder->release(); + mEncoder.clear(); + + while (!mInputBufferQueue.empty()) { + sp<ABuffer> accessUnit = *mInputBufferQueue.begin(); + mInputBufferQueue.erase(mInputBufferQueue.begin()); + + ReleaseMediaBufferReference(accessUnit); } + + for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) { + sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i); + ReleaseMediaBufferReference(accessUnit); + } + + mEncoderInputBuffers.clear(); + mEncoderOutputBuffers.clear(); } Converter::~Converter() { @@ -99,7 +135,9 @@ bool Converter::needToManuallyPrependSPSPPS() const { return mNeedToManuallyPrependSPSPPS; } -static int32_t getBitrate(const char *propName, int32_t defaultValue) { +// static +int32_t Converter::GetInt32Property( + const char *propName, int32_t defaultValue) { char val[PROPERTY_VALUE_MAX]; if (property_get(propName, val, NULL)) { char *end; @@ -149,8 +187,9 @@ status_t Converter::initEncoder() { mOutputFormat->setString("mime", outputMIME.c_str()); - int32_t audioBitrate = getBitrate("media.wfd.audio-bitrate", 128000); - int32_t videoBitrate = getBitrate("media.wfd.video-bitrate", 5000000); + int32_t audioBitrate = GetInt32Property("media.wfd.audio-bitrate", 128000); + int32_t videoBitrate = GetInt32Property("media.wfd.video-bitrate", 5000000); + mPrevVideoBitrate = videoBitrate; ALOGI("using audio bitrate of %d bps, video bitrate of %d bps", audioBitrate, videoBitrate); @@ -274,16 +313,7 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) { sp<ABuffer> accessUnit; CHECK(msg->findBuffer("accessUnit", &accessUnit)); - void *mbuf; - if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf) - && mbuf != NULL) { - ALOGV("releasing mbuf %p", mbuf); - - accessUnit->meta()->setPointer("mediaBuffer", NULL); - - static_cast<MediaBuffer *>(mbuf)->release(); - mbuf = NULL; - } + ReleaseMediaBufferReference(accessUnit); } break; } @@ -300,6 +330,13 @@ 
void Converter::onMessageReceived(const sp<AMessage> &msg) { sp<ABuffer> accessUnit; CHECK(msg->findBuffer("accessUnit", &accessUnit)); + if (mIsVideo && mNumFramesToDrop) { + --mNumFramesToDrop; + ALOGI("dropping frame."); + ReleaseMediaBufferReference(accessUnit); + break; + } + #if 0 void *mbuf; if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf) @@ -385,12 +422,9 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) { case kWhatShutdown: { - ALOGI("shutting down encoder"); + ALOGI("shutting down %s encoder", mIsVideo ? "video" : "audio"); - if (mEncoder != NULL) { - mEncoder->release(); - mEncoder.clear(); - } + releaseEncoder(); AString mime; CHECK(mInputFormat->findString("mime", &mime)); @@ -398,6 +432,12 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) { break; } + case kWhatDropAFrame: + { + ++mNumFramesToDrop; + break; + } + default: TRESPASS(); } @@ -609,6 +649,13 @@ status_t Converter::doMoreWork() { &bufferIndex, &offset, &size, &timeUs, &flags); if (err != OK) { + if (err == INFO_FORMAT_CHANGED) { + continue; + } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) { + mEncoder->getOutputBuffers(&mEncoderOutputBuffers); + continue; + } + if (err == -EAGAIN) { err = OK; } @@ -654,4 +701,23 @@ void Converter::requestIDRFrame() { (new AMessage(kWhatRequestIDRFrame, id()))->post(); } +void Converter::dropAFrame() { + (new AMessage(kWhatDropAFrame, id()))->post(); +} + +int32_t Converter::getVideoBitrate() const { + return mPrevVideoBitrate; +} + +void Converter::setVideoBitrate(int32_t bitRate) { + if (mIsVideo && mEncoder != NULL && bitRate != mPrevVideoBitrate) { + sp<AMessage> params = new AMessage; + params->setInt32("videoBitrate", bitRate); + + mEncoder->setParameters(params); + + mPrevVideoBitrate = bitRate; + } +} + } // namespace android diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h index 0665eea..ba297c4 100644 --- 
a/media/libstagefright/wifi-display/source/Converter.h +++ b/media/libstagefright/wifi-display/source/Converter.h @@ -51,6 +51,8 @@ struct Converter : public AHandler { void requestIDRFrame(); + void dropAFrame(); + enum { kWhatAccessUnit, kWhatEOS, @@ -63,10 +65,16 @@ struct Converter : public AHandler { kWhatShutdown, kWhatMediaPullerNotify, kWhatEncoderActivity, + kWhatDropAFrame, }; void shutdownAsync(); + int32_t getVideoBitrate() const; + void setVideoBitrate(int32_t bitrate); + + static int32_t GetInt32Property(const char *propName, int32_t defaultValue); + protected: virtual ~Converter(); virtual void onMessageReceived(const sp<AMessage> &msg); @@ -100,7 +108,12 @@ private: sp<ABuffer> mPartialAudioAU; + int32_t mPrevVideoBitrate; + + int32_t mNumFramesToDrop; + status_t initEncoder(); + void releaseEncoder(); status_t feedEncoderInputBuffers(); diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp index 916f797..3d7b865 100644 --- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp +++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp @@ -23,14 +23,11 @@ #include "Converter.h" #include "MediaPuller.h" #include "RepeaterSource.h" -#include "Sender.h" -#include "TSPacketizer.h" #include "include/avc_utils.h" #include "WifiDisplaySource.h" #include <binder/IServiceManager.h> -#include <gui/ISurfaceComposer.h> -#include <gui/SurfaceComposerClient.h> +#include <cutils/properties.h> #include <media/IHDCP.h> #include <media/stagefright/foundation/ABitReader.h> #include <media/stagefright/foundation/ABuffer.h> @@ -41,10 +38,9 @@ #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> -#include <media/stagefright/MediaExtractor.h> #include <media/stagefright/MediaSource.h> #include <media/stagefright/MetaData.h> -#include <media/stagefright/MPEG2TSWriter.h> +#include 
<media/stagefright/NuMediaExtractor.h> #include <media/stagefright/SurfaceMediaSource.h> #include <media/stagefright/Utils.h> @@ -63,15 +59,18 @@ struct WifiDisplaySource::PlaybackSession::Track : public AHandler { const sp<MediaPuller> &mediaPuller, const sp<Converter> &converter); + Track(const sp<AMessage> ¬ify, const sp<AMessage> &format); + void setRepeaterSource(const sp<RepeaterSource> &source); sp<AMessage> getFormat(); bool isAudio() const; const sp<Converter> &converter() const; - ssize_t packetizerTrackIndex() const; + const sp<RepeaterSource> &repeaterSource() const; - void setPacketizerTrackIndex(size_t index); + ssize_t mediaSenderTrackIndex() const; + void setMediaSenderTrackIndex(size_t index); status_t start(); void stopAsync(); @@ -110,8 +109,9 @@ private: sp<ALooper> mCodecLooper; sp<MediaPuller> mMediaPuller; sp<Converter> mConverter; + sp<AMessage> mFormat; bool mStarted; - ssize_t mPacketizerTrackIndex; + ssize_t mMediaSenderTrackIndex; bool mIsAudio; List<sp<ABuffer> > mQueuedAccessUnits; sp<RepeaterSource> mRepeaterSource; @@ -135,11 +135,19 @@ WifiDisplaySource::PlaybackSession::Track::Track( mMediaPuller(mediaPuller), mConverter(converter), mStarted(false), - mPacketizerTrackIndex(-1), mIsAudio(IsAudioFormat(mConverter->getOutputFormat())), mLastOutputBufferQueuedTimeUs(-1ll) { } +WifiDisplaySource::PlaybackSession::Track::Track( + const sp<AMessage> ¬ify, const sp<AMessage> &format) + : mNotify(notify), + mFormat(format), + mStarted(false), + mIsAudio(IsAudioFormat(format)), + mLastOutputBufferQueuedTimeUs(-1ll) { +} + WifiDisplaySource::PlaybackSession::Track::~Track() { CHECK(!mStarted); } @@ -154,7 +162,7 @@ bool WifiDisplaySource::PlaybackSession::Track::IsAudioFormat( } sp<AMessage> WifiDisplaySource::PlaybackSession::Track::getFormat() { - return mConverter->getOutputFormat(); + return mFormat != NULL ? 
mFormat : mConverter->getOutputFormat(); } bool WifiDisplaySource::PlaybackSession::Track::isAudio() const { @@ -165,13 +173,19 @@ const sp<Converter> &WifiDisplaySource::PlaybackSession::Track::converter() cons return mConverter; } -ssize_t WifiDisplaySource::PlaybackSession::Track::packetizerTrackIndex() const { - return mPacketizerTrackIndex; +const sp<RepeaterSource> & +WifiDisplaySource::PlaybackSession::Track::repeaterSource() const { + return mRepeaterSource; +} + +ssize_t WifiDisplaySource::PlaybackSession::Track::mediaSenderTrackIndex() const { + CHECK_GE(mMediaSenderTrackIndex, 0); + return mMediaSenderTrackIndex; } -void WifiDisplaySource::PlaybackSession::Track::setPacketizerTrackIndex(size_t index) { - CHECK_LT(mPacketizerTrackIndex, 0); - mPacketizerTrackIndex = index; +void WifiDisplaySource::PlaybackSession::Track::setMediaSenderTrackIndex( + size_t index) { + mMediaSenderTrackIndex = index; } status_t WifiDisplaySource::PlaybackSession::Track::start() { @@ -195,7 +209,9 @@ status_t WifiDisplaySource::PlaybackSession::Track::start() { void WifiDisplaySource::PlaybackSession::Track::stopAsync() { ALOGV("Track::stopAsync isAudio=%d", mIsAudio); - mConverter->shutdownAsync(); + if (mConverter != NULL) { + mConverter->shutdownAsync(); + } sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, id()); @@ -207,6 +223,7 @@ void WifiDisplaySource::PlaybackSession::Track::stopAsync() { mMediaPuller->stopAsync(msg); } else { + mStarted = false; msg->post(); } } @@ -330,45 +347,68 @@ WifiDisplaySource::PlaybackSession::PlaybackSession( const sp<ANetworkSession> &netSession, const sp<AMessage> ¬ify, const in_addr &interfaceAddr, - const sp<IHDCP> &hdcp) + const sp<IHDCP> &hdcp, + const char *path) : mNetSession(netSession), mNotify(notify), mInterfaceAddr(interfaceAddr), mHDCP(hdcp), + mLocalRTPPort(-1), mWeAreDead(false), mPaused(false), mLastLifesignUs(), mVideoTrackIndex(-1), mPrevTimeUs(-1ll), - mAllTracksHavePacketizerIndex(false) { + 
mPullExtractorPending(false), + mPullExtractorGeneration(0), + mFirstSampleTimeRealUs(-1ll), + mFirstSampleTimeUs(-1ll) { + if (path != NULL) { + mMediaPath.setTo(path); + } } status_t WifiDisplaySource::PlaybackSession::init( - const char *clientIP, int32_t clientRtp, int32_t clientRtcp, - Sender::TransportMode transportMode, - bool usePCMAudio) { - status_t err = setupPacketizer(usePCMAudio); + const char *clientIP, + int32_t clientRtp, + RTPSender::TransportMode rtpMode, + int32_t clientRtcp, + RTPSender::TransportMode rtcpMode, + bool enableAudio, + bool usePCMAudio, + bool enableVideo, + VideoFormats::ResolutionType videoResolutionType, + size_t videoResolutionIndex) { + sp<AMessage> notify = new AMessage(kWhatMediaSenderNotify, id()); + mMediaSender = new MediaSender(mNetSession, notify); + looper()->registerHandler(mMediaSender); + + mMediaSender->setHDCP(mHDCP); + + status_t err = setupPacketizer( + enableAudio, + usePCMAudio, + enableVideo, + videoResolutionType, + videoResolutionIndex); - if (err != OK) { - return err; + if (err == OK) { + err = mMediaSender->initAsync( + -1 /* trackIndex */, + clientIP, + clientRtp, + rtpMode, + clientRtcp, + rtcpMode, + &mLocalRTPPort); } - sp<AMessage> notify = new AMessage(kWhatSenderNotify, id()); - mSender = new Sender(mNetSession, notify); - - mSenderLooper = new ALooper; - mSenderLooper->setName("sender_looper"); - - mSenderLooper->start( - false /* runOnCallingThread */, - false /* canCallJava */, - PRIORITY_AUDIO); - - mSenderLooper->registerHandler(mSender); + if (err != OK) { + mLocalRTPPort = -1; - err = mSender->init(clientIP, clientRtp, clientRtcp, transportMode); + looper()->unregisterHandler(mMediaSender->id()); + mMediaSender.clear(); - if (err != OK) { return err; } @@ -381,7 +421,7 @@ WifiDisplaySource::PlaybackSession::~PlaybackSession() { } int32_t WifiDisplaySource::PlaybackSession::getRTPPort() const { - return mSender->getRTPPort(); + return mLocalRTPPort; } int64_t 
WifiDisplaySource::PlaybackSession::getLastLifesignUs() const { @@ -400,19 +440,7 @@ status_t WifiDisplaySource::PlaybackSession::play() { return OK; } -status_t WifiDisplaySource::PlaybackSession::finishPlay() { - // XXX Give the dongle a second to bind its sockets. - (new AMessage(kWhatFinishPlay, id()))->post(1000000ll); - return OK; -} - -status_t WifiDisplaySource::PlaybackSession::onFinishPlay() { - return mSender->finishInit(); -} - -status_t WifiDisplaySource::PlaybackSession::onFinishPlay2() { - mSender->scheduleSendSR(); - +status_t WifiDisplaySource::PlaybackSession::onMediaSenderInitialized() { for (size_t i = 0; i < mTracks.size(); ++i) { CHECK_EQ((status_t)OK, mTracks.editValueAt(i)->start()); } @@ -459,44 +487,18 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived( CHECK(msg->findSize("trackIndex", &trackIndex)); if (what == Converter::kWhatAccessUnit) { - const sp<Track> &track = mTracks.valueFor(trackIndex); - - ssize_t packetizerTrackIndex = track->packetizerTrackIndex(); - - if (packetizerTrackIndex < 0) { - sp<AMessage> trackFormat = track->getFormat()->dup(); - if (mHDCP != NULL && !track->isAudio()) { - // HDCP2.0 _and_ HDCP 2.1 specs say to set the version - // inside the HDCP descriptor to 0x20!!! 
- trackFormat->setInt32("hdcp-version", 0x20); - } - packetizerTrackIndex = mPacketizer->addTrack(trackFormat); - - CHECK_GE(packetizerTrackIndex, 0); - - track->setPacketizerTrackIndex(packetizerTrackIndex); - - if (allTracksHavePacketizerIndex()) { - status_t err = packetizeQueuedAccessUnits(); - - if (err != OK) { - notifySessionDead(); - break; - } - } - } - sp<ABuffer> accessUnit; CHECK(msg->findBuffer("accessUnit", &accessUnit)); - if (!allTracksHavePacketizerIndex()) { - track->queueAccessUnit(accessUnit); - break; - } + const sp<Track> &track = mTracks.valueFor(trackIndex); - track->queueOutputBuffer(accessUnit); + status_t err = mMediaSender->queueAccessUnit( + track->mediaSenderTrackIndex(), + accessUnit); - drainAccessUnits(); + if (err != OK) { + notifySessionDead(); + } break; } else if (what == Converter::kWhatEOS) { CHECK_EQ(what, Converter::kWhatEOS); @@ -528,25 +530,38 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived( break; } - case kWhatSenderNotify: + case kWhatMediaSenderNotify: { int32_t what; CHECK(msg->findInt32("what", &what)); - if (what == Sender::kWhatInitDone) { - onFinishPlay2(); - } else if (what == Sender::kWhatSessionDead) { + if (what == MediaSender::kWhatInitDone) { + status_t err; + CHECK(msg->findInt32("err", &err)); + + if (err == OK) { + onMediaSenderInitialized(); + } else { + notifySessionDead(); + } + } else if (what == MediaSender::kWhatError) { notifySessionDead(); + } else if (what == MediaSender::kWhatNetworkStall) { + size_t numBytesQueued; + CHECK(msg->findSize("numBytesQueued", &numBytesQueued)); + + if (mVideoTrackIndex >= 0) { + const sp<Track> &videoTrack = + mTracks.valueFor(mVideoTrackIndex); + + sp<Converter> converter = videoTrack->converter(); + if (converter != NULL) { + converter->dropAFrame(); + } + } } else { TRESPASS(); } - - break; - } - - case kWhatFinishPlay: - { - onFinishPlay(); break; } @@ -571,11 +586,8 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived( break; } - 
mSenderLooper->unregisterHandler(mSender->id()); - mSender.clear(); - mSenderLooper.clear(); - - mPacketizer.clear(); + looper()->unregisterHandler(mMediaSender->id()); + mMediaSender.clear(); sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", kWhatSessionDestroyed); @@ -584,30 +596,14 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived( break; } - case kWhatPacketize: + case kWhatPause: { - size_t trackIndex; - CHECK(msg->findSize("trackIndex", &trackIndex)); - - sp<ABuffer> accessUnit; - CHECK(msg->findBuffer("accessUnit", &accessUnit)); - -#if 0 - if ((ssize_t)trackIndex == mVideoTrackIndex) { - int64_t nowUs = ALooper::GetNowUs(); - static int64_t prevNowUs = 0ll; - - ALOGI("sending AU, dNowUs=%lld us", nowUs - prevNowUs); - - prevNowUs = nowUs; + if (mExtractor != NULL) { + ++mPullExtractorGeneration; + mFirstSampleTimeRealUs = -1ll; + mFirstSampleTimeUs = -1ll; } -#endif - break; - } - - case kWhatPause: - { if (mPaused) { break; } @@ -622,6 +618,10 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived( case kWhatResume: { + if (mExtractor != NULL) { + schedulePullExtractor(); + } + if (!mPaused) { break; } @@ -634,20 +634,177 @@ void WifiDisplaySource::PlaybackSession::onMessageReceived( break; } + case kWhatPullExtractorSample: + { + int32_t generation; + CHECK(msg->findInt32("generation", &generation)); + + if (generation != mPullExtractorGeneration) { + break; + } + + mPullExtractorPending = false; + + onPullExtractor(); + break; + } + default: TRESPASS(); } } -status_t WifiDisplaySource::PlaybackSession::setupPacketizer(bool usePCMAudio) { - mPacketizer = new TSPacketizer; +status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer( + bool enableAudio, bool enableVideo) { + DataSource::RegisterDefaultSniffers(); - status_t err = addVideoSource(); + mExtractor = new NuMediaExtractor; + + status_t err = mExtractor->setDataSource(mMediaPath.c_str()); if (err != OK) { return err; } + size_t n = mExtractor->countTracks(); 
+ bool haveAudio = false; + bool haveVideo = false; + for (size_t i = 0; i < n; ++i) { + sp<AMessage> format; + err = mExtractor->getTrackFormat(i, &format); + + if (err != OK) { + continue; + } + + AString mime; + CHECK(format->findString("mime", &mime)); + + bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6); + bool isVideo = !strncasecmp(mime.c_str(), "video/", 6); + + if (isAudio && enableAudio && !haveAudio) { + haveAudio = true; + } else if (isVideo && enableVideo && !haveVideo) { + haveVideo = true; + } else { + continue; + } + + err = mExtractor->selectTrack(i); + + size_t trackIndex = mTracks.size(); + + sp<AMessage> notify = new AMessage(kWhatTrackNotify, id()); + notify->setSize("trackIndex", trackIndex); + + sp<Track> track = new Track(notify, format); + looper()->registerHandler(track); + + mTracks.add(trackIndex, track); + + mExtractorTrackToInternalTrack.add(i, trackIndex); + + if (isVideo) { + mVideoTrackIndex = trackIndex; + } + + uint32_t flags = MediaSender::FLAG_MANUALLY_PREPEND_SPS_PPS; + + ssize_t mediaSenderTrackIndex = + mMediaSender->addTrack(format, flags); + CHECK_GE(mediaSenderTrackIndex, 0); + + track->setMediaSenderTrackIndex(mediaSenderTrackIndex); + + if ((haveAudio || !enableAudio) && (haveVideo || !enableVideo)) { + break; + } + } + + return OK; +} + +void WifiDisplaySource::PlaybackSession::schedulePullExtractor() { + if (mPullExtractorPending) { + return; + } + + int64_t sampleTimeUs; + status_t err = mExtractor->getSampleTime(&sampleTimeUs); + + int64_t nowUs = ALooper::GetNowUs(); + + if (mFirstSampleTimeRealUs < 0ll) { + mFirstSampleTimeRealUs = nowUs; + mFirstSampleTimeUs = sampleTimeUs; + } + + int64_t whenUs = sampleTimeUs - mFirstSampleTimeUs + mFirstSampleTimeRealUs; + + sp<AMessage> msg = new AMessage(kWhatPullExtractorSample, id()); + msg->setInt32("generation", mPullExtractorGeneration); + msg->post(whenUs - nowUs); + + mPullExtractorPending = true; +} + +void WifiDisplaySource::PlaybackSession::onPullExtractor() { 
+ sp<ABuffer> accessUnit = new ABuffer(1024 * 1024); + status_t err = mExtractor->readSampleData(accessUnit); + if (err != OK) { + // EOS. + return; + } + + int64_t timeUs; + CHECK_EQ((status_t)OK, mExtractor->getSampleTime(&timeUs)); + + accessUnit->meta()->setInt64( + "timeUs", mFirstSampleTimeRealUs + timeUs - mFirstSampleTimeUs); + + size_t trackIndex; + CHECK_EQ((status_t)OK, mExtractor->getSampleTrackIndex(&trackIndex)); + + sp<AMessage> msg = new AMessage(kWhatConverterNotify, id()); + + msg->setSize( + "trackIndex", mExtractorTrackToInternalTrack.valueFor(trackIndex)); + + msg->setInt32("what", Converter::kWhatAccessUnit); + msg->setBuffer("accessUnit", accessUnit); + msg->post(); + + mExtractor->advance(); + + schedulePullExtractor(); +} + +status_t WifiDisplaySource::PlaybackSession::setupPacketizer( + bool enableAudio, + bool usePCMAudio, + bool enableVideo, + VideoFormats::ResolutionType videoResolutionType, + size_t videoResolutionIndex) { + CHECK(enableAudio || enableVideo); + + if (!mMediaPath.empty()) { + return setupMediaPacketizer(enableAudio, enableVideo); + } + + if (enableVideo) { + status_t err = addVideoSource( + videoResolutionType, videoResolutionIndex); + + if (err != OK) { + return err; + } + } + + if (!enableAudio) { + return OK; + } + return addAudioSource(usePCMAudio); } @@ -732,30 +889,44 @@ status_t WifiDisplaySource::PlaybackSession::addSource( mVideoTrackIndex = trackIndex; } + uint32_t flags = 0; + if (converter->needToManuallyPrependSPSPPS()) { + flags |= MediaSender::FLAG_MANUALLY_PREPEND_SPS_PPS; + } + + ssize_t mediaSenderTrackIndex = + mMediaSender->addTrack(converter->getOutputFormat(), flags); + CHECK_GE(mediaSenderTrackIndex, 0); + + track->setMediaSenderTrackIndex(mediaSenderTrackIndex); + return OK; } -status_t WifiDisplaySource::PlaybackSession::addVideoSource() { - sp<SurfaceMediaSource> source = new SurfaceMediaSource(width(), height()); +status_t WifiDisplaySource::PlaybackSession::addVideoSource( + 
VideoFormats::ResolutionType videoResolutionType, + size_t videoResolutionIndex) { + size_t width, height, framesPerSecond; + bool interlaced; + CHECK(VideoFormats::GetConfiguration( + videoResolutionType, + videoResolutionIndex, + &width, + &height, + &framesPerSecond, + &interlaced)); + + sp<SurfaceMediaSource> source = new SurfaceMediaSource(width, height); source->setUseAbsoluteTimestamps(); -#if 1 sp<RepeaterSource> videoSource = - new RepeaterSource(source, 30.0 /* rateHz */); -#endif + new RepeaterSource(source, framesPerSecond); -#if 1 size_t numInputBuffers; status_t err = addSource( true /* isVideo */, videoSource, true /* isRepeaterSource */, false /* usePCMAudio */, &numInputBuffers); -#else - size_t numInputBuffers; - status_t err = addSource( - true /* isVideo */, source, false /* isRepeaterSource */, - false /* usePCMAudio */, &numInputBuffers); -#endif if (err != OK) { return err; @@ -786,26 +957,10 @@ status_t WifiDisplaySource::PlaybackSession::addAudioSource(bool usePCMAudio) { return OK; } -sp<ISurfaceTexture> WifiDisplaySource::PlaybackSession::getSurfaceTexture() { +sp<IGraphicBufferProducer> WifiDisplaySource::PlaybackSession::getSurfaceTexture() { return mBufferQueue; } -int32_t WifiDisplaySource::PlaybackSession::width() const { -#if USE_1080P - return 1920; -#else - return 1280; -#endif -} - -int32_t WifiDisplaySource::PlaybackSession::height() const { -#if USE_1080P - return 1080; -#else - return 720; -#endif -} - void WifiDisplaySource::PlaybackSession::requestIDRFrame() { for (size_t i = 0; i < mTracks.size(); ++i) { const sp<Track> &track = mTracks.valueAt(i); @@ -814,168 +969,6 @@ void WifiDisplaySource::PlaybackSession::requestIDRFrame() { } } -bool WifiDisplaySource::PlaybackSession::allTracksHavePacketizerIndex() { - if (mAllTracksHavePacketizerIndex) { - return true; - } - - for (size_t i = 0; i < mTracks.size(); ++i) { - if (mTracks.valueAt(i)->packetizerTrackIndex() < 0) { - return false; - } - } - - 
mAllTracksHavePacketizerIndex = true; - - return true; -} - -status_t WifiDisplaySource::PlaybackSession::packetizeAccessUnit( - size_t trackIndex, sp<ABuffer> accessUnit, - sp<ABuffer> *packets) { - const sp<Track> &track = mTracks.valueFor(trackIndex); - - uint32_t flags = 0; - - bool isHDCPEncrypted = false; - uint64_t inputCTR; - uint8_t HDCP_private_data[16]; - - bool manuallyPrependSPSPPS = - !track->isAudio() - && track->converter()->needToManuallyPrependSPSPPS() - && IsIDR(accessUnit); - - if (mHDCP != NULL && !track->isAudio()) { - isHDCPEncrypted = true; - - if (manuallyPrependSPSPPS) { - accessUnit = mPacketizer->prependCSD( - track->packetizerTrackIndex(), accessUnit); - } - - status_t err = mHDCP->encrypt( - accessUnit->data(), accessUnit->size(), - trackIndex /* streamCTR */, - &inputCTR, - accessUnit->data()); - - if (err != OK) { - ALOGE("Failed to HDCP-encrypt media data (err %d)", - err); - - return err; - } - - HDCP_private_data[0] = 0x00; - - HDCP_private_data[1] = - (((trackIndex >> 30) & 3) << 1) | 1; - - HDCP_private_data[2] = (trackIndex >> 22) & 0xff; - - HDCP_private_data[3] = - (((trackIndex >> 15) & 0x7f) << 1) | 1; - - HDCP_private_data[4] = (trackIndex >> 7) & 0xff; - - HDCP_private_data[5] = - ((trackIndex & 0x7f) << 1) | 1; - - HDCP_private_data[6] = 0x00; - - HDCP_private_data[7] = - (((inputCTR >> 60) & 0x0f) << 1) | 1; - - HDCP_private_data[8] = (inputCTR >> 52) & 0xff; - - HDCP_private_data[9] = - (((inputCTR >> 45) & 0x7f) << 1) | 1; - - HDCP_private_data[10] = (inputCTR >> 37) & 0xff; - - HDCP_private_data[11] = - (((inputCTR >> 30) & 0x7f) << 1) | 1; - - HDCP_private_data[12] = (inputCTR >> 22) & 0xff; - - HDCP_private_data[13] = - (((inputCTR >> 15) & 0x7f) << 1) | 1; - - HDCP_private_data[14] = (inputCTR >> 7) & 0xff; - - HDCP_private_data[15] = - ((inputCTR & 0x7f) << 1) | 1; - -#if 0 - ALOGI("HDCP_private_data:"); - hexdump(HDCP_private_data, sizeof(HDCP_private_data)); - - ABitReader br(HDCP_private_data, 
sizeof(HDCP_private_data)); - CHECK_EQ(br.getBits(13), 0); - CHECK_EQ(br.getBits(2), (trackIndex >> 30) & 3); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(15), (trackIndex >> 15) & 0x7fff); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(15), trackIndex & 0x7fff); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(11), 0); - CHECK_EQ(br.getBits(4), (inputCTR >> 60) & 0xf); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(15), (inputCTR >> 45) & 0x7fff); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(15), (inputCTR >> 30) & 0x7fff); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(15), (inputCTR >> 15) & 0x7fff); - CHECK_EQ(br.getBits(1), 1u); - CHECK_EQ(br.getBits(15), inputCTR & 0x7fff); - CHECK_EQ(br.getBits(1), 1u); -#endif - - flags |= TSPacketizer::IS_ENCRYPTED; - } else if (manuallyPrependSPSPPS) { - flags |= TSPacketizer::PREPEND_SPS_PPS_TO_IDR_FRAMES; - } - - int64_t timeUs = ALooper::GetNowUs(); - if (mPrevTimeUs < 0ll || mPrevTimeUs + 100000ll <= timeUs) { - flags |= TSPacketizer::EMIT_PCR; - flags |= TSPacketizer::EMIT_PAT_AND_PMT; - - mPrevTimeUs = timeUs; - } - - mPacketizer->packetize( - track->packetizerTrackIndex(), accessUnit, packets, flags, - !isHDCPEncrypted ? NULL : HDCP_private_data, - !isHDCPEncrypted ? 0 : sizeof(HDCP_private_data), - track->isAudio() ? 2 : 0 /* numStuffingBytes */); - - return OK; -} - -status_t WifiDisplaySource::PlaybackSession::packetizeQueuedAccessUnits() { - for (;;) { - bool gotMoreData = false; - for (size_t i = 0; i < mTracks.size(); ++i) { - size_t trackIndex = mTracks.keyAt(i); - const sp<Track> &track = mTracks.valueAt(i); - - sp<ABuffer> accessUnit = track->dequeueAccessUnit(); - if (accessUnit != NULL) { - track->queueOutputBuffer(accessUnit); - gotMoreData = true; - } - } - - if (!gotMoreData) { - break; - } - } - - return OK; -} - void WifiDisplaySource::PlaybackSession::notifySessionDead() { // Inform WifiDisplaySource of our premature death (wish). 
sp<AMessage> notify = mNotify->dup(); @@ -985,78 +978,5 @@ void WifiDisplaySource::PlaybackSession::notifySessionDead() { mWeAreDead = true; } -void WifiDisplaySource::PlaybackSession::drainAccessUnits() { - ALOGV("audio/video has %d/%d buffers ready.", - mTracks.valueFor(1)->countQueuedOutputBuffers(), - mTracks.valueFor(0)->countQueuedOutputBuffers()); - - while (drainAccessUnit()) { - } -} - -bool WifiDisplaySource::PlaybackSession::drainAccessUnit() { - ssize_t minTrackIndex = -1; - int64_t minTimeUs = -1ll; - - for (size_t i = 0; i < mTracks.size(); ++i) { - const sp<Track> &track = mTracks.valueAt(i); - - int64_t timeUs; - if (track->hasOutputBuffer(&timeUs)) { - if (minTrackIndex < 0 || timeUs < minTimeUs) { - minTrackIndex = mTracks.keyAt(i); - minTimeUs = timeUs; - } - } -#if SUSPEND_VIDEO_IF_IDLE - else if (!track->isSuspended()) { - // We still consider this track "live", so it should keep - // delivering output data whose time stamps we'll have to - // consider for proper interleaving. - return false; - } -#else - else { - // We need access units available on all tracks to be able to - // dequeue the earliest one. - return false; - } -#endif - } - - if (minTrackIndex < 0) { - return false; - } - - const sp<Track> &track = mTracks.valueFor(minTrackIndex); - sp<ABuffer> accessUnit = track->dequeueOutputBuffer(); - - sp<ABuffer> packets; - status_t err = packetizeAccessUnit(minTrackIndex, accessUnit, &packets); - - if (err != OK) { - notifySessionDead(); - return false; - } - - if ((ssize_t)minTrackIndex == mVideoTrackIndex) { - packets->meta()->setInt32("isVideo", 1); - } - mSender->queuePackets(minTimeUs, packets); - -#if 0 - if (minTrackIndex == mVideoTrackIndex) { - int64_t nowUs = ALooper::GetNowUs(); - - // Latency from "data acquired" to "ready to send if we wanted to". - ALOGI("[%s] latencyUs = %lld ms", - minTrackIndex == mVideoTrackIndex ? 
"video" : "audio", - (nowUs - minTimeUs) / 1000ll); - } -#endif - - return true; -} - } // namespace android diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h index b9d193b..39086a1 100644 --- a/media/libstagefright/wifi-display/source/PlaybackSession.h +++ b/media/libstagefright/wifi-display/source/PlaybackSession.h @@ -18,7 +18,8 @@ #define PLAYBACK_SESSION_H_ -#include "Sender.h" +#include "MediaSender.h" +#include "VideoFormats.h" #include "WifiDisplaySource.h" namespace android { @@ -26,10 +27,11 @@ namespace android { struct ABuffer; struct BufferQueue; struct IHDCP; -struct ISurfaceTexture; +struct IGraphicBufferProducer; struct MediaPuller; struct MediaSource; -struct TSPacketizer; +struct MediaSender; +struct NuMediaExtractor; // Encapsulates the state of an RTP/RTCP session in the context of wifi // display. @@ -38,12 +40,20 @@ struct WifiDisplaySource::PlaybackSession : public AHandler { const sp<ANetworkSession> &netSession, const sp<AMessage> ¬ify, const struct in_addr &interfaceAddr, - const sp<IHDCP> &hdcp); + const sp<IHDCP> &hdcp, + const char *path = NULL); status_t init( - const char *clientIP, int32_t clientRtp, int32_t clientRtcp, - Sender::TransportMode transportMode, - bool usePCMAudio); + const char *clientIP, + int32_t clientRtp, + RTPSender::TransportMode rtpMode, + int32_t clientRtcp, + RTPSender::TransportMode rtcpMode, + bool enableAudio, + bool usePCMAudio, + bool enableVideo, + VideoFormats::ResolutionType videoResolutionType, + size_t videoResolutionIndex); void destroyAsync(); @@ -56,9 +66,7 @@ struct WifiDisplaySource::PlaybackSession : public AHandler { status_t finishPlay(); status_t pause(); - sp<ISurfaceTexture> getSurfaceTexture(); - int32_t width() const; - int32_t height() const; + sp<IGraphicBufferProducer> getSurfaceTexture(); void requestIDRFrame(); @@ -80,26 +88,27 @@ private: kWhatMediaPullerNotify, kWhatConverterNotify, kWhatTrackNotify, 
- kWhatSenderNotify, kWhatUpdateSurface, - kWhatFinishPlay, - kWhatPacketize, kWhatPause, kWhatResume, + kWhatMediaSenderNotify, + kWhatPullExtractorSample, }; sp<ANetworkSession> mNetSession; - sp<Sender> mSender; - sp<ALooper> mSenderLooper; sp<AMessage> mNotify; in_addr mInterfaceAddr; sp<IHDCP> mHDCP; + AString mMediaPath; + + sp<MediaSender> mMediaSender; + int32_t mLocalRTPPort; + bool mWeAreDead; bool mPaused; int64_t mLastLifesignUs; - sp<TSPacketizer> mPacketizer; sp<BufferQueue> mBufferQueue; KeyedVector<size_t, sp<Track> > mTracks; @@ -107,9 +116,21 @@ private: int64_t mPrevTimeUs; - bool mAllTracksHavePacketizerIndex; + sp<NuMediaExtractor> mExtractor; + KeyedVector<size_t, size_t> mExtractorTrackToInternalTrack; + bool mPullExtractorPending; + int32_t mPullExtractorGeneration; + int64_t mFirstSampleTimeRealUs; + int64_t mFirstSampleTimeUs; - status_t setupPacketizer(bool usePCMAudio); + status_t setupMediaPacketizer(bool enableAudio, bool enableVideo); + + status_t setupPacketizer( + bool enableAudio, + bool usePCMAudio, + bool enableVideo, + VideoFormats::ResolutionType videoResolutionType, + size_t videoResolutionIndex); status_t addSource( bool isVideo, @@ -118,29 +139,20 @@ private: bool usePCMAudio, size_t *numInputBuffers); - status_t addVideoSource(); - status_t addAudioSource(bool usePCMAudio); - - ssize_t appendTSData( - const void *data, size_t size, bool timeDiscontinuity, bool flush); - - status_t onFinishPlay(); - status_t onFinishPlay2(); + status_t addVideoSource( + VideoFormats::ResolutionType videoResolutionType, + size_t videoResolutionIndex); - bool allTracksHavePacketizerIndex(); - - status_t packetizeAccessUnit( - size_t trackIndex, sp<ABuffer> accessUnit, - sp<ABuffer> *packets); + status_t addAudioSource(bool usePCMAudio); - status_t packetizeQueuedAccessUnits(); + status_t onMediaSenderInitialized(); void notifySessionDead(); - void drainAccessUnits(); + void schedulePullExtractor(); + void onPullExtractor(); - // Returns true 
iff an access unit was successfully drained. - bool drainAccessUnit(); + void onSinkFeedback(const sp<AMessage> &msg); DISALLOW_EVIL_CONSTRUCTORS(PlaybackSession); }; diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp index 72be927..cc8dee3 100644 --- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp +++ b/media/libstagefright/wifi-display/source/RepeaterSource.cpp @@ -27,6 +27,25 @@ RepeaterSource::~RepeaterSource() { CHECK(!mStarted); } +double RepeaterSource::getFrameRate() const { + return mRateHz; +} + +void RepeaterSource::setFrameRate(double rateHz) { + Mutex::Autolock autoLock(mLock); + + if (rateHz == mRateHz) { + return; + } + + if (mStartTimeUs >= 0ll) { + int64_t nextTimeUs = mStartTimeUs + (mFrameCount * 1000000ll) / mRateHz; + mStartTimeUs = nextTimeUs; + mFrameCount = 0; + } + mRateHz = rateHz; +} + status_t RepeaterSource::start(MetaData *params) { CHECK(!mStarted); diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.h b/media/libstagefright/wifi-display/source/RepeaterSource.h index a13973c..8d414fd 100644 --- a/media/libstagefright/wifi-display/source/RepeaterSource.h +++ b/media/libstagefright/wifi-display/source/RepeaterSource.h @@ -6,7 +6,7 @@ #include <media/stagefright/foundation/AHandlerReflector.h> #include <media/stagefright/MediaSource.h> -#define SUSPEND_VIDEO_IF_IDLE 1 +#define SUSPEND_VIDEO_IF_IDLE 0 namespace android { @@ -28,6 +28,9 @@ struct RepeaterSource : public MediaSource { // send updates in a while, this is its wakeup call. 
void wakeUp(); + double getFrameRate() const; + void setFrameRate(double rateHz); + protected: virtual ~RepeaterSource(); diff --git a/media/libstagefright/wifi-display/source/Sender.cpp b/media/libstagefright/wifi-display/source/Sender.cpp deleted file mode 100644 index 9048691..0000000 --- a/media/libstagefright/wifi-display/source/Sender.cpp +++ /dev/null @@ -1,870 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "Sender" -#include <utils/Log.h> - -#include "Sender.h" - -#include "ANetworkSession.h" -#include "TimeSeries.h" - -#include <media/stagefright/foundation/ABuffer.h> -#include <media/stagefright/foundation/ADebug.h> -#include <media/stagefright/foundation/AMessage.h> -#include <media/stagefright/foundation/hexdump.h> -#include <media/stagefright/MediaErrors.h> -#include <media/stagefright/Utils.h> - -namespace android { - -static size_t kMaxRTPPacketSize = 1500; -static size_t kMaxNumTSPacketsPerRTPPacket = (kMaxRTPPacketSize - 12) / 188; - -Sender::Sender( - const sp<ANetworkSession> &netSession, - const sp<AMessage> ¬ify) - : mNetSession(netSession), - mNotify(notify), - mTransportMode(TRANSPORT_UDP), - mRTPChannel(0), - mRTCPChannel(0), - mRTPPort(0), - mRTPSessionID(0), - mRTCPSessionID(0), -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - mRTPRetransmissionSessionID(0), - mRTCPRetransmissionSessionID(0), -#endif 
- mClientRTPPort(0), - mClientRTCPPort(0), - mRTPConnected(false), - mRTCPConnected(false), - mFirstOutputBufferReadyTimeUs(-1ll), - mFirstOutputBufferSentTimeUs(-1ll), - mRTPSeqNo(0), -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - mRTPRetransmissionSeqNo(0), -#endif - mLastNTPTime(0), - mLastRTPTime(0), - mNumRTPSent(0), - mNumRTPOctetsSent(0), - mNumSRsSent(0), - mSendSRPending(false) -#if ENABLE_RETRANSMISSION - ,mHistoryLength(0) -#endif -#if TRACK_BANDWIDTH - ,mFirstPacketTimeUs(-1ll) - ,mTotalBytesSent(0ll) -#endif -#if LOG_TRANSPORT_STREAM - ,mLogFile(NULL) -#endif -{ -#if LOG_TRANSPORT_STREAM - mLogFile = fopen("/system/etc/log.ts", "wb"); -#endif -} - -Sender::~Sender() { -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - if (mRTCPRetransmissionSessionID != 0) { - mNetSession->destroySession(mRTCPRetransmissionSessionID); - } - - if (mRTPRetransmissionSessionID != 0) { - mNetSession->destroySession(mRTPRetransmissionSessionID); - } -#endif - - if (mRTCPSessionID != 0) { - mNetSession->destroySession(mRTCPSessionID); - } - - if (mRTPSessionID != 0) { - mNetSession->destroySession(mRTPSessionID); - } - -#if LOG_TRANSPORT_STREAM - if (mLogFile != NULL) { - fclose(mLogFile); - mLogFile = NULL; - } -#endif -} - -status_t Sender::init( - const char *clientIP, int32_t clientRtp, int32_t clientRtcp, - TransportMode transportMode) { - mClientIP = clientIP; - mTransportMode = transportMode; - - if (transportMode == TRANSPORT_TCP_INTERLEAVED) { - mRTPChannel = clientRtp; - mRTCPChannel = clientRtcp; - mRTPPort = 0; - mRTPSessionID = 0; - mRTCPSessionID = 0; - return OK; - } - - mRTPChannel = 0; - mRTCPChannel = 0; - - if (mTransportMode == TRANSPORT_TCP) { - // XXX This is wrong, we need to allocate sockets here, we only - // need to do this because the dongles are not establishing their - // end until after PLAY instead of before SETUP. 
- mRTPPort = 20000; - mRTPSessionID = 0; - mRTCPSessionID = 0; - mClientRTPPort = clientRtp; - mClientRTCPPort = clientRtcp; - return OK; - } - - int serverRtp; - - sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id()); - sp<AMessage> rtcpNotify = new AMessage(kWhatRTCPNotify, id()); - -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - sp<AMessage> rtpRetransmissionNotify = - new AMessage(kWhatRTPRetransmissionNotify, id()); - - sp<AMessage> rtcpRetransmissionNotify = - new AMessage(kWhatRTCPRetransmissionNotify, id()); -#endif - - status_t err; - for (serverRtp = 15550;; serverRtp += 2) { - int32_t rtpSession; - if (mTransportMode == TRANSPORT_UDP) { - err = mNetSession->createUDPSession( - serverRtp, clientIP, clientRtp, - rtpNotify, &rtpSession); - } else { - err = mNetSession->createTCPDatagramSession( - serverRtp, clientIP, clientRtp, - rtpNotify, &rtpSession); - } - - if (err != OK) { - ALOGI("failed to create RTP socket on port %d", serverRtp); - continue; - } - - int32_t rtcpSession = 0; - - if (clientRtcp >= 0) { - if (mTransportMode == TRANSPORT_UDP) { - err = mNetSession->createUDPSession( - serverRtp + 1, clientIP, clientRtcp, - rtcpNotify, &rtcpSession); - } else { - err = mNetSession->createTCPDatagramSession( - serverRtp + 1, clientIP, clientRtcp, - rtcpNotify, &rtcpSession); - } - - if (err != OK) { - ALOGI("failed to create RTCP socket on port %d", serverRtp + 1); - - mNetSession->destroySession(rtpSession); - continue; - } - } - -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - if (mTransportMode == TRANSPORT_UDP) { - int32_t rtpRetransmissionSession; - - err = mNetSession->createUDPSession( - serverRtp + kRetransmissionPortOffset, - clientIP, - clientRtp + kRetransmissionPortOffset, - rtpRetransmissionNotify, - &rtpRetransmissionSession); - - if (err != OK) { - mNetSession->destroySession(rtcpSession); - mNetSession->destroySession(rtpSession); - continue; - } - - CHECK_GE(clientRtcp, 0); - - int32_t 
rtcpRetransmissionSession; - err = mNetSession->createUDPSession( - serverRtp + 1 + kRetransmissionPortOffset, - clientIP, - clientRtp + 1 + kRetransmissionPortOffset, - rtcpRetransmissionNotify, - &rtcpRetransmissionSession); - - if (err != OK) { - mNetSession->destroySession(rtpRetransmissionSession); - mNetSession->destroySession(rtcpSession); - mNetSession->destroySession(rtpSession); - continue; - } - - mRTPRetransmissionSessionID = rtpRetransmissionSession; - mRTCPRetransmissionSessionID = rtcpRetransmissionSession; - - ALOGI("rtpRetransmissionSessionID = %d, " - "rtcpRetransmissionSessionID = %d", - rtpRetransmissionSession, rtcpRetransmissionSession); - } -#endif - - mRTPPort = serverRtp; - mRTPSessionID = rtpSession; - mRTCPSessionID = rtcpSession; - - ALOGI("rtpSessionID = %d, rtcpSessionID = %d", rtpSession, rtcpSession); - break; - } - - if (mRTPPort == 0) { - return UNKNOWN_ERROR; - } - - return OK; -} - -status_t Sender::finishInit() { - if (mTransportMode != TRANSPORT_TCP) { - notifyInitDone(); - return OK; - } - - sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id()); - - status_t err = mNetSession->createTCPDatagramSession( - mRTPPort, mClientIP.c_str(), mClientRTPPort, - rtpNotify, &mRTPSessionID); - - if (err != OK) { - return err; - } - - if (mClientRTCPPort >= 0) { - sp<AMessage> rtcpNotify = new AMessage(kWhatRTCPNotify, id()); - - err = mNetSession->createTCPDatagramSession( - mRTPPort + 1, mClientIP.c_str(), mClientRTCPPort, - rtcpNotify, &mRTCPSessionID); - - if (err != OK) { - return err; - } - } - - return OK; -} - -int32_t Sender::getRTPPort() const { - return mRTPPort; -} - -void Sender::queuePackets( - int64_t timeUs, const sp<ABuffer> &tsPackets) { - const size_t numTSPackets = tsPackets->size() / 188; - - const size_t numRTPPackets = - (numTSPackets + kMaxNumTSPacketsPerRTPPacket - 1) - / kMaxNumTSPacketsPerRTPPacket; - - sp<ABuffer> udpPackets = new ABuffer( - numRTPPackets * (12 + kMaxNumTSPacketsPerRTPPacket * 188)); - - 
udpPackets->meta()->setInt64("timeUs", timeUs); - - size_t dstOffset = 0; - for (size_t i = 0; i < numTSPackets; ++i) { - if ((i % kMaxNumTSPacketsPerRTPPacket) == 0) { - static const bool kMarkerBit = false; - - uint8_t *rtp = udpPackets->data() + dstOffset; - rtp[0] = 0x80; - rtp[1] = 33 | (kMarkerBit ? (1 << 7) : 0); // M-bit - rtp[2] = (mRTPSeqNo >> 8) & 0xff; - rtp[3] = mRTPSeqNo & 0xff; - rtp[4] = 0x00; // rtp time to be filled in later. - rtp[5] = 0x00; - rtp[6] = 0x00; - rtp[7] = 0x00; - rtp[8] = kSourceID >> 24; - rtp[9] = (kSourceID >> 16) & 0xff; - rtp[10] = (kSourceID >> 8) & 0xff; - rtp[11] = kSourceID & 0xff; - - ++mRTPSeqNo; - - dstOffset += 12; - } - - memcpy(udpPackets->data() + dstOffset, - tsPackets->data() + 188 * i, - 188); - - dstOffset += 188; - } - - udpPackets->setRange(0, dstOffset); - - sp<AMessage> msg = new AMessage(kWhatDrainQueue, id()); - msg->setBuffer("udpPackets", udpPackets); - msg->post(); - -#if LOG_TRANSPORT_STREAM - if (mLogFile != NULL) { - fwrite(tsPackets->data(), 1, tsPackets->size(), mLogFile); - } -#endif -} - -void Sender::onMessageReceived(const sp<AMessage> &msg) { - switch (msg->what()) { - case kWhatRTPNotify: - case kWhatRTCPNotify: -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - case kWhatRTPRetransmissionNotify: - case kWhatRTCPRetransmissionNotify: -#endif - { - int32_t reason; - CHECK(msg->findInt32("reason", &reason)); - - switch (reason) { - case ANetworkSession::kWhatError: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - int32_t err; - CHECK(msg->findInt32("err", &err)); - - int32_t errorOccuredDuringSend; - CHECK(msg->findInt32("send", &errorOccuredDuringSend)); - - AString detail; - CHECK(msg->findString("detail", &detail)); - - if ((msg->what() == kWhatRTPNotify -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - || msg->what() == kWhatRTPRetransmissionNotify -#endif - ) && !errorOccuredDuringSend) { - // This is ok, we don't expect to 
receive anything on - // the RTP socket. - break; - } - - ALOGE("An error occurred during %s in session %d " - "(%d, '%s' (%s)).", - errorOccuredDuringSend ? "send" : "receive", - sessionID, - err, - detail.c_str(), - strerror(-err)); - - mNetSession->destroySession(sessionID); - - if (sessionID == mRTPSessionID) { - mRTPSessionID = 0; - } else if (sessionID == mRTCPSessionID) { - mRTCPSessionID = 0; - } -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - else if (sessionID == mRTPRetransmissionSessionID) { - mRTPRetransmissionSessionID = 0; - } else if (sessionID == mRTCPRetransmissionSessionID) { - mRTCPRetransmissionSessionID = 0; - } -#endif - - notifySessionDead(); - break; - } - - case ANetworkSession::kWhatDatagram: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - sp<ABuffer> data; - CHECK(msg->findBuffer("data", &data)); - - status_t err; - if (msg->what() == kWhatRTCPNotify -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - || msg->what() == kWhatRTCPRetransmissionNotify -#endif - ) - { - err = parseRTCP(data); - } - break; - } - - case ANetworkSession::kWhatConnected: - { - CHECK_EQ(mTransportMode, TRANSPORT_TCP); - - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - if (sessionID == mRTPSessionID) { - CHECK(!mRTPConnected); - mRTPConnected = true; - ALOGI("RTP Session now connected."); - } else if (sessionID == mRTCPSessionID) { - CHECK(!mRTCPConnected); - mRTCPConnected = true; - ALOGI("RTCP Session now connected."); - } else { - TRESPASS(); - } - - if (mRTPConnected - && (mClientRTCPPort < 0 || mRTCPConnected)) { - notifyInitDone(); - } - break; - } - - default: - TRESPASS(); - } - break; - } - - case kWhatDrainQueue: - { - sp<ABuffer> udpPackets; - CHECK(msg->findBuffer("udpPackets", &udpPackets)); - - onDrainQueue(udpPackets); - break; - } - - case kWhatSendSR: - { - mSendSRPending = false; - - if (mRTCPSessionID == 0) { - break; - } - - onSendSR(); - - 
scheduleSendSR(); - break; - } - } -} - -void Sender::scheduleSendSR() { - if (mSendSRPending || mRTCPSessionID == 0) { - return; - } - - mSendSRPending = true; - (new AMessage(kWhatSendSR, id()))->post(kSendSRIntervalUs); -} - -void Sender::addSR(const sp<ABuffer> &buffer) { - uint8_t *data = buffer->data() + buffer->size(); - - // TODO: Use macros/utility functions to clean up all the bitshifts below. - - data[0] = 0x80 | 0; - data[1] = 200; // SR - data[2] = 0; - data[3] = 6; - data[4] = kSourceID >> 24; - data[5] = (kSourceID >> 16) & 0xff; - data[6] = (kSourceID >> 8) & 0xff; - data[7] = kSourceID & 0xff; - - data[8] = mLastNTPTime >> (64 - 8); - data[9] = (mLastNTPTime >> (64 - 16)) & 0xff; - data[10] = (mLastNTPTime >> (64 - 24)) & 0xff; - data[11] = (mLastNTPTime >> 32) & 0xff; - data[12] = (mLastNTPTime >> 24) & 0xff; - data[13] = (mLastNTPTime >> 16) & 0xff; - data[14] = (mLastNTPTime >> 8) & 0xff; - data[15] = mLastNTPTime & 0xff; - - data[16] = (mLastRTPTime >> 24) & 0xff; - data[17] = (mLastRTPTime >> 16) & 0xff; - data[18] = (mLastRTPTime >> 8) & 0xff; - data[19] = mLastRTPTime & 0xff; - - data[20] = mNumRTPSent >> 24; - data[21] = (mNumRTPSent >> 16) & 0xff; - data[22] = (mNumRTPSent >> 8) & 0xff; - data[23] = mNumRTPSent & 0xff; - - data[24] = mNumRTPOctetsSent >> 24; - data[25] = (mNumRTPOctetsSent >> 16) & 0xff; - data[26] = (mNumRTPOctetsSent >> 8) & 0xff; - data[27] = mNumRTPOctetsSent & 0xff; - - buffer->setRange(buffer->offset(), buffer->size() + 28); -} - -void Sender::addSDES(const sp<ABuffer> &buffer) { - uint8_t *data = buffer->data() + buffer->size(); - data[0] = 0x80 | 1; - data[1] = 202; // SDES - data[4] = kSourceID >> 24; - data[5] = (kSourceID >> 16) & 0xff; - data[6] = (kSourceID >> 8) & 0xff; - data[7] = kSourceID & 0xff; - - size_t offset = 8; - - data[offset++] = 1; // CNAME - - static const char *kCNAME = "someone@somewhere"; - data[offset++] = strlen(kCNAME); - - memcpy(&data[offset], kCNAME, strlen(kCNAME)); - offset += 
strlen(kCNAME); - - data[offset++] = 7; // NOTE - - static const char *kNOTE = "Hell's frozen over."; - data[offset++] = strlen(kNOTE); - - memcpy(&data[offset], kNOTE, strlen(kNOTE)); - offset += strlen(kNOTE); - - data[offset++] = 0; - - if ((offset % 4) > 0) { - size_t count = 4 - (offset % 4); - switch (count) { - case 3: - data[offset++] = 0; - case 2: - data[offset++] = 0; - case 1: - data[offset++] = 0; - } - } - - size_t numWords = (offset / 4) - 1; - data[2] = numWords >> 8; - data[3] = numWords & 0xff; - - buffer->setRange(buffer->offset(), buffer->size() + offset); -} - -// static -uint64_t Sender::GetNowNTP() { - uint64_t nowUs = ALooper::GetNowUs(); - - nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll; - - uint64_t hi = nowUs / 1000000ll; - uint64_t lo = ((1ll << 32) * (nowUs % 1000000ll)) / 1000000ll; - - return (hi << 32) | lo; -} - -void Sender::onSendSR() { - sp<ABuffer> buffer = new ABuffer(1500); - buffer->setRange(0, 0); - - addSR(buffer); - addSDES(buffer); - - if (mTransportMode == TRANSPORT_TCP_INTERLEAVED) { - sp<AMessage> notify = mNotify->dup(); - notify->setInt32("what", kWhatBinaryData); - notify->setInt32("channel", mRTCPChannel); - notify->setBuffer("data", buffer); - notify->post(); - } else { - sendPacket(mRTCPSessionID, buffer->data(), buffer->size()); - } - - ++mNumSRsSent; -} - -#if ENABLE_RETRANSMISSION -status_t Sender::parseTSFB( - const uint8_t *data, size_t size) { - if ((data[0] & 0x1f) != 1) { - return ERROR_UNSUPPORTED; // We only support NACK for now. 
- } - - uint32_t srcId = U32_AT(&data[8]); - if (srcId != kSourceID) { - return ERROR_MALFORMED; - } - - for (size_t i = 12; i < size; i += 4) { - uint16_t seqNo = U16_AT(&data[i]); - uint16_t blp = U16_AT(&data[i + 2]); - - List<sp<ABuffer> >::iterator it = mHistory.begin(); - bool foundSeqNo = false; - while (it != mHistory.end()) { - const sp<ABuffer> &buffer = *it; - - uint16_t bufferSeqNo = buffer->int32Data() & 0xffff; - - bool retransmit = false; - if (bufferSeqNo == seqNo) { - retransmit = true; - } else if (blp != 0) { - for (size_t i = 0; i < 16; ++i) { - if ((blp & (1 << i)) - && (bufferSeqNo == ((seqNo + i + 1) & 0xffff))) { - blp &= ~(1 << i); - retransmit = true; - } - } - } - - if (retransmit) { - ALOGI("retransmitting seqNo %d", bufferSeqNo); - -#if RETRANSMISSION_ACCORDING_TO_RFC_XXXX - sp<ABuffer> retransRTP = new ABuffer(2 + buffer->size()); - uint8_t *rtp = retransRTP->data(); - memcpy(rtp, buffer->data(), 12); - rtp[2] = (mRTPRetransmissionSeqNo >> 8) & 0xff; - rtp[3] = mRTPRetransmissionSeqNo & 0xff; - rtp[12] = (bufferSeqNo >> 8) & 0xff; - rtp[13] = bufferSeqNo & 0xff; - memcpy(&rtp[14], buffer->data() + 12, buffer->size() - 12); - - ++mRTPRetransmissionSeqNo; - - sendPacket( - mRTPRetransmissionSessionID, - retransRTP->data(), retransRTP->size()); -#else - sendPacket( - mRTPSessionID, buffer->data(), buffer->size()); -#endif - - if (bufferSeqNo == seqNo) { - foundSeqNo = true; - } - - if (foundSeqNo && blp == 0) { - break; - } - } - - ++it; - } - - if (!foundSeqNo || blp != 0) { - ALOGI("Some sequence numbers were no longer available for " - "retransmission"); - } - } - - return OK; -} -#endif - -status_t Sender::parseRTCP( - const sp<ABuffer> &buffer) { - const uint8_t *data = buffer->data(); - size_t size = buffer->size(); - - while (size > 0) { - if (size < 8) { - // Too short to be a valid RTCP header - return ERROR_MALFORMED; - } - - if ((data[0] >> 6) != 2) { - // Unsupported version. 
- return ERROR_UNSUPPORTED; - } - - if (data[0] & 0x20) { - // Padding present. - - size_t paddingLength = data[size - 1]; - - if (paddingLength + 12 > size) { - // If we removed this much padding we'd end up with something - // that's too short to be a valid RTP header. - return ERROR_MALFORMED; - } - - size -= paddingLength; - } - - size_t headerLength = 4 * (data[2] << 8 | data[3]) + 4; - - if (size < headerLength) { - // Only received a partial packet? - return ERROR_MALFORMED; - } - - switch (data[1]) { - case 200: - case 201: // RR - case 202: // SDES - case 203: - case 204: // APP - break; - -#if ENABLE_RETRANSMISSION - case 205: // TSFB (transport layer specific feedback) - parseTSFB(data, headerLength); - break; -#endif - - case 206: // PSFB (payload specific feedback) - hexdump(data, headerLength); - break; - - default: - { - ALOGW("Unknown RTCP packet type %u of size %d", - (unsigned)data[1], headerLength); - break; - } - } - - data += headerLength; - size -= headerLength; - } - - return OK; -} - -status_t Sender::sendPacket( - int32_t sessionID, const void *data, size_t size) { - return mNetSession->sendRequest(sessionID, data, size); -} - -void Sender::notifyInitDone() { - sp<AMessage> notify = mNotify->dup(); - notify->setInt32("what", kWhatInitDone); - notify->post(); -} - -void Sender::notifySessionDead() { - sp<AMessage> notify = mNotify->dup(); - notify->setInt32("what", kWhatSessionDead); - notify->post(); -} - -void Sender::onDrainQueue(const sp<ABuffer> &udpPackets) { - static const size_t kFullRTPPacketSize = - 12 + 188 * kMaxNumTSPacketsPerRTPPacket; - - size_t srcOffset = 0; - while (srcOffset < udpPackets->size()) { - uint8_t *rtp = udpPackets->data() + srcOffset; - - size_t rtpPacketSize = udpPackets->size() - srcOffset; - if (rtpPacketSize > kFullRTPPacketSize) { - rtpPacketSize = kFullRTPPacketSize; - } - - int64_t nowUs = ALooper::GetNowUs(); - mLastNTPTime = GetNowNTP(); - - // 90kHz time scale - uint32_t rtpTime = (nowUs * 9ll) / 
100ll; - - rtp[4] = rtpTime >> 24; - rtp[5] = (rtpTime >> 16) & 0xff; - rtp[6] = (rtpTime >> 8) & 0xff; - rtp[7] = rtpTime & 0xff; - - ++mNumRTPSent; - mNumRTPOctetsSent += rtpPacketSize - 12; - - mLastRTPTime = rtpTime; - - if (mTransportMode == TRANSPORT_TCP_INTERLEAVED) { - sp<AMessage> notify = mNotify->dup(); - notify->setInt32("what", kWhatBinaryData); - - sp<ABuffer> data = new ABuffer(rtpPacketSize); - memcpy(data->data(), rtp, rtpPacketSize); - - notify->setInt32("channel", mRTPChannel); - notify->setBuffer("data", data); - notify->post(); - } else { - sendPacket(mRTPSessionID, rtp, rtpPacketSize); - -#if TRACK_BANDWIDTH - mTotalBytesSent += rtpPacketSize->size(); - int64_t delayUs = ALooper::GetNowUs() - mFirstPacketTimeUs; - - if (delayUs > 0ll) { - ALOGI("approx. net bandwidth used: %.2f Mbit/sec", - mTotalBytesSent * 8.0 / delayUs); - } -#endif - } - -#if ENABLE_RETRANSMISSION - addToHistory(rtp, rtpPacketSize); -#endif - - srcOffset += rtpPacketSize; - } - -#if 0 - int64_t timeUs; - CHECK(udpPackets->meta()->findInt64("timeUs", &timeUs)); - - ALOGI("dTimeUs = %lld us", ALooper::GetNowUs() - timeUs); -#endif -} - -#if ENABLE_RETRANSMISSION -void Sender::addToHistory(const uint8_t *rtp, size_t rtpPacketSize) { - sp<ABuffer> packet = new ABuffer(rtpPacketSize); - memcpy(packet->data(), rtp, rtpPacketSize); - - unsigned rtpSeqNo = U16_AT(&rtp[2]); - packet->setInt32Data(rtpSeqNo); - - mHistory.push_back(packet); - ++mHistoryLength; - - if (mHistoryLength > kMaxHistoryLength) { - mHistory.erase(mHistory.begin()); - --mHistoryLength; - } -} -#endif - -} // namespace android - diff --git a/media/libstagefright/wifi-display/source/Sender.h b/media/libstagefright/wifi-display/source/Sender.h deleted file mode 100644 index 66951f7..0000000 --- a/media/libstagefright/wifi-display/source/Sender.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you 
may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef SENDER_H_ - -#define SENDER_H_ - -#include <media/stagefright/foundation/AHandler.h> - -namespace android { - -#define LOG_TRANSPORT_STREAM 0 -#define TRACK_BANDWIDTH 0 - -#define ENABLE_RETRANSMISSION 1 - -// If retransmission is enabled the following define determines what -// kind we support, if RETRANSMISSION_ACCORDING_TO_RFC_XXXX is 0 -// we'll send NACKs on the original RTCP channel and retransmit packets -// on the original RTP channel, otherwise a separate channel pair is used -// for this purpose. 
-#define RETRANSMISSION_ACCORDING_TO_RFC_XXXX 0 - -struct ABuffer; -struct ANetworkSession; - -struct Sender : public AHandler { - Sender(const sp<ANetworkSession> &netSession, const sp<AMessage> ¬ify); - - enum { - kWhatInitDone, - kWhatSessionDead, - kWhatBinaryData, - }; - - enum TransportMode { - TRANSPORT_UDP, - TRANSPORT_TCP_INTERLEAVED, - TRANSPORT_TCP, - }; - status_t init( - const char *clientIP, int32_t clientRtp, int32_t clientRtcp, - TransportMode transportMode); - - status_t finishInit(); - - int32_t getRTPPort() const; - - void queuePackets(int64_t timeUs, const sp<ABuffer> &tsPackets); - void scheduleSendSR(); - -protected: - virtual ~Sender(); - virtual void onMessageReceived(const sp<AMessage> &msg); - -private: - enum { - kWhatDrainQueue, - kWhatSendSR, - kWhatRTPNotify, - kWhatRTCPNotify, -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - kWhatRTPRetransmissionNotify, - kWhatRTCPRetransmissionNotify, -#endif - }; - - static const int64_t kSendSRIntervalUs = 10000000ll; - - static const uint32_t kSourceID = 0xdeadbeef; - static const size_t kMaxHistoryLength = 128; - -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - static const size_t kRetransmissionPortOffset = 120; -#endif - - sp<ANetworkSession> mNetSession; - sp<AMessage> mNotify; - - TransportMode mTransportMode; - AString mClientIP; - - // in TCP mode - int32_t mRTPChannel; - int32_t mRTCPChannel; - - // in UDP mode - int32_t mRTPPort; - int32_t mRTPSessionID; - int32_t mRTCPSessionID; - -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - int32_t mRTPRetransmissionSessionID; - int32_t mRTCPRetransmissionSessionID; -#endif - - int32_t mClientRTPPort; - int32_t mClientRTCPPort; - bool mRTPConnected; - bool mRTCPConnected; - - int64_t mFirstOutputBufferReadyTimeUs; - int64_t mFirstOutputBufferSentTimeUs; - - uint32_t mRTPSeqNo; -#if ENABLE_RETRANSMISSION && RETRANSMISSION_ACCORDING_TO_RFC_XXXX - uint32_t mRTPRetransmissionSeqNo; -#endif - - 
uint64_t mLastNTPTime; - uint32_t mLastRTPTime; - uint32_t mNumRTPSent; - uint32_t mNumRTPOctetsSent; - uint32_t mNumSRsSent; - - bool mSendSRPending; - -#if ENABLE_RETRANSMISSION - List<sp<ABuffer> > mHistory; - size_t mHistoryLength; -#endif - -#if TRACK_BANDWIDTH - int64_t mFirstPacketTimeUs; - uint64_t mTotalBytesSent; -#endif - -#if LOG_TRANSPORT_STREAM - FILE *mLogFile; -#endif - - void onSendSR(); - void addSR(const sp<ABuffer> &buffer); - void addSDES(const sp<ABuffer> &buffer); - static uint64_t GetNowNTP(); - -#if ENABLE_RETRANSMISSION - status_t parseTSFB(const uint8_t *data, size_t size); - void addToHistory(const uint8_t *rtp, size_t rtpPacketSize); -#endif - - status_t parseRTCP(const sp<ABuffer> &buffer); - - status_t sendPacket(int32_t sessionID, const void *data, size_t size); - - void notifyInitDone(); - void notifySessionDead(); - - void onDrainQueue(const sp<ABuffer> &udpPackets); - - DISALLOW_EVIL_CONSTRUCTORS(Sender); -}; - -} // namespace android - -#endif // SENDER_H_ diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.cpp b/media/libstagefright/wifi-display/source/TSPacketizer.cpp index ef57a4d..2c4a373 100644 --- a/media/libstagefright/wifi-display/source/TSPacketizer.cpp +++ b/media/libstagefright/wifi-display/source/TSPacketizer.cpp @@ -58,6 +58,7 @@ struct TSPacketizer::Track : public RefBase { sp<ABuffer> descriptorAt(size_t index) const; void finalize(); + void extractCSDIfNecessary(); protected: virtual ~Track(); @@ -77,6 +78,7 @@ private: bool mAudioLacksATDSHeaders; bool mFinalized; + bool mExtractedCSD; DISALLOW_EVIL_CONSTRUCTORS(Track); }; @@ -90,14 +92,21 @@ TSPacketizer::Track::Track( mStreamID(streamID), mContinuityCounter(0), mAudioLacksATDSHeaders(false), - mFinalized(false) { + mFinalized(false), + mExtractedCSD(false) { CHECK(format->findString("mime", &mMIME)); +} + +void TSPacketizer::Track::extractCSDIfNecessary() { + if (mExtractedCSD) { + return; + } if (!strcasecmp(mMIME.c_str(), 
MEDIA_MIMETYPE_VIDEO_AVC) || !strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) { for (size_t i = 0;; ++i) { sp<ABuffer> csd; - if (!format->findBuffer(StringPrintf("csd-%d", i).c_str(), &csd)) { + if (!mFormat->findBuffer(StringPrintf("csd-%d", i).c_str(), &csd)) { break; } @@ -111,6 +120,8 @@ TSPacketizer::Track::Track( } } } + + mExtractedCSD = true; } TSPacketizer::Track::~Track() { @@ -250,7 +261,7 @@ void TSPacketizer::Track::finalize() { data[0] = 40; // descriptor_tag data[1] = 4; // descriptor_length - CHECK_EQ(mCSD.size(), 1u); + CHECK_GE(mCSD.size(), 1u); const sp<ABuffer> &sps = mCSD.itemAt(0); CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4)); CHECK_GE(sps->size(), 7u); @@ -314,12 +325,31 @@ void TSPacketizer::Track::finalize() { mDescriptors.push_back(descriptor); } - int32_t hdcpVersion; - if (mFormat->findInt32("hdcp-version", &hdcpVersion)) { - // HDCP descriptor + mFinalized = true; +} + +//////////////////////////////////////////////////////////////////////////////// - CHECK(hdcpVersion == 0x20 || hdcpVersion == 0x21); +TSPacketizer::TSPacketizer(uint32_t flags) + : mFlags(flags), + mPATContinuityCounter(0), + mPMTContinuityCounter(0) { + initCrcTable(); + if (flags & (EMIT_HDCP20_DESCRIPTOR | EMIT_HDCP21_DESCRIPTOR)) { + int32_t hdcpVersion; + if (flags & EMIT_HDCP20_DESCRIPTOR) { + CHECK(!(flags & EMIT_HDCP21_DESCRIPTOR)); + hdcpVersion = 0x20; + } else { + CHECK(!(flags & EMIT_HDCP20_DESCRIPTOR)); + + // HDCP2.0 _and_ HDCP 2.1 specs say to set the version + // inside the HDCP descriptor to 0x20!!! 
+ hdcpVersion = 0x20; + } + + // HDCP descriptor sp<ABuffer> descriptor = new ABuffer(7); uint8_t *data = descriptor->data(); data[0] = 0x05; // descriptor_tag @@ -330,18 +360,8 @@ void TSPacketizer::Track::finalize() { data[5] = 'P'; data[6] = hdcpVersion; - mDescriptors.push_back(descriptor); + mProgramInfoDescriptors.push_back(descriptor); } - - mFinalized = true; -} - -//////////////////////////////////////////////////////////////////////////////// - -TSPacketizer::TSPacketizer() - : mPATContinuityCounter(0), - mPMTContinuityCounter(0) { - initCrcTable(); } TSPacketizer::~TSPacketizer() { @@ -407,6 +427,17 @@ ssize_t TSPacketizer::addTrack(const sp<AMessage> &format) { return mTracks.add(track); } +status_t TSPacketizer::extractCSDIfNecessary(size_t trackIndex) { + if (trackIndex >= mTracks.size()) { + return -ERANGE; + } + + const sp<Track> &track = mTracks.itemAt(trackIndex); + track->extractCSDIfNecessary(); + + return OK; +} + status_t TSPacketizer::packetize( size_t trackIndex, const sp<ABuffer> &_accessUnit, @@ -471,16 +502,121 @@ status_t TSPacketizer::packetize( // reserved = b1 // the first fragment of "buffer" follows + // Each transport packet (except for the last one contributing to the PES + // payload) must contain a multiple of 16 bytes of payload per HDCP spec. + bool alignPayload = + (mFlags & (EMIT_HDCP20_DESCRIPTOR | EMIT_HDCP21_DESCRIPTOR)); + + /* + a) The very first PES transport stream packet contains + + 4 bytes of TS header + ... padding + 14 bytes of static PES header + PES_private_data_len + 1 bytes (only if PES_private_data_len > 0) + numStuffingBytes bytes + + followed by the payload + + b) Subsequent PES transport stream packets contain + + 4 bytes of TS header + ... 
padding + + followed by the payload + */ + size_t PES_packet_length = accessUnit->size() + 8 + numStuffingBytes; if (PES_private_data_len > 0) { PES_packet_length += PES_private_data_len + 1; } - size_t numTSPackets; - if (PES_packet_length <= 178) { - numTSPackets = 1; - } else { - numTSPackets = 1 + ((PES_packet_length - 178) + 183) / 184; + size_t numTSPackets = 1; + + { + // Make sure the PES header fits into a single TS packet: + size_t PES_header_size = 14 + numStuffingBytes; + if (PES_private_data_len > 0) { + PES_header_size += PES_private_data_len + 1; + } + + CHECK_LE(PES_header_size, 188u - 4u); + + size_t sizeAvailableForPayload = 188 - 4 - PES_header_size; + size_t numBytesOfPayload = accessUnit->size(); + + if (numBytesOfPayload > sizeAvailableForPayload) { + numBytesOfPayload = sizeAvailableForPayload; + + if (alignPayload && numBytesOfPayload > 16) { + numBytesOfPayload -= (numBytesOfPayload % 16); + } + } + + // size_t numPaddingBytes = sizeAvailableForPayload - numBytesOfPayload; + ALOGV("packet 1 contains %zd padding bytes and %zd bytes of payload", + numPaddingBytes, numBytesOfPayload); + + size_t numBytesOfPayloadRemaining = accessUnit->size() - numBytesOfPayload; + +#if 0 + // The following hopefully illustrates the logic that led to the + // more efficient computation in the #else block... 
+ + while (numBytesOfPayloadRemaining > 0) { + size_t sizeAvailableForPayload = 188 - 4; + + size_t numBytesOfPayload = numBytesOfPayloadRemaining; + + if (numBytesOfPayload > sizeAvailableForPayload) { + numBytesOfPayload = sizeAvailableForPayload; + + if (alignPayload && numBytesOfPayload > 16) { + numBytesOfPayload -= (numBytesOfPayload % 16); + } + } + + size_t numPaddingBytes = sizeAvailableForPayload - numBytesOfPayload; + ALOGI("packet %zd contains %zd padding bytes and %zd bytes of payload", + numTSPackets + 1, numPaddingBytes, numBytesOfPayload); + + numBytesOfPayloadRemaining -= numBytesOfPayload; + ++numTSPackets; + } +#else + // This is how many bytes of payload each subsequent TS packet + // can contain at most. + sizeAvailableForPayload = 188 - 4; + size_t sizeAvailableForAlignedPayload = sizeAvailableForPayload; + if (alignPayload) { + // We're only going to use a subset of the available space + // since we need to make each fragment a multiple of 16 in size. + sizeAvailableForAlignedPayload -= + (sizeAvailableForAlignedPayload % 16); + } + + size_t numFullTSPackets = + numBytesOfPayloadRemaining / sizeAvailableForAlignedPayload; + + numTSPackets += numFullTSPackets; + + numBytesOfPayloadRemaining -= + numFullTSPackets * sizeAvailableForAlignedPayload; + + // numBytesOfPayloadRemaining < sizeAvailableForAlignedPayload + if (numFullTSPackets == 0 && numBytesOfPayloadRemaining > 0) { + // There wasn't enough payload left to form a full aligned payload, + // the last packet doesn't have to be aligned. + ++numTSPackets; + } else if (numFullTSPackets > 0 + && numBytesOfPayloadRemaining + + sizeAvailableForAlignedPayload > sizeAvailableForPayload) { + // The last packet emitted had a full aligned payload and together + // with the bytes remaining does exceed the unaligned payload + // size, so we need another packet. 
+ ++numTSPackets; + } +#endif } if (flags & EMIT_PAT_AND_PMT) { @@ -583,8 +719,9 @@ status_t TSPacketizer::packetize( // reserved = b111 // PCR_PID = kPCR_PID (13 bits) // reserved = b1111 - // program_info_length = 0x000 - // one or more elementary stream descriptions follow: + // program_info_length = 0x??? + // program_info_descriptors follow + // one or more elementary stream descriptions follow: // stream_type = 0x?? // reserved = b111 // elementary_PID = b? ???? ???? ???? (13 bits) @@ -616,8 +753,21 @@ status_t TSPacketizer::packetize( *ptr++ = 0x00; *ptr++ = 0xe0 | (kPID_PCR >> 8); *ptr++ = kPID_PCR & 0xff; - *ptr++ = 0xf0; - *ptr++ = 0x00; + + size_t program_info_length = 0; + for (size_t i = 0; i < mProgramInfoDescriptors.size(); ++i) { + program_info_length += mProgramInfoDescriptors.itemAt(i)->size(); + } + + CHECK_LT(program_info_length, 0x400); + *ptr++ = 0xf0 | (program_info_length >> 8); + *ptr++ = (program_info_length & 0xff); + + for (size_t i = 0; i < mProgramInfoDescriptors.size(); ++i) { + const sp<ABuffer> &desc = mProgramInfoDescriptors.itemAt(i); + memcpy(ptr, desc->data(), desc->size()); + ptr += desc->size(); + } for (size_t i = 0; i < mTracks.size(); ++i) { const sp<Track> &track = mTracks.itemAt(i); @@ -710,8 +860,6 @@ status_t TSPacketizer::packetize( uint64_t PTS = (timeUs * 9ll) / 100ll; - bool padding = (PES_packet_length < (188 - 10)); - if (PES_packet_length >= 65536) { // This really should only happen for video. 
CHECK(track->isVideo()); @@ -720,19 +868,37 @@ status_t TSPacketizer::packetize( PES_packet_length = 0; } + size_t sizeAvailableForPayload = 188 - 4 - 14 - numStuffingBytes; + if (PES_private_data_len > 0) { + sizeAvailableForPayload -= PES_private_data_len + 1; + } + + size_t copy = accessUnit->size(); + + if (copy > sizeAvailableForPayload) { + copy = sizeAvailableForPayload; + + if (alignPayload && copy > 16) { + copy -= (copy % 16); + } + } + + size_t numPaddingBytes = sizeAvailableForPayload - copy; + uint8_t *ptr = packetDataStart; *ptr++ = 0x47; *ptr++ = 0x40 | (track->PID() >> 8); *ptr++ = track->PID() & 0xff; - *ptr++ = (padding ? 0x30 : 0x10) | track->incrementContinuityCounter(); - if (padding) { - size_t paddingSize = 188 - 10 - PES_packet_length; - *ptr++ = paddingSize - 1; - if (paddingSize >= 2) { + *ptr++ = (numPaddingBytes > 0 ? 0x30 : 0x10) + | track->incrementContinuityCounter(); + + if (numPaddingBytes > 0) { + *ptr++ = numPaddingBytes - 1; + if (numPaddingBytes >= 2) { *ptr++ = 0x00; - memset(ptr, 0xff, paddingSize - 2); - ptr += paddingSize - 2; + memset(ptr, 0xff, numPaddingBytes - 2); + ptr += numPaddingBytes - 2; } } @@ -768,25 +934,14 @@ status_t TSPacketizer::packetize( *ptr++ = 0xff; } - // 18 bytes of TS/PES header leave 188 - 18 = 170 bytes for the payload - - size_t sizeLeft = packetDataStart + 188 - ptr; - size_t copy = accessUnit->size(); - if (copy > sizeLeft) { - copy = sizeLeft; - } - memcpy(ptr, accessUnit->data(), copy); ptr += copy; - CHECK_EQ(sizeLeft, copy); - memset(ptr, 0xff, sizeLeft - copy); + CHECK_EQ(ptr, packetDataStart + 188); packetDataStart += 188; size_t offset = copy; while (offset < accessUnit->size()) { - bool padding = (accessUnit->size() - offset) < (188 - 4); - // for subsequent fragments of "buffer": // 0x47 // transport_error_indicator = b0 @@ -798,35 +953,40 @@ status_t TSPacketizer::packetize( // continuity_counter = b???? // the fragment of "buffer" follows. 
+ size_t sizeAvailableForPayload = 188 - 4; + + size_t copy = accessUnit->size() - offset; + + if (copy > sizeAvailableForPayload) { + copy = sizeAvailableForPayload; + + if (alignPayload && copy > 16) { + copy -= (copy % 16); + } + } + + size_t numPaddingBytes = sizeAvailableForPayload - copy; + uint8_t *ptr = packetDataStart; *ptr++ = 0x47; *ptr++ = 0x00 | (track->PID() >> 8); *ptr++ = track->PID() & 0xff; - *ptr++ = (padding ? 0x30 : 0x10) | track->incrementContinuityCounter(); + *ptr++ = (numPaddingBytes > 0 ? 0x30 : 0x10) + | track->incrementContinuityCounter(); - if (padding) { - size_t paddingSize = 188 - 4 - (accessUnit->size() - offset); - *ptr++ = paddingSize - 1; - if (paddingSize >= 2) { + if (numPaddingBytes > 0) { + *ptr++ = numPaddingBytes - 1; + if (numPaddingBytes >= 2) { *ptr++ = 0x00; - memset(ptr, 0xff, paddingSize - 2); - ptr += paddingSize - 2; + memset(ptr, 0xff, numPaddingBytes - 2); + ptr += numPaddingBytes - 2; } } - // 4 bytes of TS header leave 188 - 4 = 184 bytes for the payload - - size_t sizeLeft = packetDataStart + 188 - ptr; - size_t copy = accessUnit->size() - offset; - if (copy > sizeLeft) { - copy = sizeLeft; - } - memcpy(ptr, accessUnit->data() + offset, copy); ptr += copy; - CHECK_EQ(sizeLeft, copy); - memset(ptr, 0xff, sizeLeft - copy); + CHECK_EQ(ptr, packetDataStart + 188); offset += copy; packetDataStart += 188; diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.h b/media/libstagefright/wifi-display/source/TSPacketizer.h index a37917d..4a664ee 100644 --- a/media/libstagefright/wifi-display/source/TSPacketizer.h +++ b/media/libstagefright/wifi-display/source/TSPacketizer.h @@ -32,7 +32,11 @@ struct AMessage; // Emits metadata tables (PAT and PMT) and timestamp stream (PCR) based // on flags. struct TSPacketizer : public RefBase { - TSPacketizer(); + enum { + EMIT_HDCP20_DESCRIPTOR = 1, + EMIT_HDCP21_DESCRIPTOR = 2, + }; + TSPacketizer(uint32_t flags); // Returns trackIndex or error. 
ssize_t addTrack(const sp<AMessage> &format); @@ -50,6 +54,8 @@ struct TSPacketizer : public RefBase { const uint8_t *PES_private_data, size_t PES_private_data_len, size_t numStuffingBytes = 0); + status_t extractCSDIfNecessary(size_t trackIndex); + // XXX to be removed once encoder config option takes care of this for // encrypted mode. sp<ABuffer> prependCSD( @@ -66,8 +72,11 @@ private: struct Track; + uint32_t mFlags; Vector<sp<Track> > mTracks; + Vector<sp<ABuffer> > mProgramInfoDescriptors; + unsigned mPATContinuityCounter; unsigned mPMTContinuityCounter; diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp index 08f67f9..22dd0b1 100644 --- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp +++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp @@ -22,10 +22,10 @@ #include "PlaybackSession.h" #include "Parameters.h" #include "ParsedMessage.h" -#include "Sender.h" +#include "rtp/RTPSender.h" #include <binder/IServiceManager.h> -#include <gui/ISurfaceTexture.h> +#include <gui/IGraphicBufferProducer.h> #include <media/IHDCP.h> #include <media/IMediaPlayerService.h> #include <media/IRemoteDisplayClient.h> @@ -33,6 +33,7 @@ #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> #include <media/stagefright/MediaErrors.h> +#include <media/stagefright/Utils.h> #include <arpa/inet.h> #include <cutils/properties.h> @@ -41,9 +42,13 @@ namespace android { +// static +const AString WifiDisplaySource::sUserAgent = MakeUserAgent(); + WifiDisplaySource::WifiDisplaySource( const sp<ANetworkSession> &netSession, - const sp<IRemoteDisplayClient> &client) + const sp<IRemoteDisplayClient> &client, + const char *path) : mState(INITIALIZED), mNetSession(netSession), mClient(client), @@ -58,8 +63,16 @@ WifiDisplaySource::WifiDisplaySource( mIsHDCP2_0(false), mHDCPPort(0), mHDCPInitializationComplete(false), - 
mSetupTriggerDeferred(false) -{ + mSetupTriggerDeferred(false), + mPlaybackSessionEstablished(false) { + if (path != NULL) { + mMediaPath.setTo(path); + } + + mSupportedSourceVideoFormats.disableAll(); + + mSupportedSourceVideoFormats.setNativeResolution( + VideoFormats::RESOLUTION_CEA, 5); // 1280x720 p30 } WifiDisplaySource::~WifiDisplaySource() { @@ -151,9 +164,7 @@ void WifiDisplaySource::onMessageReceived(const sp<AMessage> &msg) { } else { err = -EINVAL; } - } - if (err == OK) { mState = AWAITING_CLIENT_CONNECTION; } @@ -253,7 +264,8 @@ void WifiDisplaySource::onMessageReceived(const sp<AMessage> &msg) { if (!strcasecmp(val, "pause") && mState == PLAYING) { mState = PLAYING_TO_PAUSED; sendTrigger(mClientSessionID, TRIGGER_PAUSE); - } else if (!strcasecmp(val, "play") && mState == PAUSED) { + } else if (!strcasecmp(val, "play") + && mState == PAUSED) { mState = PAUSED_TO_PLAYING; sendTrigger(mClientSessionID, TRIGGER_PLAY); } @@ -262,6 +274,11 @@ void WifiDisplaySource::onMessageReceived(const sp<AMessage> &msg) { break; } + case ANetworkSession::kWhatNetworkStall: + { + break; + } + default: TRESPASS(); } @@ -374,16 +391,41 @@ void WifiDisplaySource::onMessageReceived(const sp<AMessage> &msg) { mClient->onDisplayError( IRemoteDisplayClient::kDisplayErrorUnknown); } else if (what == PlaybackSession::kWhatSessionEstablished) { + mPlaybackSessionEstablished = true; + if (mClient != NULL) { - mClient->onDisplayConnected( - mClientInfo.mPlaybackSession->getSurfaceTexture(), - mClientInfo.mPlaybackSession->width(), - mClientInfo.mPlaybackSession->height(), - mUsingHDCP - ? IRemoteDisplayClient::kDisplayFlagSecure - : 0); + if (!mSinkSupportsVideo) { + mClient->onDisplayConnected( + NULL, // SurfaceTexture + 0, // width, + 0, // height, + mUsingHDCP + ? 
IRemoteDisplayClient::kDisplayFlagSecure + : 0); + } else { + size_t width, height; + + CHECK(VideoFormats::GetConfiguration( + mChosenVideoResolutionType, + mChosenVideoResolutionIndex, + &width, + &height, + NULL /* framesPerSecond */, + NULL /* interlaced */)); + + mClient->onDisplayConnected( + mClientInfo.mPlaybackSession + ->getSurfaceTexture(), + width, + height, + mUsingHDCP + ? IRemoteDisplayClient::kDisplayFlagSecure + : 0); + } } + finishPlay(); + if (mState == ABOUT_TO_PLAY) { mState = PLAYING; } @@ -564,55 +606,38 @@ status_t WifiDisplaySource::sendM3(int32_t sessionID) { } status_t WifiDisplaySource::sendM4(int32_t sessionID) { - // wfd_video_formats: - // 1 byte "native" - // 1 byte "preferred-display-mode-supported" 0 or 1 - // one or more avc codec structures - // 1 byte profile - // 1 byte level - // 4 byte CEA mask - // 4 byte VESA mask - // 4 byte HH mask - // 1 byte latency - // 2 byte min-slice-slice - // 2 byte slice-enc-params - // 1 byte framerate-control-support - // max-hres (none or 2 byte) - // max-vres (none or 2 byte) - CHECK_EQ(sessionID, mClientSessionID); - AString transportString = "UDP"; - - char val[PROPERTY_VALUE_MAX]; - if (property_get("media.wfd.enable-tcp", val, NULL) - && (!strcasecmp("true", val) || !strcmp("1", val))) { - ALOGI("Using TCP transport."); - transportString = "TCP"; - } - - // For 720p60: - // use "30 00 02 02 00000040 00000000 00000000 00 0000 0000 00 none none\r\n" - // For 720p30: - // use "28 00 02 02 00000020 00000000 00000000 00 0000 0000 00 none none\r\n" - // For 720p24: - // use "78 00 02 02 00008000 00000000 00000000 00 0000 0000 00 none none\r\n" - // For 1080p30: - // use "38 00 02 02 00000080 00000000 00000000 00 0000 0000 00 none none\r\n" - AString body = StringPrintf( - "wfd_video_formats: " -#if USE_1080P - "38 00 02 02 00000080 00000000 00000000 00 0000 0000 00 none none\r\n" -#else - "28 00 02 02 00000020 00000000 00000000 00 0000 0000 00 none none\r\n" -#endif - "wfd_audio_codecs: %s\r\n" 
- "wfd_presentation_URL: rtsp://%s/wfd1.0/streamid=0 none\r\n" - "wfd_client_rtp_ports: RTP/AVP/%s;unicast %d 0 mode=play\r\n", - (mUsingPCMAudio - ? "LPCM 00000002 00" // 2 ch PCM 48kHz - : "AAC 00000001 00"), // 2 ch AAC 48kHz - mClientInfo.mLocalIP.c_str(), transportString.c_str(), mChosenRTPPort); + AString body; + + if (mSinkSupportsVideo) { + body.append("wfd_video_formats: "); + + VideoFormats chosenVideoFormat; + chosenVideoFormat.disableAll(); + chosenVideoFormat.setNativeResolution( + mChosenVideoResolutionType, mChosenVideoResolutionIndex); + + body.append(chosenVideoFormat.getFormatSpec(true /* forM4Message */)); + body.append("\r\n"); + } + + if (mSinkSupportsAudio) { + body.append( + StringPrintf("wfd_audio_codecs: %s\r\n", + (mUsingPCMAudio + ? "LPCM 00000002 00" // 2 ch PCM 48kHz + : "AAC 00000001 00"))); // 2 ch AAC 48kHz + } + + body.append( + StringPrintf( + "wfd_presentation_URL: rtsp://%s/wfd1.0/streamid=0 none\r\n", + mClientInfo.mLocalIP.c_str())); + + body.append( + StringPrintf( + "wfd_client_rtp_ports: %s\r\n", mWfdClientRtpPorts.c_str())); AString request = "SET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n"; AppendCommonResponse(&request, mNextCSeq); @@ -775,53 +800,115 @@ status_t WifiDisplaySource::onReceiveM3Response( return ERROR_MALFORMED; } - unsigned port0, port1; + unsigned port0 = 0, port1 = 0; if (sscanf(value.c_str(), "RTP/AVP/UDP;unicast %u %u mode=play", &port0, - &port1) != 2 - || port0 == 0 || port0 > 65535 || port1 != 0) { - ALOGE("Sink chose its wfd_client_rtp_ports poorly (%s)", + &port1) == 2 + || sscanf(value.c_str(), + "RTP/AVP/TCP;unicast %u %u mode=play", + &port0, + &port1) == 2) { + if (port0 == 0 || port0 > 65535 || port1 != 0) { + ALOGE("Sink chose its wfd_client_rtp_ports poorly (%s)", + value.c_str()); + + return ERROR_MALFORMED; + } + } else if (strcmp(value.c_str(), "RTP/AVP/TCP;interleaved mode=play")) { + ALOGE("Unsupported value for wfd_client_rtp_ports (%s)", value.c_str()); - return ERROR_MALFORMED; + 
return ERROR_UNSUPPORTED; } + mWfdClientRtpPorts = value; mChosenRTPPort = port0; + if (!params->findParameter("wfd_video_formats", &value)) { + ALOGE("Sink doesn't report its choice of wfd_video_formats."); + return ERROR_MALFORMED; + } + + mSinkSupportsVideo = false; + + if (!(value == "none")) { + mSinkSupportsVideo = true; + if (!mSupportedSinkVideoFormats.parseFormatSpec(value.c_str())) { + ALOGE("Failed to parse sink provided wfd_video_formats (%s)", + value.c_str()); + + return ERROR_MALFORMED; + } + + if (!VideoFormats::PickBestFormat( + mSupportedSinkVideoFormats, + mSupportedSourceVideoFormats, + &mChosenVideoResolutionType, + &mChosenVideoResolutionIndex)) { + ALOGE("Sink and source share no commonly supported video " + "formats."); + + return ERROR_UNSUPPORTED; + } + + size_t width, height, framesPerSecond; + bool interlaced; + CHECK(VideoFormats::GetConfiguration( + mChosenVideoResolutionType, + mChosenVideoResolutionIndex, + &width, + &height, + &framesPerSecond, + &interlaced)); + + ALOGI("Picked video resolution %u x %u %c%u", + width, height, interlaced ? 
'i' : 'p', framesPerSecond); + } else { + ALOGI("Sink doesn't support video at all."); + } + if (!params->findParameter("wfd_audio_codecs", &value)) { ALOGE("Sink doesn't report its choice of wfd_audio_codecs."); return ERROR_MALFORMED; } - if (value == "none") { - ALOGE("Sink doesn't support audio at all."); - return ERROR_UNSUPPORTED; - } + mSinkSupportsAudio = false; - uint32_t modes; - GetAudioModes(value.c_str(), "AAC", &modes); + if (!(value == "none")) { + mSinkSupportsAudio = true; - bool supportsAAC = (modes & 1) != 0; // AAC 2ch 48kHz + uint32_t modes; + GetAudioModes(value.c_str(), "AAC", &modes); - GetAudioModes(value.c_str(), "LPCM", &modes); + bool supportsAAC = (modes & 1) != 0; // AAC 2ch 48kHz - bool supportsPCM = (modes & 2) != 0; // LPCM 2ch 48kHz + GetAudioModes(value.c_str(), "LPCM", &modes); - char val[PROPERTY_VALUE_MAX]; - if (supportsPCM - && property_get("media.wfd.use-pcm-audio", val, NULL) - && (!strcasecmp("true", val) || !strcmp("1", val))) { - ALOGI("Using PCM audio."); - mUsingPCMAudio = true; - } else if (supportsAAC) { - ALOGI("Using AAC audio."); - mUsingPCMAudio = false; - } else if (supportsPCM) { - ALOGI("Using PCM audio."); - mUsingPCMAudio = true; + bool supportsPCM = (modes & 2) != 0; // LPCM 2ch 48kHz + + char val[PROPERTY_VALUE_MAX]; + if (supportsPCM + && property_get("media.wfd.use-pcm-audio", val, NULL) + && (!strcasecmp("true", val) || !strcmp("1", val))) { + ALOGI("Using PCM audio."); + mUsingPCMAudio = true; + } else if (supportsAAC) { + ALOGI("Using AAC audio."); + mUsingPCMAudio = false; + } else if (supportsPCM) { + ALOGI("Using PCM audio."); + mUsingPCMAudio = true; + } else { + ALOGI("Sink doesn't support an audio format we do."); + return ERROR_UNSUPPORTED; + } } else { - ALOGI("Sink doesn't support an audio format we do."); + ALOGI("Sink doesn't support audio at all."); + } + + if (!mSinkSupportsVideo && !mSinkSupportsAudio) { + ALOGE("Sink supports neither video nor audio..."); return ERROR_UNSUPPORTED; } @@ 
-1065,7 +1152,7 @@ status_t WifiDisplaySource::onSetupRequest( return ERROR_MALFORMED; } - Sender::TransportMode transportMode = Sender::TRANSPORT_UDP; + RTPSender::TransportMode rtpMode = RTPSender::TRANSPORT_UDP; int clientRtp, clientRtcp; if (transport.startsWith("RTP/AVP/TCP;")) { @@ -1074,7 +1161,7 @@ status_t WifiDisplaySource::onSetupRequest( transport.c_str(), "interleaved", &interleaved) && sscanf(interleaved.c_str(), "%d-%d", &clientRtp, &clientRtcp) == 2) { - transportMode = Sender::TRANSPORT_TCP_INTERLEAVED; + rtpMode = RTPSender::TRANSPORT_TCP_INTERLEAVED; } else { bool badRequest = false; @@ -1096,7 +1183,7 @@ status_t WifiDisplaySource::onSetupRequest( return ERROR_MALFORMED; } - transportMode = Sender::TRANSPORT_TCP; + rtpMode = RTPSender::TRANSPORT_TCP; } } else if (transport.startsWith("RTP/AVP;unicast;") || transport.startsWith("RTP/AVP/UDP;unicast;")) { @@ -1138,7 +1225,7 @@ status_t WifiDisplaySource::onSetupRequest( sp<PlaybackSession> playbackSession = new PlaybackSession( - mNetSession, notify, mInterfaceAddr, mHDCP); + mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str()); looper()->registerHandler(playbackSession); @@ -1155,12 +1242,22 @@ status_t WifiDisplaySource::onSetupRequest( return ERROR_MALFORMED; } + RTPSender::TransportMode rtcpMode = RTPSender::TRANSPORT_UDP; + if (clientRtcp < 0) { + rtcpMode = RTPSender::TRANSPORT_NONE; + } + status_t err = playbackSession->init( mClientInfo.mRemoteIP.c_str(), clientRtp, + rtpMode, clientRtcp, - transportMode, - mUsingPCMAudio); + rtcpMode, + mSinkSupportsAudio, + mUsingPCMAudio, + mSinkSupportsVideo, + mChosenVideoResolutionType, + mChosenVideoResolutionIndex); if (err != OK) { looper()->unregisterHandler(playbackSession->id()); @@ -1184,7 +1281,7 @@ status_t WifiDisplaySource::onSetupRequest( AString response = "RTSP/1.0 200 OK\r\n"; AppendCommonResponse(&response, cseq, playbackSessionID); - if (transportMode == Sender::TRANSPORT_TCP_INTERLEAVED) { + if (rtpMode == 
RTPSender::TRANSPORT_TCP_INTERLEAVED) { response.append( StringPrintf( "Transport: RTP/AVP/TCP;interleaved=%d-%d;", @@ -1193,7 +1290,7 @@ status_t WifiDisplaySource::onSetupRequest( int32_t serverRtp = playbackSession->getRTPPort(); AString transportString = "UDP"; - if (transportMode == Sender::TRANSPORT_TCP) { + if (rtpMode == RTPSender::TRANSPORT_TCP) { transportString = "TCP"; } @@ -1243,17 +1340,28 @@ status_t WifiDisplaySource::onPlayRequest( return ERROR_MALFORMED; } - ALOGI("Received PLAY request."); + if (mState != AWAITING_CLIENT_PLAY) { + ALOGW("Received PLAY request but we're in state %d", mState); - status_t err = playbackSession->play(); - CHECK_EQ(err, (status_t)OK); + sendErrorResponse( + sessionID, "455 Method Not Valid in This State", cseq); + + return INVALID_OPERATION; + } + + ALOGI("Received PLAY request."); + if (mPlaybackSessionEstablished) { + finishPlay(); + } else { + ALOGI("deferring PLAY request until session established."); + } AString response = "RTSP/1.0 200 OK\r\n"; AppendCommonResponse(&response, cseq, playbackSessionID); response.append("Range: npt=now-\r\n"); response.append("\r\n"); - err = mNetSession->sendRequest(sessionID, response.c_str()); + status_t err = mNetSession->sendRequest(sessionID, response.c_str()); if (err != OK) { return err; @@ -1264,14 +1372,20 @@ status_t WifiDisplaySource::onPlayRequest( return OK; } - playbackSession->finishPlay(); - CHECK_EQ(mState, AWAITING_CLIENT_PLAY); mState = ABOUT_TO_PLAY; return OK; } +void WifiDisplaySource::finishPlay() { + const sp<PlaybackSession> &playbackSession = + mClientInfo.mPlaybackSession; + + status_t err = playbackSession->play(); + CHECK_EQ(err, (status_t)OK); +} + status_t WifiDisplaySource::onPauseRequest( int32_t sessionID, int32_t cseq, @@ -1447,7 +1561,7 @@ void WifiDisplaySource::AppendCommonResponse( response->append(buf); response->append("\r\n"); - response->append("Server: Mine/1.0\r\n"); + response->append(StringPrintf("Server: %s\r\n", 
sUserAgent.c_str())); if (cseq >= 0) { response->append(StringPrintf("CSeq: %d\r\n", cseq)); @@ -1557,10 +1671,13 @@ void WifiDisplaySource::HDCPObserver::notify( status_t WifiDisplaySource::makeHDCP() { sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder = sm->getService(String16("media.player")); - sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder); + + sp<IMediaPlayerService> service = + interface_cast<IMediaPlayerService>(binder); + CHECK(service != NULL); - mHDCP = service->makeHDCP(); + mHDCP = service->makeHDCP(true /* createEncryptionModule */); if (mHDCP == NULL) { return ERROR_UNSUPPORTED; diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h index 974e070..44d3e4d 100644 --- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h +++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h @@ -19,6 +19,7 @@ #define WIFI_DISPLAY_SOURCE_H_ #include "ANetworkSession.h" +#include "VideoFormats.h" #include <media/stagefright/foundation/AHandler.h> @@ -26,8 +27,6 @@ namespace android { -#define USE_1080P 0 - struct IHDCP; struct IRemoteDisplayClient; struct ParsedMessage; @@ -39,7 +38,8 @@ struct WifiDisplaySource : public AHandler { WifiDisplaySource( const sp<ANetworkSession> &netSession, - const sp<IRemoteDisplayClient> &client); + const sp<IRemoteDisplayClient> &client, + const char *path = NULL); status_t start(const char *iface); status_t stop(); @@ -111,16 +111,29 @@ private: static const int64_t kPlaybackSessionTimeoutUs = kPlaybackSessionTimeoutSecs * 1000000ll; + static const AString sUserAgent; + State mState; + VideoFormats mSupportedSourceVideoFormats; sp<ANetworkSession> mNetSession; sp<IRemoteDisplayClient> mClient; + AString mMediaPath; struct in_addr mInterfaceAddr; int32_t mSessionID; uint32_t mStopReplyID; + AString mWfdClientRtpPorts; int32_t mChosenRTPPort; // extracted from "wfd_client_rtp_ports" + 
bool mSinkSupportsVideo; + VideoFormats mSupportedSinkVideoFormats; + + VideoFormats::ResolutionType mChosenVideoResolutionType; + size_t mChosenVideoResolutionIndex; + + bool mSinkSupportsAudio; + bool mUsingPCMAudio; int32_t mClientSessionID; @@ -149,6 +162,8 @@ private: bool mHDCPInitializationComplete; bool mSetupTriggerDeferred; + bool mPlaybackSessionEstablished; + status_t makeHDCP(); // <<<< HDCP specific section @@ -245,6 +260,8 @@ private: void finishStopAfterDisconnectingClient(); void finishStop2(); + void finishPlay(); + DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySource); }; diff --git a/media/libstagefright/wifi-display/udptest.cpp b/media/libstagefright/wifi-display/udptest.cpp deleted file mode 100644 index 1cd82c3..0000000 --- a/media/libstagefright/wifi-display/udptest.cpp +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright 2012, The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -//#define LOG_NEBUG 0 -#define LOG_TAG "udptest" -#include <utils/Log.h> - -#include "ANetworkSession.h" - -#include <binder/ProcessState.h> -#include <media/stagefright/foundation/ABuffer.h> -#include <media/stagefright/foundation/ADebug.h> -#include <media/stagefright/foundation/AHandler.h> -#include <media/stagefright/foundation/ALooper.h> -#include <media/stagefright/foundation/AMessage.h> -#include <media/stagefright/Utils.h> - -namespace android { - -struct TestHandler : public AHandler { - TestHandler(const sp<ANetworkSession> &netSession); - - void startServer(unsigned localPort); - void startClient(const char *remoteHost, unsigned remotePort); - -protected: - virtual ~TestHandler(); - - virtual void onMessageReceived(const sp<AMessage> &msg); - -private: - enum { - kWhatStartServer, - kWhatStartClient, - kWhatUDPNotify, - kWhatSendPacket, - }; - - sp<ANetworkSession> mNetSession; - - bool mIsServer; - bool mConnected; - int32_t mUDPSession; - uint32_t mSeqNo; - double mTotalTimeUs; - int32_t mCount; - - void postSendPacket(int64_t delayUs = 0ll); - - DISALLOW_EVIL_CONSTRUCTORS(TestHandler); -}; - -TestHandler::TestHandler(const sp<ANetworkSession> &netSession) - : mNetSession(netSession), - mIsServer(false), - mConnected(false), - mUDPSession(0), - mSeqNo(0), - mTotalTimeUs(0.0), - mCount(0) { -} - -TestHandler::~TestHandler() { -} - -void TestHandler::startServer(unsigned localPort) { - sp<AMessage> msg = new AMessage(kWhatStartServer, id()); - msg->setInt32("localPort", localPort); - msg->post(); -} - -void TestHandler::startClient(const char *remoteHost, unsigned remotePort) { - sp<AMessage> msg = new AMessage(kWhatStartClient, id()); - msg->setString("remoteHost", remoteHost); - msg->setInt32("remotePort", remotePort); - msg->post(); -} - -void TestHandler::onMessageReceived(const sp<AMessage> &msg) { - switch (msg->what()) { - case kWhatStartClient: - { - AString remoteHost; - CHECK(msg->findString("remoteHost", &remoteHost)); - - int32_t 
remotePort; - CHECK(msg->findInt32("remotePort", &remotePort)); - - sp<AMessage> notify = new AMessage(kWhatUDPNotify, id()); - - CHECK_EQ((status_t)OK, - mNetSession->createUDPSession( - 0 /* localPort */, - remoteHost.c_str(), - remotePort, - notify, - &mUDPSession)); - - postSendPacket(); - break; - } - - case kWhatStartServer: - { - mIsServer = true; - - int32_t localPort; - CHECK(msg->findInt32("localPort", &localPort)); - - sp<AMessage> notify = new AMessage(kWhatUDPNotify, id()); - - CHECK_EQ((status_t)OK, - mNetSession->createUDPSession( - localPort, notify, &mUDPSession)); - - break; - } - - case kWhatSendPacket: - { - char buffer[12]; - memset(buffer, 0, sizeof(buffer)); - - buffer[0] = mSeqNo >> 24; - buffer[1] = (mSeqNo >> 16) & 0xff; - buffer[2] = (mSeqNo >> 8) & 0xff; - buffer[3] = mSeqNo & 0xff; - ++mSeqNo; - - int64_t nowUs = ALooper::GetNowUs(); - buffer[4] = nowUs >> 56; - buffer[5] = (nowUs >> 48) & 0xff; - buffer[6] = (nowUs >> 40) & 0xff; - buffer[7] = (nowUs >> 32) & 0xff; - buffer[8] = (nowUs >> 24) & 0xff; - buffer[9] = (nowUs >> 16) & 0xff; - buffer[10] = (nowUs >> 8) & 0xff; - buffer[11] = nowUs & 0xff; - - CHECK_EQ((status_t)OK, - mNetSession->sendRequest( - mUDPSession, buffer, sizeof(buffer))); - - postSendPacket(20000ll); - break; - } - - case kWhatUDPNotify: - { - int32_t reason; - CHECK(msg->findInt32("reason", &reason)); - - switch (reason) { - case ANetworkSession::kWhatError: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - int32_t err; - CHECK(msg->findInt32("err", &err)); - - AString detail; - CHECK(msg->findString("detail", &detail)); - - ALOGE("An error occurred in session %d (%d, '%s/%s').", - sessionID, - err, - detail.c_str(), - strerror(-err)); - - mNetSession->destroySession(sessionID); - break; - } - - case ANetworkSession::kWhatDatagram: - { - int32_t sessionID; - CHECK(msg->findInt32("sessionID", &sessionID)); - - sp<ABuffer> data; - CHECK(msg->findBuffer("data", &data)); - - if 
(mIsServer) { - if (!mConnected) { - AString fromAddr; - CHECK(msg->findString("fromAddr", &fromAddr)); - - int32_t fromPort; - CHECK(msg->findInt32("fromPort", &fromPort)); - - CHECK_EQ((status_t)OK, - mNetSession->connectUDPSession( - mUDPSession, fromAddr.c_str(), fromPort)); - - mConnected = true; - } - - int64_t nowUs = ALooper::GetNowUs(); - - sp<ABuffer> buffer = new ABuffer(data->size() + 8); - memcpy(buffer->data(), data->data(), data->size()); - - uint8_t *ptr = buffer->data() + data->size(); - - *ptr++ = nowUs >> 56; - *ptr++ = (nowUs >> 48) & 0xff; - *ptr++ = (nowUs >> 40) & 0xff; - *ptr++ = (nowUs >> 32) & 0xff; - *ptr++ = (nowUs >> 24) & 0xff; - *ptr++ = (nowUs >> 16) & 0xff; - *ptr++ = (nowUs >> 8) & 0xff; - *ptr++ = nowUs & 0xff; - - CHECK_EQ((status_t)OK, - mNetSession->sendRequest( - mUDPSession, buffer->data(), buffer->size())); - } else { - CHECK_EQ(data->size(), 20u); - - uint32_t seqNo = U32_AT(data->data()); - int64_t t1 = U64_AT(data->data() + 4); - int64_t t2 = U64_AT(data->data() + 12); - - int64_t t3; - CHECK(data->meta()->findInt64("arrivalTimeUs", &t3)); - -#if 0 - printf("roundtrip seqNo %u, time = %lld us\n", - seqNo, t3 - t1); -#else - mTotalTimeUs += t3 - t1; - ++mCount; - printf("avg. 
roundtrip time %.2f us\n", mTotalTimeUs / mCount); -#endif - } - break; - } - - default: - TRESPASS(); - } - - break; - } - - default: - TRESPASS(); - } -} - -void TestHandler::postSendPacket(int64_t delayUs) { - (new AMessage(kWhatSendPacket, id()))->post(delayUs); -} - -} // namespace android - -static void usage(const char *me) { - fprintf(stderr, - "usage: %s -c host[:port]\tconnect to test server\n" - " -l \tcreate a test server\n", - me); -} - -int main(int argc, char **argv) { - using namespace android; - - ProcessState::self()->startThreadPool(); - - int32_t localPort = -1; - int32_t connectToPort = -1; - AString connectToHost; - - int res; - while ((res = getopt(argc, argv, "hc:l:")) >= 0) { - switch (res) { - case 'c': - { - const char *colonPos = strrchr(optarg, ':'); - - if (colonPos == NULL) { - connectToHost = optarg; - connectToPort = 49152; - } else { - connectToHost.setTo(optarg, colonPos - optarg); - - char *end; - connectToPort = strtol(colonPos + 1, &end, 10); - - if (*end != '\0' || end == colonPos + 1 - || connectToPort < 1 || connectToPort > 65535) { - fprintf(stderr, "Illegal port specified.\n"); - exit(1); - } - } - break; - } - - case 'l': - { - char *end; - localPort = strtol(optarg, &end, 10); - - if (*end != '\0' || end == optarg - || localPort < 1 || localPort > 65535) { - fprintf(stderr, "Illegal port specified.\n"); - exit(1); - } - break; - } - - case '?': - case 'h': - usage(argv[0]); - exit(1); - } - } - - if (localPort < 0 && connectToPort < 0) { - fprintf(stderr, - "You need to select either client or server mode.\n"); - exit(1); - } - - sp<ANetworkSession> netSession = new ANetworkSession; - netSession->start(); - - sp<ALooper> looper = new ALooper; - - sp<TestHandler> handler = new TestHandler(netSession); - looper->registerHandler(handler); - - if (localPort >= 0) { - handler->startServer(localPort); - } else { - handler->startClient(connectToHost.c_str(), connectToPort); - } - - looper->start(true /* runOnCallingThread */); 
- - return 0; -} - diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp index 03a1123..c947765 100644 --- a/media/libstagefright/wifi-display/wfd.cpp +++ b/media/libstagefright/wifi-display/wfd.cpp @@ -18,11 +18,11 @@ #define LOG_TAG "wfd" #include <utils/Log.h> -#include "sink/WifiDisplaySink.h" #include "source/WifiDisplaySource.h" #include <binder/ProcessState.h> #include <binder/IServiceManager.h> +#include <gui/ISurfaceComposer.h> #include <gui/SurfaceComposerClient.h> #include <media/AudioSystem.h> #include <media/IMediaPlayerService.h> @@ -30,16 +30,16 @@ #include <media/IRemoteDisplayClient.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <ui/DisplayInfo.h> namespace android { static void usage(const char *me) { fprintf(stderr, "usage:\n" - " %s -c host[:port]\tconnect to wifi source\n" - " -u uri \tconnect to an rtsp uri\n" - " -l ip[:port] \tlisten on the specified port " - "(create a sink)\n", + " %s -l iface[:port]\tcreate a wifi display source\n" + " -f(ilename) \tstream media\n", me); } @@ -47,7 +47,7 @@ struct RemoteDisplayClient : public BnRemoteDisplayClient { RemoteDisplayClient(); virtual void onDisplayConnected( - const sp<ISurfaceTexture> &surfaceTexture, + const sp<IGraphicBufferProducer> &bufferProducer, uint32_t width, uint32_t height, uint32_t flags); @@ -67,7 +67,7 @@ private: bool mDone; sp<SurfaceComposerClient> mComposerClient; - sp<ISurfaceTexture> mSurfaceTexture; + sp<IGraphicBufferProducer> mSurfaceTexture; sp<IBinder> mDisplayBinder; DISALLOW_EVIL_CONSTRUCTORS(RemoteDisplayClient); @@ -83,29 +83,31 @@ RemoteDisplayClient::~RemoteDisplayClient() { } void RemoteDisplayClient::onDisplayConnected( - const sp<ISurfaceTexture> &surfaceTexture, + const sp<IGraphicBufferProducer> &bufferProducer, uint32_t width, uint32_t height, uint32_t flags) { ALOGI("onDisplayConnected width=%u, 
height=%u, flags = 0x%08x", width, height, flags); - mSurfaceTexture = surfaceTexture; - mDisplayBinder = mComposerClient->createDisplay( - String8("foo"), false /* secure */); + if (bufferProducer != NULL) { + mSurfaceTexture = bufferProducer; + mDisplayBinder = mComposerClient->createDisplay( + String8("foo"), false /* secure */); - SurfaceComposerClient::openGlobalTransaction(); - mComposerClient->setDisplaySurface(mDisplayBinder, mSurfaceTexture); + SurfaceComposerClient::openGlobalTransaction(); + mComposerClient->setDisplaySurface(mDisplayBinder, mSurfaceTexture); - Rect layerStackRect(1280, 720); // XXX fix this. - Rect displayRect(1280, 720); + Rect layerStackRect(1280, 720); // XXX fix this. + Rect displayRect(1280, 720); - mComposerClient->setDisplayProjection( - mDisplayBinder, 0 /* 0 degree rotation */, - layerStackRect, - displayRect); + mComposerClient->setDisplayProjection( + mDisplayBinder, 0 /* 0 degree rotation */, + layerStackRect, + displayRect); - SurfaceComposerClient::closeGlobalTransaction(); + SurfaceComposerClient::closeGlobalTransaction(); + } } void RemoteDisplayClient::onDisplayDisconnected() { @@ -178,6 +180,26 @@ static void createSource(const AString &addr, int32_t port) { enableAudioSubmix(false /* enable */); } +static void createFileSource( + const AString &addr, int32_t port, const char *path) { + sp<ANetworkSession> session = new ANetworkSession; + session->start(); + + sp<ALooper> looper = new ALooper; + looper->start(); + + sp<RemoteDisplayClient> client = new RemoteDisplayClient; + sp<WifiDisplaySource> source = new WifiDisplaySource(session, client, path); + looper->registerHandler(source); + + AString iface = StringPrintf("%s:%d", addr.c_str(), port); + CHECK_EQ((status_t)OK, source->start(iface.c_str())); + + client->waitUntilDone(); + + source->stop(); +} + } // namespace android int main(int argc, char **argv) { @@ -187,41 +209,17 @@ int main(int argc, char **argv) { DataSource::RegisterDefaultSniffers(); - AString 
connectToHost; - int32_t connectToPort = -1; - AString uri; - AString listenOnAddr; int32_t listenOnPort = -1; + AString path; + int res; - while ((res = getopt(argc, argv, "hc:l:u:")) >= 0) { + while ((res = getopt(argc, argv, "hl:f:")) >= 0) { switch (res) { - case 'c': - { - const char *colonPos = strrchr(optarg, ':'); - - if (colonPos == NULL) { - connectToHost = optarg; - connectToPort = WifiDisplaySource::kWifiDisplayDefaultPort; - } else { - connectToHost.setTo(optarg, colonPos - optarg); - - char *end; - connectToPort = strtol(colonPos + 1, &end, 10); - - if (*end != '\0' || end == colonPos + 1 - || connectToPort < 1 || connectToPort > 65535) { - fprintf(stderr, "Illegal port specified.\n"); - exit(1); - } - } - break; - } - - case 'u': + case 'f': { - uri = optarg; + path = optarg; break; } @@ -255,47 +253,17 @@ int main(int argc, char **argv) { } } - if (connectToPort >= 0 && listenOnPort >= 0) { - fprintf(stderr, - "You can connect to a source or create one, " - "but not both at the same time.\n"); - exit(1); - } - if (listenOnPort >= 0) { - createSource(listenOnAddr, listenOnPort); - exit(0); - } - - if (connectToPort < 0 && uri.empty()) { - fprintf(stderr, - "You need to select either source host or uri.\n"); - - exit(1); - } - - if (connectToPort >= 0 && !uri.empty()) { - fprintf(stderr, - "You need to either connect to a wfd host or an rtsp url, " - "not both.\n"); - exit(1); - } - - sp<ANetworkSession> session = new ANetworkSession; - session->start(); - - sp<ALooper> looper = new ALooper; - - sp<WifiDisplaySink> sink = new WifiDisplaySink(session); - looper->registerHandler(sink); + if (path.empty()) { + createSource(listenOnAddr, listenOnPort); + } else { + createFileSource(listenOnAddr, listenOnPort, path.c_str()); + } - if (connectToPort >= 0) { - sink->start(connectToHost.c_str(), connectToPort); - } else { - sink->start(uri.c_str()); + exit(0); } - looper->start(true /* runOnCallingThread */); + usage(argv[0]); return 0; } diff --git 
a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk index a4253f6..b3f7b1b 100644 --- a/media/libstagefright/yuv/Android.mk +++ b/media/libstagefright/yuv/Android.mk @@ -6,7 +6,8 @@ LOCAL_SRC_FILES:= \ YUVCanvas.cpp LOCAL_SHARED_LIBRARIES := \ - libcutils + libcutils \ + liblog LOCAL_MODULE:= libstagefright_yuv diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk index 5a73cdd..1ac647a 100644 --- a/media/mediaserver/Android.mk +++ b/media/mediaserver/Android.mk @@ -1,4 +1,13 @@ LOCAL_PATH:= $(call my-dir) + +ifneq ($(BOARD_USE_CUSTOM_MEDIASERVEREXTENSIONS),true) +include $(CLEAR_VARS) +LOCAL_SRC_FILES := register.cpp +LOCAL_MODULE := libregistermsext +LOCAL_MODULE_TAGS := optional +include $(BUILD_STATIC_LIBRARY) +endif + include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ @@ -7,16 +16,23 @@ LOCAL_SRC_FILES:= \ LOCAL_SHARED_LIBRARIES := \ libaudioflinger \ libcameraservice \ + libmedialogservice \ + libcutils \ + libnbaio \ + libmedia \ libmediaplayerservice \ libutils \ + liblog \ libbinder -# FIXME The duplicate audioflinger is temporary +LOCAL_STATIC_LIBRARIES := \ + libregistermsext + LOCAL_C_INCLUDES := \ frameworks/av/media/libmediaplayerservice \ + frameworks/av/services/medialog \ frameworks/av/services/audioflinger \ - frameworks/av/services/camera/libcameraservice \ - frameworks/native/services/audioflinger + frameworks/av/services/camera/libcameraservice LOCAL_MODULE:= mediaserver diff --git a/media/mediaserver/RegisterExtensions.h b/media/mediaserver/RegisterExtensions.h new file mode 100644 index 0000000..9a8c03c --- /dev/null +++ b/media/mediaserver/RegisterExtensions.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef REGISTER_EXTENSIONS_H +#define REGISTER_EXTENSIONS_H + +extern void registerExtensions(); + +#endif // REGISTER_EXTENSIONS_H diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp index ddd5b84..d5207d5 100644 --- a/media/mediaserver/main_mediaserver.cpp +++ b/media/mediaserver/main_mediaserver.cpp @@ -18,14 +18,20 @@ #define LOG_TAG "mediaserver" //#define LOG_NDEBUG 0 +#include <fcntl.h> +#include <sys/prctl.h> +#include <sys/wait.h> #include <binder/IPCThreadState.h> #include <binder/ProcessState.h> #include <binder/IServiceManager.h> +#include <cutils/properties.h> #include <utils/Log.h> +#include "RegisterExtensions.h" // from LOCAL_C_INCLUDES #include "AudioFlinger.h" #include "CameraService.h" +#include "MediaLogService.h" #include "MediaPlayerService.h" #include "AudioPolicyService.h" @@ -34,13 +40,96 @@ using namespace android; int main(int argc, char** argv) { signal(SIGPIPE, SIG_IGN); - sp<ProcessState> proc(ProcessState::self()); - sp<IServiceManager> sm = defaultServiceManager(); - ALOGI("ServiceManager: %p", sm.get()); - AudioFlinger::instantiate(); - MediaPlayerService::instantiate(); - CameraService::instantiate(); - AudioPolicyService::instantiate(); - ProcessState::self()->startThreadPool(); - IPCThreadState::self()->joinThreadPool(); + char value[PROPERTY_VALUE_MAX]; + bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1); + pid_t childPid; + // FIXME The advantage of making the process containing media.log service the parent process of + // the 
process that contains all the other real services, is that it allows us to collect more + // detailed information such as signal numbers, stop and continue, resource usage, etc. + // But it is also more complex. Consider replacing this by independent processes, and using + // binder on death notification instead. + if (doLog && (childPid = fork()) != 0) { + // media.log service + //prctl(PR_SET_NAME, (unsigned long) "media.log", 0, 0, 0); + // unfortunately ps ignores PR_SET_NAME for the main thread, so use this ugly hack + strcpy(argv[0], "media.log"); + sp<ProcessState> proc(ProcessState::self()); + MediaLogService::instantiate(); + ProcessState::self()->startThreadPool(); + for (;;) { + siginfo_t info; + int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED); + if (ret == EINTR) { + continue; + } + if (ret < 0) { + break; + } + char buffer[32]; + const char *code; + switch (info.si_code) { + case CLD_EXITED: + code = "CLD_EXITED"; + break; + case CLD_KILLED: + code = "CLD_KILLED"; + break; + case CLD_DUMPED: + code = "CLD_DUMPED"; + break; + case CLD_STOPPED: + code = "CLD_STOPPED"; + break; + case CLD_TRAPPED: + code = "CLD_TRAPPED"; + break; + case CLD_CONTINUED: + code = "CLD_CONTINUED"; + break; + default: + snprintf(buffer, sizeof(buffer), "unknown (%d)", info.si_code); + code = buffer; + break; + } + struct rusage usage; + getrusage(RUSAGE_CHILDREN, &usage); + ALOG(LOG_ERROR, "media.log", "pid %d status %d code %s user %ld.%03lds sys %ld.%03lds", + info.si_pid, info.si_status, code, + usage.ru_utime.tv_sec, usage.ru_utime.tv_usec / 1000, + usage.ru_stime.tv_sec, usage.ru_stime.tv_usec / 1000); + sp<IServiceManager> sm = defaultServiceManager(); + sp<IBinder> binder = sm->getService(String16("media.log")); + if (binder != 0) { + Vector<String16> args; + binder->dump(-1, args); + } + switch (info.si_code) { + case CLD_EXITED: + case CLD_KILLED: + case CLD_DUMPED: { + ALOG(LOG_INFO, "media.log", "exiting"); + _exit(0); + // not reached + } 
+ default: + break; + } + } + } else { + // all other services + if (doLog) { + prctl(PR_SET_PDEATHSIG, SIGKILL); // if parent media.log dies before me, kill me also + setpgid(0, 0); // but if I die first, don't kill my parent + } + sp<ProcessState> proc(ProcessState::self()); + sp<IServiceManager> sm = defaultServiceManager(); + ALOGI("ServiceManager: %p", sm.get()); + AudioFlinger::instantiate(); + MediaPlayerService::instantiate(); + CameraService::instantiate(); + AudioPolicyService::instantiate(); + registerExtensions(); + ProcessState::self()->startThreadPool(); + IPCThreadState::self()->joinThreadPool(); + } } diff --git a/media/mediaserver/register.cpp b/media/mediaserver/register.cpp new file mode 100644 index 0000000..4ffb2ba --- /dev/null +++ b/media/mediaserver/register.cpp @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "RegisterExtensions.h" + +void registerExtensions() +{ +} diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk index bee28d4..ac608a1 100644 --- a/media/mtp/Android.mk +++ b/media/mtp/Android.mk @@ -42,6 +42,6 @@ LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST # Needed for <bionic_time.h> LOCAL_C_INCLUDES := bionic/libc/private -LOCAL_SHARED_LIBRARIES := libutils libcutils libusbhost libbinder +LOCAL_SHARED_LIBRARIES := libutils libcutils liblog libusbhost libbinder include $(BUILD_SHARED_LIBRARY) diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp index 662a93d..df87db4 100644 --- a/media/mtp/MtpServer.cpp +++ b/media/mtp/MtpServer.cpp @@ -704,7 +704,8 @@ MtpResponseCode MtpServer::doGetObjectInfo() { mData.putUInt32(info.mAssociationDesc); mData.putUInt32(info.mSequenceNumber); mData.putString(info.mName); - mData.putEmptyString(); // date created + formatDateTime(info.mDateCreated, date, sizeof(date)); + mData.putString(date); // date created formatDateTime(info.mDateModified, date, sizeof(date)); mData.putString(date); // date modified mData.putEmptyString(); // keywords @@ -1118,7 +1119,7 @@ MtpResponseCode MtpServer::doSendPartialObject() { int initialData = ret - MTP_CONTAINER_HEADER_SIZE; if (initialData > 0) { - ret = write(edit->mFD, mData.getData(), initialData); + ret = pwrite(edit->mFD, mData.getData(), initialData, offset); offset += initialData; length -= initialData; } diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk index 2899953..061a079 100644 --- a/services/audioflinger/Android.mk +++ b/services/audioflinger/Android.mk @@ -15,11 +15,14 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ AudioFlinger.cpp \ + Threads.cpp \ + Tracks.cpp \ + Effects.cpp \ AudioMixer.cpp.arm \ AudioResampler.cpp.arm \ AudioPolicyService.cpp \ ServiceUtilities.cpp \ - AudioResamplerCubic.cpp.arm \ + AudioResamplerCubic.cpp.arm \ AudioResamplerSinc.cpp.arm LOCAL_SRC_FILES += StateQueue.cpp @@ -31,15 +34,14 @@ 
LOCAL_C_INCLUDES := \ $(call include-path-for, audio-effects) \ $(call include-path-for, audio-utils) -# FIXME keep libmedia_native but remove libmedia after split LOCAL_SHARED_LIBRARIES := \ libaudioutils \ libcommon_time_client \ libcutils \ libutils \ + liblog \ libbinder \ libmedia \ - libmedia_native \ libnbaio \ libhardware \ libhardware_legacy \ @@ -65,17 +67,20 @@ LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"' LOCAL_CFLAGS += -UFAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE -# uncomment for systrace -# LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_AUDIO - -# uncomment for dumpsys to write most recent audio output to .wav file -# 47.5 seconds at 44.1 kHz, 8 megabytes -# LOCAL_CFLAGS += -DTEE_SINK_FRAMES=0x200000 +# uncomment to allow tee sink debugging to be enabled by property +# LOCAL_CFLAGS += -DTEE_SINK # uncomment to enable the audio watchdog # LOCAL_SRC_FILES += AudioWatchdog.cpp # LOCAL_CFLAGS += -DAUDIO_WATCHDOG +# Define ANDROID_SMP appropriately. Used to get inline tracing fast-path. 
+ifeq ($(TARGET_CPU_SMP),true) + LOCAL_CFLAGS += -DANDROID_SMP=1 +else + LOCAL_CFLAGS += -DANDROID_SMP=0 +endif + include $(BUILD_SHARED_LIBRARY) # @@ -90,9 +95,10 @@ LOCAL_SRC_FILES:= \ AudioResamplerSinc.cpp.arm LOCAL_SHARED_LIBRARIES := \ - libdl \ + libdl \ libcutils \ - libutils + libutils \ + liblog LOCAL_MODULE:= test-resample @@ -100,5 +106,4 @@ LOCAL_MODULE_TAGS := optional include $(BUILD_EXECUTABLE) - include $(call all-makefiles-under,$(LOCAL_PATH)) diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index 1913b6f..87eb6aa 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -19,6 +19,7 @@ #define LOG_TAG "AudioFlinger" //#define LOG_NDEBUG 0 +#include <dirent.h> #include <math.h> #include <signal.h> #include <sys/time.h> @@ -29,7 +30,6 @@ #include <utils/Log.h> #include <utils/Trace.h> #include <binder/Parcel.h> -#include <binder/IPCThreadState.h> #include <utils/String16.h> #include <utils/threads.h> #include <utils/Atomic.h> @@ -38,15 +38,8 @@ #include <cutils/properties.h> #include <cutils/compiler.h> -#undef ADD_BATTERY_DATA - -#ifdef ADD_BATTERY_DATA -#include <media/IMediaPlayerService.h> -#include <media/IMediaDeathNotifier.h> -#endif - -#include <private/media/AudioTrackShared.h> -#include <private/media/AudioEffectShared.h> +//#include <private/media/AudioTrackShared.h> +//#include <private/media/AudioEffectShared.h> #include <system/audio.h> #include <hardware/audio.h> @@ -64,26 +57,13 @@ #include <powermanager/PowerManager.h> -// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds -#ifdef DEBUG_CPU_USAGE -#include <cpustats/CentralTendencyStatistics.h> -#include <cpustats/ThreadCpuUsage.h> -#endif - #include <common_time/cc_helper.h> -#include <common_time/local_clock.h> +//#include <common_time/local_clock.h> -#include "FastMixer.h" +#include <media/IMediaLogService.h> -// NBAIO implementations -#include 
<media/nbaio/AudioStreamOutSink.h> -#include <media/nbaio/MonoPipe.h> -#include <media/nbaio/MonoPipeReader.h> #include <media/nbaio/Pipe.h> #include <media/nbaio/PipeReader.h> -#include <media/nbaio/SourceAudioBufferProvider.h> - -#include "SchedulingPolicyService.h" // ---------------------------------------------------------------------------- @@ -105,90 +85,23 @@ namespace android { static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n"; static const char kHardwareLockedString[] = "Hardware lock is taken\n"; -static const float MAX_GAIN = 4096.0f; -static const uint32_t MAX_GAIN_INT = 0x1000; - -// retry counts for buffer fill timeout -// 50 * ~20msecs = 1 second -static const int8_t kMaxTrackRetries = 50; -static const int8_t kMaxTrackStartupRetries = 50; -// allow less retry attempts on direct output thread. -// direct outputs can be a scarce resource in audio hardware and should -// be released as quickly as possible. -static const int8_t kMaxTrackRetriesDirect = 2; - -static const int kDumpLockRetries = 50; -static const int kDumpLockSleepUs = 20000; - -// don't warn about blocked writes or record buffer overflows more often than this -static const nsecs_t kWarningThrottleNs = seconds(5); - -// RecordThread loop sleep time upon application overrun or audio HAL read error -static const int kRecordThreadSleepUs = 5000; - -// maximum time to wait for setParameters to complete -static const nsecs_t kSetParametersTimeoutNs = seconds(2); - -// minimum sleep time for the mixer thread loop when tracks are active but in underrun -static const uint32_t kMinThreadSleepTimeUs = 5000; -// maximum divider applied to the active sleep time in the mixer thread loop -static const uint32_t kMaxThreadSleepTimeShift = 2; - -// minimum normal mix buffer size, expressed in milliseconds rather than frames -static const uint32_t kMinNormalMixBufferSizeMs = 20; -// maximum normal mix buffer size -static const uint32_t kMaxNormalMixBufferSizeMs = 24; nsecs_t 
AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs; -// Whether to use fast mixer -static const enum { - FastMixer_Never, // never initialize or use: for debugging only - FastMixer_Always, // always initialize and use, even if not needed: for debugging only - // normal mixer multiplier is 1 - FastMixer_Static, // initialize if needed, then use all the time if initialized, - // multiplier is calculated based on min & max normal mixer buffer size - FastMixer_Dynamic, // initialize if needed, then use dynamically depending on track load, - // multiplier is calculated based on min & max normal mixer buffer size - // FIXME for FastMixer_Dynamic: - // Supporting this option will require fixing HALs that can't handle large writes. - // For example, one HAL implementation returns an error from a large write, - // and another HAL implementation corrupts memory, possibly in the sample rate converter. - // We could either fix the HAL implementations, or provide a wrapper that breaks - // up large writes into smaller ones, and the wrapper would need to deal with scheduler. -} kUseFastMixer = FastMixer_Static; - -static uint32_t gScreenState; // incremented by 2 when screen state changes, bit 0 == 1 means "off" - // AudioFlinger::setParameters() updates, other threads read w/o lock - -// Priorities for requestPriority -static const int kPriorityAudioApp = 2; -static const int kPriorityFastMixer = 3; - -// IAudioFlinger::createTrack() reports back to client the total size of shared memory area -// for the track. The client then sub-divides this into smaller buffers for its use. -// Currently the client uses double-buffering by default, but doesn't tell us about that. -// So for now we just assume that client is double-buffered. -// FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or -// N-buffering, so AudioFlinger could allocate the right amount of memory. 
-// See the client's minBufCount and mNotificationFramesAct calculations for details. -static const int kFastTrackMultiplier = 2; - -// ---------------------------------------------------------------------------- +uint32_t AudioFlinger::mScreenState; -#ifdef ADD_BATTERY_DATA -// To collect the amplifier usage -static void addBatteryData(uint32_t params) { - sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService(); - if (service == NULL) { - // it already logged - return; - } +#ifdef TEE_SINK +bool AudioFlinger::mTeeSinkInputEnabled = false; +bool AudioFlinger::mTeeSinkOutputEnabled = false; +bool AudioFlinger::mTeeSinkTrackEnabled = false; - service->addBatteryData(params); -} +size_t AudioFlinger::mTeeSinkInputFrames = kTeeSinkInputFramesDefault; +size_t AudioFlinger::mTeeSinkOutputFrames = kTeeSinkOutputFramesDefault; +size_t AudioFlinger::mTeeSinkTrackFrames = kTeeSinkTrackFramesDefault; #endif +// ---------------------------------------------------------------------------- + static int load_audio_interface(const char *if_name, audio_hw_device_t **dev) { const hw_module_t *mod; @@ -230,6 +143,27 @@ AudioFlinger::AudioFlinger() mMode(AUDIO_MODE_INVALID), mBtNrecIsOff(false) { + getpid_cached = getpid(); + char value[PROPERTY_VALUE_MAX]; + bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1); + if (doLog) { + mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters"); + } +#ifdef TEE_SINK + (void) property_get("ro.debuggable", value, "0"); + int debuggable = atoi(value); + int teeEnabled = 0; + if (debuggable) { + (void) property_get("af.tee", value, "0"); + teeEnabled = atoi(value); + } + if (teeEnabled & 1) + mTeeSinkInputEnabled = true; + if (teeEnabled & 2) + mTeeSinkOutputEnabled = true; + if (teeEnabled & 4) + mTeeSinkTrackEnabled = true; +#endif } void AudioFlinger::onFirstRef() @@ -364,7 +298,7 @@ void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args) write(fd, 
result.string(), result.size()); } -static bool tryLock(Mutex& mutex) +bool AudioFlinger::dumpTryLock(Mutex& mutex) { bool locked = false; for (int i = 0; i < kDumpLockRetries; ++i) { @@ -383,7 +317,7 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args) dumpPermissionDenial(fd, args); } else { // get state of hardware lock - bool hardwareLocked = tryLock(mHardwareLock); + bool hardwareLocked = dumpTryLock(mHardwareLock); if (!hardwareLocked) { String8 result(kHardwareLockedString); write(fd, result.string(), result.size()); @@ -391,7 +325,7 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args) mHardwareLock.unlock(); } - bool locked = tryLock(mLock); + bool locked = dumpTryLock(mLock); // failed to lock - AudioFlinger is probably deadlocked if (!locked) { @@ -417,7 +351,28 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args) audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice(); dev->dump(dev, fd); } - if (locked) mLock.unlock(); + +#ifdef TEE_SINK + // dump the serially shared record tee sink + if (mRecordTeeSource != 0) { + dumpTee(fd, mRecordTeeSource); + } +#endif + + if (locked) { + mLock.unlock(); + } + + // append a copy of media.log here by forwarding fd to it, but don't attempt + // to lookup the service if it's not running, as it will block for a second + if (mLogMemoryDealer != 0) { + sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); + if (binder != 0) { + fdprintf(fd, "\nmedia.log:\n"); + Vector<String16> args; + binder->dump(fd, args); + } + } } return NO_ERROR; } @@ -435,17 +390,48 @@ sp<AudioFlinger::Client> AudioFlinger::registerPid_l(pid_t pid) return client; } +sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name) +{ + if (mLogMemoryDealer == 0) { + return new NBLog::Writer(); + } + sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + sp<NBLog::Writer> writer = new NBLog::Writer(size, shared); + sp<IBinder> 
binder = defaultServiceManager()->getService(String16("media.log")); + if (binder != 0) { + interface_cast<IMediaLogService>(binder)->registerWriter(shared, size, name); + } + return writer; +} + +void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) +{ + if (writer == 0) { + return; + } + sp<IMemory> iMemory(writer->getIMemory()); + if (iMemory == 0) { + return; + } + sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); + if (binder != 0) { + interface_cast<IMediaLogService>(binder)->unregisterWriter(iMemory); + // Now the media.log remote reference to IMemory is gone. + // When our last local reference to IMemory also drops to zero, + // the IMemory destructor will deallocate the region from mMemoryDealer. + } +} + // IAudioFlinger interface sp<IAudioTrack> AudioFlinger::createTrack( - pid_t pid, audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, - IAudioFlinger::track_flags_t flags, + size_t frameCount, + IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, @@ -466,16 +452,25 @@ sp<IAudioTrack> AudioFlinger::createTrack( goto Exit; } + // client is responsible for conversion of 8-bit PCM to 16-bit PCM, + // and we don't yet support 8.24 or 32-bit PCM + if (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT) { + ALOGE("createTrack() invalid format %d", format); + lStatus = BAD_VALUE; + goto Exit; + } + { Mutex::Autolock _l(mLock); PlaybackThread *thread = checkPlaybackThread_l(output); PlaybackThread *effectThread = NULL; if (thread == NULL) { - ALOGE("unknown output thread"); + ALOGE("no playback thread found for output handle %d", output); lStatus = BAD_VALUE; goto Exit; } + pid_t pid = IPCThreadState::self()->getCallingPid(); client = registerPid_l(pid); ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? 
-2 : *sessionId); @@ -595,7 +590,7 @@ uint32_t AudioFlinger::latency(audio_io_handle_t output) const Mutex::Autolock _l(mLock); PlaybackThread *thread = checkPlaybackThread_l(output); if (thread == NULL) { - ALOGW("latency() unknown thread %d", output); + ALOGW("latency(): no playback thread found for output handle %d", output); return 0; } return thread->latency(); @@ -856,8 +851,9 @@ bool AudioFlinger::streamMute(audio_stream_type_t stream) const status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) { - ALOGV("setParameters(): io %d, keyvalue %s, tid %d, calling pid %d", - ioHandle, keyValuePairs.string(), gettid(), IPCThreadState::self()->getCallingPid()); + ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d", + ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid()); + // check calling permissions if (!settingsAllowed()) { return PERMISSION_DENIED; @@ -906,8 +902,8 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& String8 screenState; if (param.get(String8(AudioParameter::keyScreenState), screenState) == NO_ERROR) { bool isOff = screenState == "off"; - if (isOff != (gScreenState & 1)) { - gScreenState = ((gScreenState & ~1) + 2) | isOff; + if (isOff != (AudioFlinger::mScreenState & 1)) { + AudioFlinger::mScreenState = ((AudioFlinger::mScreenState & ~1) + 2) | isOff; } } return final_result; @@ -941,8 +937,8 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& keys) const { -// ALOGV("getParameters() io %d, keys %s, tid %d, calling pid %d", -// ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid()); + ALOGVV("getParameters() io %d, keys %s, calling pid %d", + ioHandle, keys.string(), IPCThreadState::self()->getCallingPid()); Mutex::Autolock _l(mLock); @@ -1028,7 +1024,7 @@ status_t AudioFlinger::setVoiceVolume(float value) return ret; 
} -status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, +status_t AudioFlinger::getRenderPosition(size_t *halFrames, size_t *dspFrames, audio_io_handle_t output) const { status_t status; @@ -1112,7 +1108,8 @@ void AudioFlinger::audioConfigChanged_l(int event, audio_io_handle_t ioHandle, c // removeClient_l() must be called with AudioFlinger::mLock held void AudioFlinger::removeClient_l(pid_t pid) { - ALOGV("removeClient_l() pid %d, tid %d, calling tid %d", pid, gettid(), IPCThreadState::self()->getCallingPid()); + ALOGV("removeClient_l() pid %d, calling pid %d", pid, + IPCThreadState::self()->getCallingPid()); mClients.removeItem(pid); } @@ -1131,4596 +1128,7 @@ sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(int sessionId, return thread; } -// ---------------------------------------------------------------------------- - -AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id, - audio_devices_t outDevice, audio_devices_t inDevice, type_t type) - : Thread(false /*canCallJava*/), - mType(type), - mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0), - // mChannelMask - mChannelCount(0), - mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID), - mParamStatus(NO_ERROR), - mStandby(false), mOutDevice(outDevice), mInDevice(inDevice), - mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id), - // mName will be set by concrete (non-virtual) subclass - mDeathRecipient(new PMDeathRecipient(this)) -{ -} - -AudioFlinger::ThreadBase::~ThreadBase() -{ - mParamCond.broadcast(); - // do not lock the mutex in destructor - releaseWakeLock_l(); - if (mPowerManager != 0) { - sp<IBinder> binder = mPowerManager->asBinder(); - binder->unlinkToDeath(mDeathRecipient); - } -} - -void AudioFlinger::ThreadBase::exit() -{ - ALOGV("ThreadBase::exit"); - // do any cleanup required for exit to succeed - preExit(); - { - // This lock prevents the following race in thread (uniprocessor for 
illustration): - // if (!exitPending()) { - // // context switch from here to exit() - // // exit() calls requestExit(), what exitPending() observes - // // exit() calls signal(), which is dropped since no waiters - // // context switch back from exit() to here - // mWaitWorkCV.wait(...); - // // now thread is hung - // } - AutoMutex lock(mLock); - requestExit(); - mWaitWorkCV.broadcast(); - } - // When Thread::requestExitAndWait is made virtual and this method is renamed to - // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();" - requestExitAndWait(); -} - -status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs) -{ - status_t status; - - ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string()); - Mutex::Autolock _l(mLock); - - mNewParameters.add(keyValuePairs); - mWaitWorkCV.signal(); - // wait condition with timeout in case the thread loop has exited - // before the request could be processed - if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) { - status = mParamStatus; - mWaitWorkCV.signal(); - } else { - status = TIMED_OUT; - } - return status; -} - -void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param) -{ - Mutex::Autolock _l(mLock); - sendIoConfigEvent_l(event, param); -} - -// sendIoConfigEvent_l() must be called with ThreadBase::mLock held -void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param) -{ - IoConfigEvent *ioEvent = new IoConfigEvent(event, param); - mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent)); - ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, param); - mWaitWorkCV.signal(); -} - -// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held -void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio) -{ - PrioConfigEvent *prioEvent = new PrioConfigEvent(pid, tid, prio); - mConfigEvents.add(static_cast<ConfigEvent *>(prioEvent)); - 
ALOGV("sendPrioConfigEvent_l() num events %d pid %d, tid %d prio %d", - mConfigEvents.size(), pid, tid, prio); - mWaitWorkCV.signal(); -} - -void AudioFlinger::ThreadBase::processConfigEvents() -{ - mLock.lock(); - while (!mConfigEvents.isEmpty()) { - ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size()); - ConfigEvent *event = mConfigEvents[0]; - mConfigEvents.removeAt(0); - // release mLock before locking AudioFlinger mLock: lock order is always - // AudioFlinger then ThreadBase to avoid cross deadlock - mLock.unlock(); - switch(event->type()) { - case CFG_EVENT_PRIO: { - PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event); - int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio()); - if (err != 0) { - ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", - prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err); - } - } break; - case CFG_EVENT_IO: { - IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event); - mAudioFlinger->mLock.lock(); - audioConfigChanged_l(ioEvent->event(), ioEvent->param()); - mAudioFlinger->mLock.unlock(); - } break; - default: - ALOGE("processConfigEvents() unknown event type %d", event->type()); - break; - } - delete event; - mLock.lock(); - } - mLock.unlock(); -} - -void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - bool locked = tryLock(mLock); - if (!locked) { - snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this); - write(fd, buffer, strlen(buffer)); - } - - snprintf(buffer, SIZE, "io handle: %d\n", mId); - result.append(buffer); - snprintf(buffer, SIZE, "TID: %d\n", getTid()); - result.append(buffer); - snprintf(buffer, SIZE, "standby: %d\n", mStandby); - result.append(buffer); - snprintf(buffer, SIZE, "Sample rate: %d\n", mSampleRate); - result.append(buffer); - snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount); - 
result.append(buffer); - snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount); - result.append(buffer); - snprintf(buffer, SIZE, "Channel Count: %d\n", mChannelCount); - result.append(buffer); - snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask); - result.append(buffer); - snprintf(buffer, SIZE, "Format: %d\n", mFormat); - result.append(buffer); - snprintf(buffer, SIZE, "Frame size: %u\n", mFrameSize); - result.append(buffer); - - snprintf(buffer, SIZE, "\nPending setParameters commands: \n"); - result.append(buffer); - result.append(" Index Command"); - for (size_t i = 0; i < mNewParameters.size(); ++i) { - snprintf(buffer, SIZE, "\n %02d ", i); - result.append(buffer); - result.append(mNewParameters[i]); - } - - snprintf(buffer, SIZE, "\n\nPending config events: \n"); - result.append(buffer); - for (size_t i = 0; i < mConfigEvents.size(); i++) { - mConfigEvents[i]->dump(buffer, SIZE); - result.append(buffer); - } - result.append("\n"); - - write(fd, result.string(), result.size()); - - if (locked) { - mLock.unlock(); - } -} - -void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size()); - write(fd, buffer, strlen(buffer)); - - for (size_t i = 0; i < mEffectChains.size(); ++i) { - sp<EffectChain> chain = mEffectChains[i]; - if (chain != 0) { - chain->dump(fd, args); - } - } -} - -void AudioFlinger::ThreadBase::acquireWakeLock() -{ - Mutex::Autolock _l(mLock); - acquireWakeLock_l(); -} - -void AudioFlinger::ThreadBase::acquireWakeLock_l() -{ - if (mPowerManager == 0) { - // use checkService() to avoid blocking if power service is not up yet - sp<IBinder> binder = - defaultServiceManager()->checkService(String16("power")); - if (binder == 0) { - ALOGW("Thread %s cannot connect to the power manager service", mName); - } else { - mPowerManager = 
interface_cast<IPowerManager>(binder); - binder->linkToDeath(mDeathRecipient); - } - } - if (mPowerManager != 0) { - sp<IBinder> binder = new BBinder(); - status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK, - binder, - String16(mName)); - if (status == NO_ERROR) { - mWakeLockToken = binder; - } - ALOGV("acquireWakeLock_l() %s status %d", mName, status); - } -} - -void AudioFlinger::ThreadBase::releaseWakeLock() -{ - Mutex::Autolock _l(mLock); - releaseWakeLock_l(); -} - -void AudioFlinger::ThreadBase::releaseWakeLock_l() -{ - if (mWakeLockToken != 0) { - ALOGV("releaseWakeLock_l() %s", mName); - if (mPowerManager != 0) { - mPowerManager->releaseWakeLock(mWakeLockToken, 0); - } - mWakeLockToken.clear(); - } -} - -void AudioFlinger::ThreadBase::clearPowerManager() -{ - Mutex::Autolock _l(mLock); - releaseWakeLock_l(); - mPowerManager.clear(); -} - -void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who) -{ - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - thread->clearPowerManager(); - } - ALOGW("power manager service died !!!"); -} - -void AudioFlinger::ThreadBase::setEffectSuspended( - const effect_uuid_t *type, bool suspend, int sessionId) -{ - Mutex::Autolock _l(mLock); - setEffectSuspended_l(type, suspend, sessionId); -} - -void AudioFlinger::ThreadBase::setEffectSuspended_l( - const effect_uuid_t *type, bool suspend, int sessionId) -{ - sp<EffectChain> chain = getEffectChain_l(sessionId); - if (chain != 0) { - if (type != NULL) { - chain->setEffectSuspended_l(type, suspend); - } else { - chain->setEffectSuspendedAll_l(suspend); - } - } - - updateSuspendedSessions_l(type, suspend, sessionId); -} - -void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain) -{ - ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId()); - if (index < 0) { - return; - } - - const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects = - 
mSuspendedSessions.valueAt(index); - - for (size_t i = 0; i < sessionEffects.size(); i++) { - sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i); - for (int j = 0; j < desc->mRefCount; j++) { - if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) { - chain->setEffectSuspendedAll_l(true); - } else { - ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x", - desc->mType.timeLow); - chain->setEffectSuspended_l(&desc->mType, true); - } - } - } -} - -void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type, - bool suspend, - int sessionId) -{ - ssize_t index = mSuspendedSessions.indexOfKey(sessionId); - - KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects; - - if (suspend) { - if (index >= 0) { - sessionEffects = mSuspendedSessions.valueAt(index); - } else { - mSuspendedSessions.add(sessionId, sessionEffects); - } - } else { - if (index < 0) { - return; - } - sessionEffects = mSuspendedSessions.valueAt(index); - } - - - int key = EffectChain::kKeyForSuspendAll; - if (type != NULL) { - key = type->timeLow; - } - index = sessionEffects.indexOfKey(key); - - sp<SuspendedSessionDesc> desc; - if (suspend) { - if (index >= 0) { - desc = sessionEffects.valueAt(index); - } else { - desc = new SuspendedSessionDesc(); - if (type != NULL) { - desc->mType = *type; - } - sessionEffects.add(key, desc); - ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key); - } - desc->mRefCount++; - } else { - if (index < 0) { - return; - } - desc = sessionEffects.valueAt(index); - if (--desc->mRefCount == 0) { - ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key); - sessionEffects.removeItemsAt(index); - if (sessionEffects.isEmpty()) { - ALOGV("updateSuspendedSessions_l() restore removing session %d", - sessionId); - mSuspendedSessions.removeItem(sessionId); - } - } - } - if (!sessionEffects.isEmpty()) { - mSuspendedSessions.replaceValueFor(sessionId, sessionEffects); - } -} - -void 
AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, - bool enabled, - int sessionId) -{ - Mutex::Autolock _l(mLock); - checkSuspendOnEffectEnabled_l(effect, enabled, sessionId); -} - -void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect, - bool enabled, - int sessionId) -{ - if (mType != RECORD) { - // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on - // another session. This gives the priority to well behaved effect control panels - // and applications not using global effects. - // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect - // global effects - if ((sessionId != AUDIO_SESSION_OUTPUT_MIX) && (sessionId != AUDIO_SESSION_OUTPUT_STAGE)) { - setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX); - } - } - - sp<EffectChain> chain = getEffectChain_l(sessionId); - if (chain != 0) { - chain->checkSuspendOnEffectEnabled(effect, enabled); - } -} - -// ---------------------------------------------------------------------------- - -AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger, - AudioStreamOut* output, - audio_io_handle_t id, - audio_devices_t device, - type_t type) - : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type), - mMixBuffer(NULL), mSuspended(0), mBytesWritten(0), - // mStreamTypes[] initialized in constructor body - mOutput(output), - mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false), - mMixerStatus(MIXER_IDLE), - mMixerStatusIgnoringFastTracks(MIXER_IDLE), - standbyDelay(AudioFlinger::mStandbyTimeInNsecs), - mScreenState(gScreenState), - // index 0 is reserved for normal mixer's submix - mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1) -{ - snprintf(mName, kNameLength, "AudioOut_%X", id); - - // Assumes constructor is called by AudioFlinger with it's mLock held, but - // it would be safer to explicitly pass initial 
masterVolume/masterMute as - // parameter. - // - // If the HAL we are using has support for master volume or master mute, - // then do not attenuate or mute during mixing (just leave the volume at 1.0 - // and the mute set to false). - mMasterVolume = audioFlinger->masterVolume_l(); - mMasterMute = audioFlinger->masterMute_l(); - if (mOutput && mOutput->audioHwDev) { - if (mOutput->audioHwDev->canSetMasterVolume()) { - mMasterVolume = 1.0; - } - - if (mOutput->audioHwDev->canSetMasterMute()) { - mMasterMute = false; - } - } - - readOutputParameters(); - - // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor - // There is no AUDIO_STREAM_MIN, and ++ operator does not compile - for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT; - stream = (audio_stream_type_t) (stream + 1)) { - mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream); - mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream); - } - // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here, - // because mAudioFlinger doesn't have one to copy from -} - -AudioFlinger::PlaybackThread::~PlaybackThread() -{ - delete [] mMixBuffer; -} - -void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args) -{ - dumpInternals(fd, args); - dumpTracks(fd, args); - dumpEffectChains(fd, args); -} - -void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - result.appendFormat("Output thread %p stream volumes in dB:\n ", this); - for (int i = 0; i < AUDIO_STREAM_CNT; ++i) { - const stream_type_t *st = &mStreamTypes[i]; - if (i > 0) { - result.appendFormat(", "); - } - result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume)); - if (st->mute) { - result.append("M"); - } - } - result.append("\n"); - write(fd, result.string(), result.length()); - result.clear(); - - snprintf(buffer, SIZE, "Output 
thread %p tracks\n", this); - result.append(buffer); - Track::appendDumpHeader(result); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); - } - } - - snprintf(buffer, SIZE, "Output thread %p active tracks\n", this); - result.append(buffer); - Track::appendDumpHeader(result); - for (size_t i = 0; i < mActiveTracks.size(); ++i) { - sp<Track> track = mActiveTracks[i].promote(); - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); - } - } - write(fd, result.string(), result.size()); - - // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way. - FastTrackUnderruns underruns = getFastTrackUnderruns(0); - fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n", - underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty); -} - -void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this); - result.append(buffer); - snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime)); - result.append(buffer); - snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites); - result.append(buffer); - snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites); - result.append(buffer); - snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite); - result.append(buffer); - snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended); - result.append(buffer); - snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer); - result.append(buffer); - write(fd, result.string(), result.size()); - fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask); - - dumpBase(fd, args); -} - -// Thread virtuals -status_t AudioFlinger::PlaybackThread::readyToRun() -{ - status_t status = initCheck(); - if (status == NO_ERROR) { - 
ALOGI("AudioFlinger's thread %p ready to run", this); - } else { - ALOGE("No working audio driver found."); - } - return status; -} - -void AudioFlinger::PlaybackThread::onFirstRef() -{ - run(mName, ANDROID_PRIORITY_URGENT_AUDIO); -} - -// ThreadBase virtuals -void AudioFlinger::PlaybackThread::preExit() -{ - ALOGV(" preExit()"); - // FIXME this is using hard-coded strings but in the future, this functionality will be - // converted to use audio HAL extensions required to support tunneling - mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1"); -} - -// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held -sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l( - const sp<AudioFlinger::Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId, - IAudioFlinger::track_flags_t flags, - pid_t tid, - status_t *status) -{ - sp<Track> track; - status_t lStatus; - - bool isTimed = (flags & IAudioFlinger::TRACK_TIMED) != 0; - - // client expresses a preference for FAST, but we get the final say - if (flags & IAudioFlinger::TRACK_FAST) { - if ( - // not timed - (!isTimed) && - // either of these use cases: - ( - // use case 1: shared buffer with any frame count - ( - (sharedBuffer != 0) - ) || - // use case 2: callback handler and frame count is default or at least as large as HAL - ( - (tid != -1) && - ((frameCount == 0) || - (frameCount >= (int) (mFrameCount * kFastTrackMultiplier))) - ) - ) && - // PCM data - audio_is_linear_pcm(format) && - // mono or stereo - ( (channelMask == AUDIO_CHANNEL_OUT_MONO) || - (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) && -#ifndef FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE - // hardware sample rate - (sampleRate == mSampleRate) && -#endif - // normal mixer has an associated fast mixer - hasFastMixer() && - // there are 
sufficient fast track slots available - (mFastTrackAvailMask != 0) - // FIXME test that MixerThread for this fast track has a capable output HAL - // FIXME add a permission test also? - ) { - // if frameCount not specified, then it defaults to fast mixer (HAL) frame count - if (frameCount == 0) { - frameCount = mFrameCount * kFastTrackMultiplier; - } - ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d", - frameCount, mFrameCount); - } else { - ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d " - "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%d mSampleRate=%d " - "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x", - isTimed, sharedBuffer.get(), frameCount, mFrameCount, format, - audio_is_linear_pcm(format), - channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask); - flags &= ~IAudioFlinger::TRACK_FAST; - // For compatibility with AudioTrack calculation, buffer depth is forced - // to be at least 2 x the normal mixer frame count and cover audio hardware latency. - // This is probably too conservative, but legacy application code may depend on it. - // If you change this calculation, also review the start threshold which is related. 
- uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream); - uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate); - if (minBufCount < 2) { - minBufCount = 2; - } - int minFrameCount = mNormalFrameCount * minBufCount; - if (frameCount < minFrameCount) { - frameCount = minFrameCount; - } - } - } - - if (mType == DIRECT) { - if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) { - if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) { - ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \"" - "for output %p with format %d", - sampleRate, format, channelMask, mOutput, mFormat); - lStatus = BAD_VALUE; - goto Exit; - } - } - } else { - // Resampler implementation limits input sampling rate to 2 x output sampling rate. - if (sampleRate > mSampleRate*2) { - ALOGE("Sample rate out of range: %d mSampleRate %d", sampleRate, mSampleRate); - lStatus = BAD_VALUE; - goto Exit; - } - } - - lStatus = initCheck(); - if (lStatus != NO_ERROR) { - ALOGE("Audio driver not initialized."); - goto Exit; - } - - { // scope for mLock - Mutex::Autolock _l(mLock); - - // all tracks in same audio session must share the same routing strategy otherwise - // conflicts will happen when tracks are moved from one output to another by audio policy - // manager - uint32_t strategy = AudioSystem::getStrategyForStream(streamType); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> t = mTracks[i]; - if (t != 0 && !t->isOutputTrack()) { - uint32_t actual = AudioSystem::getStrategyForStream(t->streamType()); - if (sessionId == t->sessionId() && strategy != actual) { - ALOGE("createTrack_l() mismatched strategy; expected %u but found %u", - strategy, actual); - lStatus = BAD_VALUE; - goto Exit; - } - } - } - - if (!isTimed) { - track = new Track(this, client, streamType, sampleRate, format, - channelMask, frameCount, sharedBuffer, sessionId, flags); - } else { - track = TimedTrack::create(this, 
client, streamType, sampleRate, format, - channelMask, frameCount, sharedBuffer, sessionId); - } - if (track == 0 || track->getCblk() == NULL || track->name() < 0) { - lStatus = NO_MEMORY; - goto Exit; - } - mTracks.add(track); - - sp<EffectChain> chain = getEffectChain_l(sessionId); - if (chain != 0) { - ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer()); - track->setMainBuffer(chain->inBuffer()); - chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType())); - chain->incTrackCnt(); - } - - if ((flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) { - pid_t callingPid = IPCThreadState::self()->getCallingPid(); - // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful, - // so ask activity manager to do this on our behalf - sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp); - } - } - - lStatus = NO_ERROR; - -Exit: - if (status) { - *status = lStatus; - } - return track; -} - -uint32_t AudioFlinger::MixerThread::correctLatency(uint32_t latency) const -{ - if (mFastMixer != NULL) { - MonoPipe *pipe = (MonoPipe *)mPipeSink.get(); - latency += (pipe->getAvgFrames() * 1000) / mSampleRate; - } - return latency; -} - -uint32_t AudioFlinger::PlaybackThread::correctLatency(uint32_t latency) const -{ - return latency; -} - -uint32_t AudioFlinger::PlaybackThread::latency() const -{ - Mutex::Autolock _l(mLock); - return latency_l(); -} -uint32_t AudioFlinger::PlaybackThread::latency_l() const -{ - if (initCheck() == NO_ERROR) { - return correctLatency(mOutput->stream->get_latency(mOutput->stream)); - } else { - return 0; - } -} - -void AudioFlinger::PlaybackThread::setMasterVolume(float value) -{ - Mutex::Autolock _l(mLock); - // Don't apply master volume in SW if our HAL can do it for us. 
- if (mOutput && mOutput->audioHwDev && - mOutput->audioHwDev->canSetMasterVolume()) { - mMasterVolume = 1.0; - } else { - mMasterVolume = value; - } -} - -void AudioFlinger::PlaybackThread::setMasterMute(bool muted) -{ - Mutex::Autolock _l(mLock); - // Don't apply master mute in SW if our HAL can do it for us. - if (mOutput && mOutput->audioHwDev && - mOutput->audioHwDev->canSetMasterMute()) { - mMasterMute = false; - } else { - mMasterMute = muted; - } -} - -void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value) -{ - Mutex::Autolock _l(mLock); - mStreamTypes[stream].volume = value; -} - -void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted) -{ - Mutex::Autolock _l(mLock); - mStreamTypes[stream].mute = muted; -} - -float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const -{ - Mutex::Autolock _l(mLock); - return mStreamTypes[stream].volume; -} - -// addTrack_l() must be called with ThreadBase::mLock held -status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track) -{ - status_t status = ALREADY_EXISTS; - - // set retry count for buffer fill - track->mRetryCount = kMaxTrackStartupRetries; - if (mActiveTracks.indexOf(track) < 0) { - // the track is newly added, make sure it fills up all its - // buffers before playing. This is to ensure the client will - // effectively get the latency it requested. 
- track->mFillingUpStatus = Track::FS_FILLING; - track->mResetDone = false; - track->mPresentationCompleteFrames = 0; - mActiveTracks.add(track); - if (track->mainBuffer() != mMixBuffer) { - sp<EffectChain> chain = getEffectChain_l(track->sessionId()); - if (chain != 0) { - ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(), track->sessionId()); - chain->incActiveTrackCnt(); - } - } - - status = NO_ERROR; - } - - ALOGV("mWaitWorkCV.broadcast"); - mWaitWorkCV.broadcast(); - - return status; -} - -// destroyTrack_l() must be called with ThreadBase::mLock held -void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track) -{ - track->mState = TrackBase::TERMINATED; - // active tracks are removed by threadLoop() - if (mActiveTracks.indexOf(track) < 0) { - removeTrack_l(track); - } -} - -void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track) -{ - track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); - mTracks.remove(track); - deleteTrackName_l(track->name()); - // redundant as track is about to be destroyed, for dumpsys only - track->mName = -1; - if (track->isFastTrack()) { - int index = track->mFastIndex; - ALOG_ASSERT(0 < index && index < (int)FastMixerState::kMaxFastTracks); - ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index))); - mFastTrackAvailMask |= 1 << index; - // redundant as track is about to be destroyed, for dumpsys only - track->mFastIndex = -1; - } - sp<EffectChain> chain = getEffectChain_l(track->sessionId()); - if (chain != 0) { - chain->decTrackCnt(); - } -} - -String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys) -{ - String8 out_s8 = String8(""); - char *s; - - Mutex::Autolock _l(mLock); - if (initCheck() != NO_ERROR) { - return out_s8; - } - - s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string()); - out_s8 = String8(s); - free(s); - return out_s8; -} - -// audioConfigChanged_l() must be called with AudioFlinger::mLock held -void 
AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) { - AudioSystem::OutputDescriptor desc; - void *param2 = NULL; - - ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, param); - - switch (event) { - case AudioSystem::OUTPUT_OPENED: - case AudioSystem::OUTPUT_CONFIG_CHANGED: - desc.channels = mChannelMask; - desc.samplingRate = mSampleRate; - desc.format = mFormat; - desc.frameCount = mNormalFrameCount; // FIXME see AudioFlinger::frameCount(audio_io_handle_t) - desc.latency = latency(); - param2 = &desc; - break; - - case AudioSystem::STREAM_CONFIG_CHANGED: - param2 = ¶m; - case AudioSystem::OUTPUT_CLOSED: - default: - break; - } - mAudioFlinger->audioConfigChanged_l(event, mId, param2); -} - -void AudioFlinger::PlaybackThread::readOutputParameters() -{ - mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common); - mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common); - mChannelCount = (uint16_t)popcount(mChannelMask); - mFormat = mOutput->stream->common.get_format(&mOutput->stream->common); - mFrameSize = audio_stream_frame_size(&mOutput->stream->common); - mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize; - if (mFrameCount & 15) { - ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames", - mFrameCount); - } - - // Calculate size of normal mix buffer relative to the HAL output buffer size - double multiplier = 1.0; - if (mType == MIXER && (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) { - size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000; - size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000; - // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer - minNormalFrameCount = (minNormalFrameCount + 15) & ~15; - maxNormalFrameCount = maxNormalFrameCount & ~15; - if 
(maxNormalFrameCount < minNormalFrameCount) { - maxNormalFrameCount = minNormalFrameCount; - } - multiplier = (double) minNormalFrameCount / (double) mFrameCount; - if (multiplier <= 1.0) { - multiplier = 1.0; - } else if (multiplier <= 2.0) { - if (2 * mFrameCount <= maxNormalFrameCount) { - multiplier = 2.0; - } else { - multiplier = (double) maxNormalFrameCount / (double) mFrameCount; - } - } else { - // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL SRC - // (it would be unusual for the normal mix buffer size to not be a multiple of fast - // track, but we sometimes have to do this to satisfy the maximum frame count constraint) - // FIXME this rounding up should not be done if no HAL SRC - uint32_t truncMult = (uint32_t) multiplier; - if ((truncMult & 1)) { - if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) { - ++truncMult; - } - } - multiplier = (double) truncMult; - } - } - mNormalFrameCount = multiplier * mFrameCount; - // round up to nearest 16 frames to satisfy AudioMixer - mNormalFrameCount = (mNormalFrameCount + 15) & ~15; - ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, mNormalFrameCount); - - delete[] mMixBuffer; - mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount]; - memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); - - // force reconfiguration of effect chains and engines to take new buffer size and audio - // parameters into account - // Note that mLock is not held when readOutputParameters() is called from the constructor - // but in this case nothing is done below as no audio sessions have effect yet so it doesn't - // matter. 
- // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains - Vector< sp<EffectChain> > effectChains = mEffectChains; - for (size_t i = 0; i < effectChains.size(); i ++) { - mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this, false); - } -} - - -status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames) -{ - if (halFrames == NULL || dspFrames == NULL) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - if (initCheck() != NO_ERROR) { - return INVALID_OPERATION; - } - *halFrames = mBytesWritten / audio_stream_frame_size(&mOutput->stream->common); - - if (isSuspended()) { - // return an estimation of rendered frames when the output is suspended - int32_t frames = mBytesWritten - latency_l(); - if (frames < 0) { - frames = 0; - } - *dspFrames = (uint32_t)frames; - return NO_ERROR; - } else { - return mOutput->stream->get_render_position(mOutput->stream, dspFrames); - } -} - -uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const -{ - Mutex::Autolock _l(mLock); - uint32_t result = 0; - if (getEffectChain_l(sessionId) != 0) { - result = EFFECT_SESSION; - } - - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (sessionId == track->sessionId() && - !(track->mCblk->flags & CBLK_INVALID_MSK)) { - result |= TRACK_SESSION; - break; - } - } - - return result; -} - -uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId) -{ - // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that - // it is moved to correct output by audio policy manager when A2DP is connected or disconnected - if (sessionId == AUDIO_SESSION_OUTPUT_MIX) { - return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); - } - for (size_t i = 0; i < mTracks.size(); i++) { - sp<Track> track = mTracks[i]; - if (sessionId == track->sessionId() && - !(track->mCblk->flags & CBLK_INVALID_MSK)) { - return 
AudioSystem::getStrategyForStream(track->streamType()); - } - } - return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); -} - - -AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const -{ - Mutex::Autolock _l(mLock); - return mOutput; -} - -AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput() -{ - Mutex::Autolock _l(mLock); - AudioStreamOut *output = mOutput; - mOutput = NULL; - // FIXME FastMixer might also have a raw ptr to mOutputSink; - // must push a NULL and wait for ack - mOutputSink.clear(); - mPipeSink.clear(); - mNormalSink.clear(); - return output; -} -// this method must always be called either with ThreadBase mLock held or inside the thread loop -audio_stream_t* AudioFlinger::PlaybackThread::stream() const -{ - if (mOutput == NULL) { - return NULL; - } - return &mOutput->stream->common; -} - -uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const -{ - return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000); -} - -status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event) -{ - if (!isValidSyncEvent(event)) { - return BAD_VALUE; - } - - Mutex::Autolock _l(mLock); - - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (event->triggerSession() == track->sessionId()) { - (void) track->setSyncEvent(event); - return NO_ERROR; - } - } - - return NAME_NOT_FOUND; -} - -bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const -{ - return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE; -} - -void AudioFlinger::PlaybackThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove) -{ - size_t count = tracksToRemove.size(); - if (CC_UNLIKELY(count)) { - for (size_t i = 0 ; i < count ; i++) { - const sp<Track>& track = tracksToRemove.itemAt(i); - if ((track->sharedBuffer() != 0) && - (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) { - 
AudioSystem::stopOutput(mId, track->streamType(), track->sessionId()); - } - } - } - -} - -// ---------------------------------------------------------------------------- - -AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, - audio_io_handle_t id, audio_devices_t device, type_t type) - : PlaybackThread(audioFlinger, output, id, device, type), - // mAudioMixer below - // mFastMixer below - mFastMixerFutex(0) - // mOutputSink below - // mPipeSink below - // mNormalSink below -{ - ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type); - ALOGV("mSampleRate=%d, mChannelMask=%#x, mChannelCount=%d, mFormat=%d, mFrameSize=%d, " - "mFrameCount=%d, mNormalFrameCount=%d", - mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount, - mNormalFrameCount); - mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); - - // FIXME - Current mixer implementation only supports stereo output - if (mChannelCount != FCC_2) { - ALOGE("Invalid audio hardware channel count %d", mChannelCount); - } - - // create an NBAIO sink for the HAL output stream, and negotiate - mOutputSink = new AudioStreamOutSink(output->stream); - size_t numCounterOffers = 0; - const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)}; - ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers); - ALOG_ASSERT(index == 0); - - // initialize fast mixer depending on configuration - bool initFastMixer; - switch (kUseFastMixer) { - case FastMixer_Never: - initFastMixer = false; - break; - case FastMixer_Always: - initFastMixer = true; - break; - case FastMixer_Static: - case FastMixer_Dynamic: - initFastMixer = mFrameCount < mNormalFrameCount; - break; - } - if (initFastMixer) { - - // create a MonoPipe to connect our submix to FastMixer - NBAIO_Format format = mOutputSink->format(); - // This pipe depth compensates for scheduling latency of the normal mixer thread. 
- // When it wakes up after a maximum latency, it runs a few cycles quickly before - // finally blocking. Note the pipe implementation rounds up the request to a power of 2. - MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/); - const NBAIO_Format offers[1] = {format}; - size_t numCounterOffers = 0; - ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers); - ALOG_ASSERT(index == 0); - monoPipe->setAvgFrames((mScreenState & 1) ? - (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2); - mPipeSink = monoPipe; - -#ifdef TEE_SINK_FRAMES - // create a Pipe to archive a copy of FastMixer's output for dumpsys - Pipe *teeSink = new Pipe(TEE_SINK_FRAMES, format); - numCounterOffers = 0; - index = teeSink->negotiate(offers, 1, NULL, numCounterOffers); - ALOG_ASSERT(index == 0); - mTeeSink = teeSink; - PipeReader *teeSource = new PipeReader(*teeSink); - numCounterOffers = 0; - index = teeSource->negotiate(offers, 1, NULL, numCounterOffers); - ALOG_ASSERT(index == 0); - mTeeSource = teeSource; -#endif - - // create fast mixer and configure it initially with just one fast track for our submix - mFastMixer = new FastMixer(); - FastMixerStateQueue *sq = mFastMixer->sq(); -#ifdef STATE_QUEUE_DUMP - sq->setObserverDump(&mStateQueueObserverDump); - sq->setMutatorDump(&mStateQueueMutatorDump); -#endif - FastMixerState *state = sq->begin(); - FastTrack *fastTrack = &state->mFastTracks[0]; - // wrap the source side of the MonoPipe to make it an AudioBufferProvider - fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe)); - fastTrack->mVolumeProvider = NULL; - fastTrack->mGeneration++; - state->mFastTracksGen++; - state->mTrackMask = 1; - // fast mixer will use the HAL output sink - state->mOutputSink = mOutputSink.get(); - state->mOutputSinkGen++; - state->mFrameCount = mFrameCount; - state->mCommand = FastMixerState::COLD_IDLE; - // already done in constructor initialization list - 
//mFastMixerFutex = 0; - state->mColdFutexAddr = &mFastMixerFutex; - state->mColdGen++; - state->mDumpState = &mFastMixerDumpState; - state->mTeeSink = mTeeSink.get(); - sq->end(); - sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); - - // start the fast mixer - mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO); - pid_t tid = mFastMixer->getTid(); - int err = requestPriority(getpid_cached, tid, kPriorityFastMixer); - if (err != 0) { - ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", - kPriorityFastMixer, getpid_cached, tid, err); - } - -#ifdef AUDIO_WATCHDOG - // create and start the watchdog - mAudioWatchdog = new AudioWatchdog(); - mAudioWatchdog->setDump(&mAudioWatchdogDump); - mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO); - tid = mAudioWatchdog->getTid(); - err = requestPriority(getpid_cached, tid, kPriorityFastMixer); - if (err != 0) { - ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", - kPriorityFastMixer, getpid_cached, tid, err); - } -#endif - - } else { - mFastMixer = NULL; - } - - switch (kUseFastMixer) { - case FastMixer_Never: - case FastMixer_Dynamic: - mNormalSink = mOutputSink; - break; - case FastMixer_Always: - mNormalSink = mPipeSink; - break; - case FastMixer_Static: - mNormalSink = initFastMixer ? mPipeSink : mOutputSink; - break; - } -} - -AudioFlinger::MixerThread::~MixerThread() -{ - if (mFastMixer != NULL) { - FastMixerStateQueue *sq = mFastMixer->sq(); - FastMixerState *state = sq->begin(); - if (state->mCommand == FastMixerState::COLD_IDLE) { - int32_t old = android_atomic_inc(&mFastMixerFutex); - if (old == -1) { - __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1); - } - } - state->mCommand = FastMixerState::EXIT; - sq->end(); - sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); - mFastMixer->join(); - // Though the fast mixer thread has exited, it's state queue is still valid. 
- // We'll use that extract the final state which contains one remaining fast track - // corresponding to our sub-mix. - state = sq->begin(); - ALOG_ASSERT(state->mTrackMask == 1); - FastTrack *fastTrack = &state->mFastTracks[0]; - ALOG_ASSERT(fastTrack->mBufferProvider != NULL); - delete fastTrack->mBufferProvider; - sq->end(false /*didModify*/); - delete mFastMixer; -#ifdef AUDIO_WATCHDOG - if (mAudioWatchdog != 0) { - mAudioWatchdog->requestExit(); - mAudioWatchdog->requestExitAndWait(); - mAudioWatchdog.clear(); - } -#endif - } - delete mAudioMixer; -} - -class CpuStats { -public: - CpuStats(); - void sample(const String8 &title); -#ifdef DEBUG_CPU_USAGE -private: - ThreadCpuUsage mCpuUsage; // instantaneous thread CPU usage in wall clock ns - CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns - - CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles - - int mCpuNum; // thread's current CPU number - int mCpukHz; // frequency of thread's current CPU in kHz -#endif -}; - -CpuStats::CpuStats() -#ifdef DEBUG_CPU_USAGE - : mCpuNum(-1), mCpukHz(-1) -#endif -{ -} - -void CpuStats::sample(const String8 &title) { -#ifdef DEBUG_CPU_USAGE - // get current thread's delta CPU time in wall clock ns - double wcNs; - bool valid = mCpuUsage.sampleAndEnable(wcNs); - - // record sample for wall clock statistics - if (valid) { - mWcStats.sample(wcNs); - } - - // get the current CPU number - int cpuNum = sched_getcpu(); - - // get the current CPU frequency in kHz - int cpukHz = mCpuUsage.getCpukHz(cpuNum); - - // check if either CPU number or frequency changed - if (cpuNum != mCpuNum || cpukHz != mCpukHz) { - mCpuNum = cpuNum; - mCpukHz = cpukHz; - // ignore sample for purposes of cycles - valid = false; - } - - // if no change in CPU number or frequency, then record sample for cycle statistics - if (valid && mCpukHz > 0) { - double cycles = wcNs * cpukHz * 0.000001; - mHzStats.sample(cycles); - } - - unsigned n = 
mWcStats.n(); - // mCpuUsage.elapsed() is expensive, so don't call it every loop - if ((n & 127) == 1) { - long long elapsed = mCpuUsage.elapsed(); - if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) { - double perLoop = elapsed / (double) n; - double perLoop100 = perLoop * 0.01; - double perLoop1k = perLoop * 0.001; - double mean = mWcStats.mean(); - double stddev = mWcStats.stddev(); - double minimum = mWcStats.minimum(); - double maximum = mWcStats.maximum(); - double meanCycles = mHzStats.mean(); - double stddevCycles = mHzStats.stddev(); - double minCycles = mHzStats.minimum(); - double maxCycles = mHzStats.maximum(); - mCpuUsage.resetElapsed(); - mWcStats.reset(); - mHzStats.reset(); - ALOGD("CPU usage for %s over past %.1f secs\n" - " (%u mixer loops at %.1f mean ms per loop):\n" - " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n" - " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n" - " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f", - title.string(), - elapsed * .000000001, n, perLoop * .000001, - mean * .001, - stddev * .001, - minimum * .001, - maximum * .001, - mean / perLoop100, - stddev / perLoop100, - minimum / perLoop100, - maximum / perLoop100, - meanCycles / perLoop1k, - stddevCycles / perLoop1k, - minCycles / perLoop1k, - maxCycles / perLoop1k); - - } - } -#endif -}; - -void AudioFlinger::PlaybackThread::checkSilentMode_l() -{ - if (!mMasterMute) { - char value[PROPERTY_VALUE_MAX]; - if (property_get("ro.audio.silent", value, "0") > 0) { - char *endptr; - unsigned long ul = strtoul(value, &endptr, 0); - if (*endptr == '\0' && ul != 0) { - ALOGD("Silence is golden"); - // The setprop command will not allow a property to be changed after - // the first time it is set, so we don't have to worry about un-muting. 
- setMasterMute_l(true); - } - } - } -} - -bool AudioFlinger::PlaybackThread::threadLoop() -{ - Vector< sp<Track> > tracksToRemove; - - standbyTime = systemTime(); - - // MIXER - nsecs_t lastWarning = 0; - - // DUPLICATING - // FIXME could this be made local to while loop? - writeFrames = 0; - - cacheParameters_l(); - sleepTime = idleSleepTime; - - if (mType == MIXER) { - sleepTimeShift = 0; - } - - CpuStats cpuStats; - const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid())); - - acquireWakeLock(); - - while (!exitPending()) - { - cpuStats.sample(myName); - - Vector< sp<EffectChain> > effectChains; - - processConfigEvents(); - - { // scope for mLock - - Mutex::Autolock _l(mLock); - - if (checkForNewParameters_l()) { - cacheParameters_l(); - } - - saveOutputTracks(); - - // put audio hardware into standby after short delay - if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) || - isSuspended())) { - if (!mStandby) { - - threadLoop_standby(); - - mStandby = true; - } - - if (!mActiveTracks.size() && mConfigEvents.isEmpty()) { - // we're about to wait, flush the binder command buffer - IPCThreadState::self()->flushCommands(); - - clearOutputTracks(); - - if (exitPending()) break; - - releaseWakeLock_l(); - // wait until we have something to do... 
- ALOGV("%s going to sleep", myName.string()); - mWaitWorkCV.wait(mLock); - ALOGV("%s waking up", myName.string()); - acquireWakeLock_l(); - - mMixerStatus = MIXER_IDLE; - mMixerStatusIgnoringFastTracks = MIXER_IDLE; - mBytesWritten = 0; - - checkSilentMode_l(); - - standbyTime = systemTime() + standbyDelay; - sleepTime = idleSleepTime; - if (mType == MIXER) { - sleepTimeShift = 0; - } - - continue; - } - } - - // mMixerStatusIgnoringFastTracks is also updated internally - mMixerStatus = prepareTracks_l(&tracksToRemove); - - // prevent any changes in effect chain list and in each effect chain - // during mixing and effect process as the audio buffers could be deleted - // or modified if an effect is created or deleted - lockEffectChains_l(effectChains); - } - - if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) { - threadLoop_mix(); - } else { - threadLoop_sleepTime(); - } - - if (isSuspended()) { - sleepTime = suspendSleepTimeUs(); - mBytesWritten += mixBufferSize; - } - - // only process effects if we're going to write - if (sleepTime == 0) { - for (size_t i = 0; i < effectChains.size(); i ++) { - effectChains[i]->process_l(); - } - } - - // enable changes in effect chain - unlockEffectChains(effectChains); - - // sleepTime == 0 means we must write to audio hardware - if (sleepTime == 0) { - - threadLoop_write(); - -if (mType == MIXER) { - // write blocked detection - nsecs_t now = systemTime(); - nsecs_t delta = now - mLastWriteTime; - if (!mStandby && delta > maxPeriod) { - mNumDelayedWrites++; - if ((now - lastWarning) > kWarningThrottleNs) { -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - ScopedTrace st(ATRACE_TAG, "underrun"); -#endif - ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p", - ns2ms(delta), mNumDelayedWrites, this); - lastWarning = now; - } - } -} - - mStandby = false; - } else { - usleep(sleepTime); - } - - // Finally let go of removed track(s), without the lock held - // since we can't guarantee the destructors 
won't acquire that - // same lock. This will also mutate and push a new fast mixer state. - threadLoop_removeTracks(tracksToRemove); - tracksToRemove.clear(); - - // FIXME I don't understand the need for this here; - // it was in the original code but maybe the - // assignment in saveOutputTracks() makes this unnecessary? - clearOutputTracks(); - - // Effect chains will be actually deleted here if they were removed from - // mEffectChains list during mixing or effects processing - effectChains.clear(); - - // FIXME Note that the above .clear() is no longer necessary since effectChains - // is now local to this block, but will keep it for now (at least until merge done). - } - - // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ... - if (mType == MIXER || mType == DIRECT) { - // put output stream into standby mode - if (!mStandby) { - mOutput->stream->common.standby(&mOutput->stream->common); - } - } - - releaseWakeLock(); - - ALOGV("Thread %p type %d exiting", this, mType); - return false; -} - -void AudioFlinger::MixerThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove) -{ - PlaybackThread::threadLoop_removeTracks(tracksToRemove); -} - -void AudioFlinger::MixerThread::threadLoop_write() -{ - // FIXME we should only do one push per cycle; confirm this is true - // Start the fast mixer if it's not already running - if (mFastMixer != NULL) { - FastMixerStateQueue *sq = mFastMixer->sq(); - FastMixerState *state = sq->begin(); - if (state->mCommand != FastMixerState::MIX_WRITE && - (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) { - if (state->mCommand == FastMixerState::COLD_IDLE) { - int32_t old = android_atomic_inc(&mFastMixerFutex); - if (old == -1) { - __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1); - } -#ifdef AUDIO_WATCHDOG - if (mAudioWatchdog != 0) { - mAudioWatchdog->resume(); - } -#endif - } - state->mCommand = FastMixerState::MIX_WRITE; - sq->end(); - 
sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); - if (kUseFastMixer == FastMixer_Dynamic) { - mNormalSink = mPipeSink; - } - } else { - sq->end(false /*didModify*/); - } - } - PlaybackThread::threadLoop_write(); -} - -// shared by MIXER and DIRECT, overridden by DUPLICATING -void AudioFlinger::PlaybackThread::threadLoop_write() -{ - // FIXME rewrite to reduce number of system calls - mLastWriteTime = systemTime(); - mInWrite = true; - int bytesWritten; - - // If an NBAIO sink is present, use it to write the normal mixer's submix - if (mNormalSink != 0) { -#define mBitShift 2 // FIXME - size_t count = mixBufferSize >> mBitShift; -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - Tracer::traceBegin(ATRACE_TAG, "write"); -#endif - // update the setpoint when gScreenState changes - uint32_t screenState = gScreenState; - if (screenState != mScreenState) { - mScreenState = screenState; - MonoPipe *pipe = (MonoPipe *)mPipeSink.get(); - if (pipe != NULL) { - pipe->setAvgFrames((mScreenState & 1) ? - (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2); - } - } - ssize_t framesWritten = mNormalSink->write(mMixBuffer, count); -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - Tracer::traceEnd(ATRACE_TAG); -#endif - if (framesWritten > 0) { - bytesWritten = framesWritten << mBitShift; - } else { - bytesWritten = framesWritten; - } - // otherwise use the HAL / AudioStreamOut directly - } else { - // Direct output thread. 
- bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize); - } - - if (bytesWritten > 0) mBytesWritten += mixBufferSize; - mNumWrites++; - mInWrite = false; -} - -void AudioFlinger::MixerThread::threadLoop_standby() -{ - // Idle the fast mixer if it's currently running - if (mFastMixer != NULL) { - FastMixerStateQueue *sq = mFastMixer->sq(); - FastMixerState *state = sq->begin(); - if (!(state->mCommand & FastMixerState::IDLE)) { - state->mCommand = FastMixerState::COLD_IDLE; - state->mColdFutexAddr = &mFastMixerFutex; - state->mColdGen++; - mFastMixerFutex = 0; - sq->end(); - // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now - sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED); - if (kUseFastMixer == FastMixer_Dynamic) { - mNormalSink = mOutputSink; - } -#ifdef AUDIO_WATCHDOG - if (mAudioWatchdog != 0) { - mAudioWatchdog->pause(); - } -#endif - } else { - sq->end(false /*didModify*/); - } - } - PlaybackThread::threadLoop_standby(); -} - -// shared by MIXER and DIRECT, overridden by DUPLICATING -void AudioFlinger::PlaybackThread::threadLoop_standby() -{ - ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended); - mOutput->stream->common.standby(&mOutput->stream->common); -} - -void AudioFlinger::MixerThread::threadLoop_mix() -{ - // obtain the presentation timestamp of the next output buffer - int64_t pts; - status_t status = INVALID_OPERATION; - - if (mNormalSink != 0) { - status = mNormalSink->getNextWriteTimestamp(&pts); - } else { - status = mOutputSink->getNextWriteTimestamp(&pts); - } - - if (status != NO_ERROR) { - pts = AudioBufferProvider::kInvalidPTS; - } - - // mix buffers... - mAudioMixer->process(pts); - // increase sleep time progressively when application underrun condition clears. 
- // Only increase sleep time if the mixer is ready for two consecutive times to avoid - // that a steady state of alternating ready/not ready conditions keeps the sleep time - // such that we would underrun the audio HAL. - if ((sleepTime == 0) && (sleepTimeShift > 0)) { - sleepTimeShift--; - } - sleepTime = 0; - standbyTime = systemTime() + standbyDelay; - //TODO: delay standby when effects have a tail -} - -void AudioFlinger::MixerThread::threadLoop_sleepTime() -{ - // If no tracks are ready, sleep once for the duration of an output - // buffer size, then write 0s to the output - if (sleepTime == 0) { - if (mMixerStatus == MIXER_TRACKS_ENABLED) { - sleepTime = activeSleepTime >> sleepTimeShift; - if (sleepTime < kMinThreadSleepTimeUs) { - sleepTime = kMinThreadSleepTimeUs; - } - // reduce sleep time in case of consecutive application underruns to avoid - // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer - // duration we would end up writing less data than needed by the audio HAL if - // the condition persists. 
- if (sleepTimeShift < kMaxThreadSleepTimeShift) { - sleepTimeShift++; - } - } else { - sleepTime = idleSleepTime; - } - } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) { - memset (mMixBuffer, 0, mixBufferSize); - sleepTime = 0; - ALOGV_IF((mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED)), "anticipated start"); - } - // TODO add standby time extension fct of effect tail -} - -// prepareTracks_l() must be called with ThreadBase::mLock held -AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l( - Vector< sp<Track> > *tracksToRemove) -{ - - mixer_state mixerStatus = MIXER_IDLE; - // find out which tracks need to be processed - size_t count = mActiveTracks.size(); - size_t mixedTracks = 0; - size_t tracksWithEffect = 0; - // counts only _active_ fast tracks - size_t fastTracks = 0; - uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset - - float masterVolume = mMasterVolume; - bool masterMute = mMasterMute; - - if (masterMute) { - masterVolume = 0; - } - // Delegate master volume control to effect in output mix effect chain if needed - sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); - if (chain != 0) { - uint32_t v = (uint32_t)(masterVolume * (1 << 24)); - chain->setVolume_l(&v, &v); - masterVolume = (float)((v + (1 << 23)) >> 24); - chain.clear(); - } - - // prepare a new state to push - FastMixerStateQueue *sq = NULL; - FastMixerState *state = NULL; - bool didModify = false; - FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED; - if (mFastMixer != NULL) { - sq = mFastMixer->sq(); - state = sq->begin(); - } - - for (size_t i=0 ; i<count ; i++) { - sp<Track> t = mActiveTracks[i].promote(); - if (t == 0) continue; - - // this const just means the local variable doesn't change - Track* const track = t.get(); - - // process fast tracks - if (track->isFastTrack()) { - - // It's theoretically possible (though unlikely) for a fast track to 
be created - // and then removed within the same normal mix cycle. This is not a problem, as - // the track never becomes active so it's fast mixer slot is never touched. - // The converse, of removing an (active) track and then creating a new track - // at the identical fast mixer slot within the same normal mix cycle, - // is impossible because the slot isn't marked available until the end of each cycle. - int j = track->mFastIndex; - ALOG_ASSERT(0 < j && j < (int)FastMixerState::kMaxFastTracks); - ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j))); - FastTrack *fastTrack = &state->mFastTracks[j]; - - // Determine whether the track is currently in underrun condition, - // and whether it had a recent underrun. - FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j]; - FastTrackUnderruns underruns = ftDump->mUnderruns; - uint32_t recentFull = (underruns.mBitFields.mFull - - track->mObservedUnderruns.mBitFields.mFull) & UNDERRUN_MASK; - uint32_t recentPartial = (underruns.mBitFields.mPartial - - track->mObservedUnderruns.mBitFields.mPartial) & UNDERRUN_MASK; - uint32_t recentEmpty = (underruns.mBitFields.mEmpty - - track->mObservedUnderruns.mBitFields.mEmpty) & UNDERRUN_MASK; - uint32_t recentUnderruns = recentPartial + recentEmpty; - track->mObservedUnderruns = underruns; - // don't count underruns that occur while stopping or pausing - // or stopped which can occur when flush() is called while active - if (!(track->isStopping() || track->isPausing() || track->isStopped())) { - track->mUnderrunCount += recentUnderruns; - } - - // This is similar to the state machine for normal tracks, - // with a few modifications for fast tracks. 
- bool isActive = true; - switch (track->mState) { - case TrackBase::STOPPING_1: - // track stays active in STOPPING_1 state until first underrun - if (recentUnderruns > 0) { - track->mState = TrackBase::STOPPING_2; - } - break; - case TrackBase::PAUSING: - // ramp down is not yet implemented - track->setPaused(); - break; - case TrackBase::RESUMING: - // ramp up is not yet implemented - track->mState = TrackBase::ACTIVE; - break; - case TrackBase::ACTIVE: - if (recentFull > 0 || recentPartial > 0) { - // track has provided at least some frames recently: reset retry count - track->mRetryCount = kMaxTrackRetries; - } - if (recentUnderruns == 0) { - // no recent underruns: stay active - break; - } - // there has recently been an underrun of some kind - if (track->sharedBuffer() == 0) { - // were any of the recent underruns "empty" (no frames available)? - if (recentEmpty == 0) { - // no, then ignore the partial underruns as they are allowed indefinitely - break; - } - // there has recently been an "empty" underrun: decrement the retry counter - if (--(track->mRetryCount) > 0) { - break; - } - // indicate to client process that the track was disabled because of underrun; - // it will then automatically call start() when data is available - android_atomic_or(CBLK_DISABLED_ON, &track->mCblk->flags); - // remove from active list, but state remains ACTIVE [confusing but true] - isActive = false; - break; - } - // fall through - case TrackBase::STOPPING_2: - case TrackBase::PAUSED: - case TrackBase::TERMINATED: - case TrackBase::STOPPED: - case TrackBase::FLUSHED: // flush() while active - // Check for presentation complete if track is inactive - // We have consumed all the buffers of this track. 
- // This would be incomplete if we auto-paused on underrun - { - size_t audioHALFrames = - (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000; - size_t framesWritten = - mBytesWritten / audio_stream_frame_size(&mOutput->stream->common); - if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) { - // track stays in active list until presentation is complete - break; - } - } - if (track->isStopping_2()) { - track->mState = TrackBase::STOPPED; - } - if (track->isStopped()) { - // Can't reset directly, as fast mixer is still polling this track - // track->reset(); - // So instead mark this track as needing to be reset after push with ack - resetMask |= 1 << i; - } - isActive = false; - break; - case TrackBase::IDLE: - default: - LOG_FATAL("unexpected track state %d", track->mState); - } - - if (isActive) { - // was it previously inactive? - if (!(state->mTrackMask & (1 << j))) { - ExtendedAudioBufferProvider *eabp = track; - VolumeProvider *vp = track; - fastTrack->mBufferProvider = eabp; - fastTrack->mVolumeProvider = vp; - fastTrack->mSampleRate = track->mSampleRate; - fastTrack->mChannelMask = track->mChannelMask; - fastTrack->mGeneration++; - state->mTrackMask |= 1 << j; - didModify = true; - // no acknowledgement required for newly active tracks - } - // cache the combined master volume and stream type volume for fast mixer; this - // lacks any synchronization or barrier so VolumeProvider may read a stale value - track->mCachedVolume = track->isMuted() ? - 0 : masterVolume * mStreamTypes[track->streamType()].volume; - ++fastTracks; - } else { - // was it previously active? - if (state->mTrackMask & (1 << j)) { - fastTrack->mBufferProvider = NULL; - fastTrack->mGeneration++; - state->mTrackMask &= ~(1 << j); - didModify = true; - // If any fast tracks were removed, we must wait for acknowledgement - // because we're about to decrement the last sp<> on those tracks. 
- block = FastMixerStateQueue::BLOCK_UNTIL_ACKED; - } else { - LOG_FATAL("fast track %d should have been active", j); - } - tracksToRemove->add(track); - // Avoids a misleading display in dumpsys - track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL; - } - continue; - } - - { // local variable scope to avoid goto warning - - audio_track_cblk_t* cblk = track->cblk(); - - // The first time a track is added we wait - // for all its buffers to be filled before processing it - int name = track->name(); - // make sure that we have enough frames to mix one full buffer. - // enforce this condition only once to enable draining the buffer in case the client - // app does not call stop() and relies on underrun to stop: - // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed - // during last round - uint32_t minFrames = 1; - if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() && - (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) { - if (t->sampleRate() == (int)mSampleRate) { - minFrames = mNormalFrameCount; - } else { - // +1 for rounding and +1 for additional sample needed for interpolation - minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1; - // add frames already consumed but not yet released by the resampler - // because cblk->framesReady() will include these frames - minFrames += mAudioMixer->getUnreleasedFrames(track->name()); - // the minimum track buffer size is normally twice the number of frames necessary - // to fill one buffer and the resampler should not leave more than one buffer worth - // of unreleased frames after each pass, but just in case... 
- ALOG_ASSERT(minFrames <= cblk->frameCount); - } - } - if ((track->framesReady() >= minFrames) && track->isReady() && - !track->isPaused() && !track->isTerminated()) - { - //ALOGV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, this); - - mixedTracks++; - - // track->mainBuffer() != mMixBuffer means there is an effect chain - // connected to the track - chain.clear(); - if (track->mainBuffer() != mMixBuffer) { - chain = getEffectChain_l(track->sessionId()); - // Delegate volume control to effect in track effect chain if needed - if (chain != 0) { - tracksWithEffect++; - } else { - ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on session %d", - name, track->sessionId()); - } - } - - - int param = AudioMixer::VOLUME; - if (track->mFillingUpStatus == Track::FS_FILLED) { - // no ramp for the first volume setting - track->mFillingUpStatus = Track::FS_ACTIVE; - if (track->mState == TrackBase::RESUMING) { - track->mState = TrackBase::ACTIVE; - param = AudioMixer::RAMP_VOLUME; - } - mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL); - } else if (cblk->server != 0) { - // If the track is stopped before the first frame was mixed, - // do not apply ramp - param = AudioMixer::RAMP_VOLUME; - } - - // compute volume for this track - uint32_t vl, vr, va; - if (track->isMuted() || track->isPausing() || - mStreamTypes[track->streamType()].mute) { - vl = vr = va = 0; - if (track->isPausing()) { - track->setPaused(); - } - } else { - - // read original volumes with volume control - float typeVolume = mStreamTypes[track->streamType()].volume; - float v = masterVolume * typeVolume; - uint32_t vlr = cblk->getVolumeLR(); - vl = vlr & 0xFFFF; - vr = vlr >> 16; - // track volumes come from shared memory, so can't be trusted and must be clamped - if (vl > MAX_GAIN_INT) { - ALOGV("Track left volume out of range: %04X", vl); - vl = MAX_GAIN_INT; - } - if (vr > MAX_GAIN_INT) { - ALOGV("Track right volume out 
of range: %04X", vr); - vr = MAX_GAIN_INT; - } - // now apply the master volume and stream type volume - vl = (uint32_t)(v * vl) << 12; - vr = (uint32_t)(v * vr) << 12; - // assuming master volume and stream type volume each go up to 1.0, - // vl and vr are now in 8.24 format - - uint16_t sendLevel = cblk->getSendLevel_U4_12(); - // send level comes from shared memory and so may be corrupt - if (sendLevel > MAX_GAIN_INT) { - ALOGV("Track send level out of range: %04X", sendLevel); - sendLevel = MAX_GAIN_INT; - } - va = (uint32_t)(v * sendLevel); - } - // Delegate volume control to effect in track effect chain if needed - if (chain != 0 && chain->setVolume_l(&vl, &vr)) { - // Do not ramp volume if volume is controlled by effect - param = AudioMixer::VOLUME; - track->mHasVolumeController = true; - } else { - // force no volume ramp when volume controller was just disabled or removed - // from effect chain to avoid volume spike - if (track->mHasVolumeController) { - param = AudioMixer::VOLUME; - } - track->mHasVolumeController = false; - } - - // Convert volumes from 8.24 to 4.12 format - // This additional clamping is needed in case chain->setVolume_l() overshot - vl = (vl + (1 << 11)) >> 12; - if (vl > MAX_GAIN_INT) vl = MAX_GAIN_INT; - vr = (vr + (1 << 11)) >> 12; - if (vr > MAX_GAIN_INT) vr = MAX_GAIN_INT; - - if (va > MAX_GAIN_INT) va = MAX_GAIN_INT; // va is uint32_t, so no need to check for - - - // XXX: these things DON'T need to be done each time - mAudioMixer->setBufferProvider(name, track); - mAudioMixer->enable(name); - - mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)vl); - mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)vr); - mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)va); - mAudioMixer->setParameter( - name, - AudioMixer::TRACK, - AudioMixer::FORMAT, (void *)track->format()); - mAudioMixer->setParameter( - name, - AudioMixer::TRACK, - AudioMixer::CHANNEL_MASK, (void 
*)track->channelMask()); - mAudioMixer->setParameter( - name, - AudioMixer::RESAMPLE, - AudioMixer::SAMPLE_RATE, - (void *)(cblk->sampleRate)); - mAudioMixer->setParameter( - name, - AudioMixer::TRACK, - AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer()); - mAudioMixer->setParameter( - name, - AudioMixer::TRACK, - AudioMixer::AUX_BUFFER, (void *)track->auxBuffer()); - - // reset retry count - track->mRetryCount = kMaxTrackRetries; - - // If one track is ready, set the mixer ready if: - // - the mixer was not ready during previous round OR - // - no other track is not ready - if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY || - mixerStatus != MIXER_TRACKS_ENABLED) { - mixerStatus = MIXER_TRACKS_READY; - } - } else { - // clear effect chain input buffer if an active track underruns to avoid sending - // previous audio buffer again to effects - chain = getEffectChain_l(track->sessionId()); - if (chain != 0) { - chain->clearInputBuffer(); - } - - //ALOGV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user, cblk->server, this); - if ((track->sharedBuffer() != 0) || track->isTerminated() || - track->isStopped() || track->isPaused()) { - // We have consumed all the buffers of this track. - // Remove it from the list of active tracks. - // TODO: use actual buffer filling status instead of latency when available from - // audio HAL - size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; - size_t framesWritten = - mBytesWritten / audio_stream_frame_size(&mOutput->stream->common); - if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) { - if (track->isStopped()) { - track->reset(); - } - tracksToRemove->add(track); - } - } else { - track->mUnderrunCount++; - // No buffers for this track. Give it a few chances to - // fill a buffer, then remove it from active list. 
- if (--(track->mRetryCount) <= 0) { - ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this); - tracksToRemove->add(track); - // indicate to client process that the track was disabled because of underrun; - // it will then automatically call start() when data is available - android_atomic_or(CBLK_DISABLED_ON, &cblk->flags); - // If one track is not ready, mark the mixer also not ready if: - // - the mixer was ready during previous round OR - // - no other track is ready - } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY || - mixerStatus != MIXER_TRACKS_READY) { - mixerStatus = MIXER_TRACKS_ENABLED; - } - } - mAudioMixer->disable(name); - } - - } // local variable scope to avoid goto warning -track_is_ready: ; - - } - - // Push the new FastMixer state if necessary - bool pauseAudioWatchdog = false; - if (didModify) { - state->mFastTracksGen++; - // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle - if (kUseFastMixer == FastMixer_Dynamic && - state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) { - state->mCommand = FastMixerState::COLD_IDLE; - state->mColdFutexAddr = &mFastMixerFutex; - state->mColdGen++; - mFastMixerFutex = 0; - if (kUseFastMixer == FastMixer_Dynamic) { - mNormalSink = mOutputSink; - } - // If we go into cold idle, need to wait for acknowledgement - // so that fast mixer stops doing I/O. 
- block = FastMixerStateQueue::BLOCK_UNTIL_ACKED; - pauseAudioWatchdog = true; - } - sq->end(); - } - if (sq != NULL) { - sq->end(didModify); - sq->push(block); - } -#ifdef AUDIO_WATCHDOG - if (pauseAudioWatchdog && mAudioWatchdog != 0) { - mAudioWatchdog->pause(); - } -#endif - - // Now perform the deferred reset on fast tracks that have stopped - while (resetMask != 0) { - size_t i = __builtin_ctz(resetMask); - ALOG_ASSERT(i < count); - resetMask &= ~(1 << i); - sp<Track> t = mActiveTracks[i].promote(); - if (t == 0) continue; - Track* track = t.get(); - ALOG_ASSERT(track->isFastTrack() && track->isStopped()); - track->reset(); - } - - // remove all the tracks that need to be... - count = tracksToRemove->size(); - if (CC_UNLIKELY(count)) { - for (size_t i=0 ; i<count ; i++) { - const sp<Track>& track = tracksToRemove->itemAt(i); - mActiveTracks.remove(track); - if (track->mainBuffer() != mMixBuffer) { - chain = getEffectChain_l(track->sessionId()); - if (chain != 0) { - ALOGV("stopping track on chain %p for session Id: %d", chain.get(), track->sessionId()); - chain->decActiveTrackCnt(); - } - } - if (track->isTerminated()) { - removeTrack_l(track); - } - } - } - - // mix buffer must be cleared if all tracks are connected to an - // effect chain as in this case the mixer will not write to - // mix buffer and track effects will accumulate into it - if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || (mixedTracks == 0 && fastTracks > 0)) { - // FIXME as a performance optimization, should remember previous zero status - memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); - } - - // if any fast tracks, then status is ready - mMixerStatusIgnoringFastTracks = mixerStatus; - if (fastTracks > 0) { - mixerStatus = MIXER_TRACKS_READY; - } - return mixerStatus; -} - -/* -The derived values that are cached: - - mixBufferSize from frame count * frame size - - activeSleepTime from activeSleepTimeUs() - - idleSleepTime from idleSleepTimeUs() - - 
standbyDelay from mActiveSleepTimeUs (DIRECT only) - - maxPeriod from frame count and sample rate (MIXER only) - -The parameters that affect these derived values are: - - frame count - - frame size - - sample rate - - device type: A2DP or not - - device latency - - format: PCM or not - - active sleep time - - idle sleep time -*/ - -void AudioFlinger::PlaybackThread::cacheParameters_l() -{ - mixBufferSize = mNormalFrameCount * mFrameSize; - activeSleepTime = activeSleepTimeUs(); - idleSleepTime = idleSleepTimeUs(); -} - -void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType) -{ - ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d", - this, streamType, mTracks.size()); - Mutex::Autolock _l(mLock); - - size_t size = mTracks.size(); - for (size_t i = 0; i < size; i++) { - sp<Track> t = mTracks[i]; - if (t->streamType() == streamType) { - android_atomic_or(CBLK_INVALID_ON, &t->mCblk->flags); - t->mCblk->cv.signal(); - } - } -} - -// getTrackName_l() must be called with ThreadBase::mLock held -int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask, int sessionId) -{ - return mAudioMixer->getTrackName(channelMask, sessionId); -} - -// deleteTrackName_l() must be called with ThreadBase::mLock held -void AudioFlinger::MixerThread::deleteTrackName_l(int name) -{ - ALOGV("remove track (%d) and delete from mixer", name); - mAudioMixer->deleteTrackName(name); -} - -// checkForNewParameters_l() must be called with ThreadBase::mLock held -bool AudioFlinger::MixerThread::checkForNewParameters_l() -{ - // if !&IDLE, holds the FastMixer state to restore after new parameters processed - FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE; - bool reconfig = false; - - while (!mNewParameters.isEmpty()) { - - if (mFastMixer != NULL) { - FastMixerStateQueue *sq = mFastMixer->sq(); - FastMixerState *state = sq->begin(); - if (!(state->mCommand & FastMixerState::IDLE)) { - previousCommand = 
state->mCommand; - state->mCommand = FastMixerState::HOT_IDLE; - sq->end(); - sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED); - } else { - sq->end(false /*didModify*/); - } - } - - status_t status = NO_ERROR; - String8 keyValuePair = mNewParameters[0]; - AudioParameter param = AudioParameter(keyValuePair); - int value; - - if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { - reconfig = true; - } - if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { - if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { - status = BAD_VALUE; - } else { - reconfig = true; - } - } - if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { - if (value != AUDIO_CHANNEL_OUT_STEREO) { - status = BAD_VALUE; - } else { - reconfig = true; - } - } - if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { - // do not accept frame count changes if tracks are open as the track buffer - // size depends on frame count and correct behavior would not be guaranteed - // if frame count is changed after track creation - if (!mTracks.isEmpty()) { - status = INVALID_OPERATION; - } else { - reconfig = true; - } - } - if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { -#ifdef ADD_BATTERY_DATA - // when changing the audio output device, call addBatteryData to notify - // the change - if (mOutDevice != value) { - uint32_t params = 0; - // check whether speaker is on - if (value & AUDIO_DEVICE_OUT_SPEAKER) { - params |= IMediaPlayerService::kBatteryDataSpeakerOn; - } - - audio_devices_t deviceWithoutSpeaker - = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER; - // check if any other device (except speaker) is on - if (value & deviceWithoutSpeaker ) { - params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn; - } - - if (params != 0) { - addBatteryData(params); - } - } -#endif - - // forward device change to effects that have requested to be - // aware of attached audio device. 
- mOutDevice = value; - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->setDevice_l(mOutDevice); - } - } - - if (status == NO_ERROR) { - status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - if (!mStandby && status == INVALID_OPERATION) { - mOutput->stream->common.standby(&mOutput->stream->common); - mStandby = true; - mBytesWritten = 0; - status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - } - if (status == NO_ERROR && reconfig) { - delete mAudioMixer; - // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't) - mAudioMixer = NULL; - readOutputParameters(); - mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); - for (size_t i = 0; i < mTracks.size() ; i++) { - int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId); - if (name < 0) break; - mTracks[i]->mName = name; - // limit track sample rate to 2 x new output sample rate - if (mTracks[i]->mCblk->sampleRate > 2 * sampleRate()) { - mTracks[i]->mCblk->sampleRate = 2 * sampleRate(); - } - } - sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); - } - } - - mNewParameters.removeAt(0); - - mParamStatus = status; - mParamCond.signal(); - // wait for condition with time out in case the thread calling ThreadBase::setParameters() - // already timed out waiting for the status and will never signal the condition. 
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); - } - - if (!(previousCommand & FastMixerState::IDLE)) { - ALOG_ASSERT(mFastMixer != NULL); - FastMixerStateQueue *sq = mFastMixer->sq(); - FastMixerState *state = sq->begin(); - ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE); - state->mCommand = previousCommand; - sq->end(); - sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); - } - - return reconfig; -} - -void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - PlaybackThread::dumpInternals(fd, args); - - snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames()); - result.append(buffer); - write(fd, result.string(), result.size()); - - // Make a non-atomic copy of fast mixer dump state so it won't change underneath us - FastMixerDumpState copy = mFastMixerDumpState; - copy.dump(fd); - -#ifdef STATE_QUEUE_DUMP - // Similar for state queue - StateQueueObserverDump observerCopy = mStateQueueObserverDump; - observerCopy.dump(fd); - StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump; - mutatorCopy.dump(fd); -#endif - - // Write the tee output to a .wav file - NBAIO_Source *teeSource = mTeeSource.get(); - if (teeSource != NULL) { - char teePath[64]; - struct timeval tv; - gettimeofday(&tv, NULL); - struct tm tm; - localtime_r(&tv.tv_sec, &tm); - strftime(teePath, sizeof(teePath), "/data/misc/media/%T.wav", &tm); - int teeFd = open(teePath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); - if (teeFd >= 0) { - char wavHeader[44]; - memcpy(wavHeader, - "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0", - sizeof(wavHeader)); - NBAIO_Format format = teeSource->format(); - unsigned channelCount = Format_channelCount(format); - ALOG_ASSERT(channelCount <= FCC_2); - unsigned sampleRate = Format_sampleRate(format); - wavHeader[22] = channelCount; // number of channels - wavHeader[24] = sampleRate; // sample 
rate - wavHeader[25] = sampleRate >> 8; - wavHeader[32] = channelCount * 2; // block alignment - write(teeFd, wavHeader, sizeof(wavHeader)); - size_t total = 0; - bool firstRead = true; - for (;;) { -#define TEE_SINK_READ 1024 - short buffer[TEE_SINK_READ * FCC_2]; - size_t count = TEE_SINK_READ; - ssize_t actual = teeSource->read(buffer, count, - AudioBufferProvider::kInvalidPTS); - bool wasFirstRead = firstRead; - firstRead = false; - if (actual <= 0) { - if (actual == (ssize_t) OVERRUN && wasFirstRead) { - continue; - } - break; - } - ALOG_ASSERT(actual <= (ssize_t)count); - write(teeFd, buffer, actual * channelCount * sizeof(short)); - total += actual; - } - lseek(teeFd, (off_t) 4, SEEK_SET); - uint32_t temp = 44 + total * channelCount * sizeof(short) - 8; - write(teeFd, &temp, sizeof(temp)); - lseek(teeFd, (off_t) 40, SEEK_SET); - temp = total * channelCount * sizeof(short); - write(teeFd, &temp, sizeof(temp)); - close(teeFd); - fdprintf(fd, "FastMixer tee copied to %s\n", teePath); - } else { - fdprintf(fd, "FastMixer unable to create tee %s: \n", strerror(errno)); - } - } - -#ifdef AUDIO_WATCHDOG - if (mAudioWatchdog != 0) { - // Make a non-atomic copy of audio watchdog dump so it won't change underneath us - AudioWatchdogDump wdCopy = mAudioWatchdogDump; - wdCopy.dump(fd); - } -#endif -} - -uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const -{ - return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2; -} - -uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs() const -{ - return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000); -} - -void AudioFlinger::MixerThread::cacheParameters_l() -{ - PlaybackThread::cacheParameters_l(); - - // FIXME: Relaxed timing because of a certain device that can't meet latency - // Should be reduced to 2x after the vendor fixes the driver issue - // increase threshold again due to low power audio mode. 
The way this warning - // threshold is calculated and its usefulness should be reconsidered anyway. - maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15; -} - -// ---------------------------------------------------------------------------- -AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger, - AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device) - : PlaybackThread(audioFlinger, output, id, device, DIRECT) - // mLeftVolFloat, mRightVolFloat -{ -} - -AudioFlinger::DirectOutputThread::~DirectOutputThread() -{ -} - -AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l( - Vector< sp<Track> > *tracksToRemove -) -{ - sp<Track> trackToRemove; - - mixer_state mixerStatus = MIXER_IDLE; - - // find out which tracks need to be processed - if (mActiveTracks.size() != 0) { - sp<Track> t = mActiveTracks[0].promote(); - // The track died recently - if (t == 0) return MIXER_IDLE; - - Track* const track = t.get(); - audio_track_cblk_t* cblk = track->cblk(); - - // The first time a track is added we wait - // for all its buffers to be filled before processing it - uint32_t minFrames; - if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) { - minFrames = mNormalFrameCount; - } else { - minFrames = 1; - } - if ((track->framesReady() >= minFrames) && track->isReady() && - !track->isPaused() && !track->isTerminated()) - { - //ALOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server); - - if (track->mFillingUpStatus == Track::FS_FILLED) { - track->mFillingUpStatus = Track::FS_ACTIVE; - mLeftVolFloat = mRightVolFloat = 0; - if (track->mState == TrackBase::RESUMING) { - track->mState = TrackBase::ACTIVE; - } - } - - // compute volume for this track - float left, right; - if (track->isMuted() || mMasterMute || track->isPausing() || - mStreamTypes[track->streamType()].mute) { - left = right = 0; - if (track->isPausing()) { - track->setPaused(); 
- } - } else { - float typeVolume = mStreamTypes[track->streamType()].volume; - float v = mMasterVolume * typeVolume; - uint32_t vlr = cblk->getVolumeLR(); - float v_clamped = v * (vlr & 0xFFFF); - if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN; - left = v_clamped/MAX_GAIN; - v_clamped = v * (vlr >> 16); - if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN; - right = v_clamped/MAX_GAIN; - } - - if (left != mLeftVolFloat || right != mRightVolFloat) { - mLeftVolFloat = left; - mRightVolFloat = right; - - // Convert volumes from float to 8.24 - uint32_t vl = (uint32_t)(left * (1 << 24)); - uint32_t vr = (uint32_t)(right * (1 << 24)); - - // Delegate volume control to effect in track effect chain if needed - // only one effect chain can be present on DirectOutputThread, so if - // there is one, the track is connected to it - if (!mEffectChains.isEmpty()) { - // Do not ramp volume if volume is controlled by effect - mEffectChains[0]->setVolume_l(&vl, &vr); - left = (float)vl / (1 << 24); - right = (float)vr / (1 << 24); - } - mOutput->stream->set_volume(mOutput->stream, left, right); - } - - // reset retry count - track->mRetryCount = kMaxTrackRetriesDirect; - mActiveTrack = t; - mixerStatus = MIXER_TRACKS_READY; - } else { - // clear effect chain input buffer if an active track underruns to avoid sending - // previous audio buffer again to effects - if (!mEffectChains.isEmpty()) { - mEffectChains[0]->clearInputBuffer(); - } - - //ALOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server); - if ((track->sharedBuffer() != 0) || track->isTerminated() || - track->isStopped() || track->isPaused()) { - // We have consumed all the buffers of this track. - // Remove it from the list of active tracks. 
- // TODO: implement behavior for compressed audio - size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; - size_t framesWritten = - mBytesWritten / audio_stream_frame_size(&mOutput->stream->common); - if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) { - if (track->isStopped()) { - track->reset(); - } - trackToRemove = track; - } - } else { - // No buffers for this track. Give it a few chances to - // fill a buffer, then remove it from active list. - if (--(track->mRetryCount) <= 0) { - ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name()); - trackToRemove = track; - } else { - mixerStatus = MIXER_TRACKS_ENABLED; - } - } - } - } - - // FIXME merge this with similar code for removing multiple tracks - // remove all the tracks that need to be... - if (CC_UNLIKELY(trackToRemove != 0)) { - tracksToRemove->add(trackToRemove); - mActiveTracks.remove(trackToRemove); - if (!mEffectChains.isEmpty()) { - ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(), - trackToRemove->sessionId()); - mEffectChains[0]->decActiveTrackCnt(); - } - if (trackToRemove->isTerminated()) { - removeTrack_l(trackToRemove); - } - } - - return mixerStatus; -} - -void AudioFlinger::DirectOutputThread::threadLoop_mix() -{ - AudioBufferProvider::Buffer buffer; - size_t frameCount = mFrameCount; - int8_t *curBuf = (int8_t *)mMixBuffer; - // output audio to hardware - while (frameCount) { - buffer.frameCount = frameCount; - mActiveTrack->getNextBuffer(&buffer); - if (CC_UNLIKELY(buffer.raw == NULL)) { - memset(curBuf, 0, frameCount * mFrameSize); - break; - } - memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize); - frameCount -= buffer.frameCount; - curBuf += buffer.frameCount * mFrameSize; - mActiveTrack->releaseBuffer(&buffer); - } - sleepTime = 0; - standbyTime = systemTime() + standbyDelay; - mActiveTrack.clear(); - -} - -void AudioFlinger::DirectOutputThread::threadLoop_sleepTime() -{ - if (sleepTime == 0) { - if 
(mMixerStatus == MIXER_TRACKS_ENABLED) { - sleepTime = activeSleepTime; - } else { - sleepTime = idleSleepTime; - } - } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) { - memset(mMixBuffer, 0, mFrameCount * mFrameSize); - sleepTime = 0; - } -} - -// getTrackName_l() must be called with ThreadBase::mLock held -int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask, - int sessionId) -{ - return 0; -} - -// deleteTrackName_l() must be called with ThreadBase::mLock held -void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name) -{ -} - -// checkForNewParameters_l() must be called with ThreadBase::mLock held -bool AudioFlinger::DirectOutputThread::checkForNewParameters_l() -{ - bool reconfig = false; - - while (!mNewParameters.isEmpty()) { - status_t status = NO_ERROR; - String8 keyValuePair = mNewParameters[0]; - AudioParameter param = AudioParameter(keyValuePair); - int value; - - if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { - // do not accept frame count changes if tracks are open as the track buffer - // size depends on frame count and correct behavior would not be garantied - // if frame count is changed after track creation - if (!mTracks.isEmpty()) { - status = INVALID_OPERATION; - } else { - reconfig = true; - } - } - if (status == NO_ERROR) { - status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - if (!mStandby && status == INVALID_OPERATION) { - mOutput->stream->common.standby(&mOutput->stream->common); - mStandby = true; - mBytesWritten = 0; - status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - } - if (status == NO_ERROR && reconfig) { - readOutputParameters(); - sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); - } - } - - mNewParameters.removeAt(0); - - mParamStatus = status; - mParamCond.signal(); - // wait for condition with time out in case the thread calling 
ThreadBase::setParameters() - // already timed out waiting for the status and will never signal the condition. - mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); - } - return reconfig; -} - -uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const -{ - uint32_t time; - if (audio_is_linear_pcm(mFormat)) { - time = PlaybackThread::activeSleepTimeUs(); - } else { - time = 10000; - } - return time; -} - -uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const -{ - uint32_t time; - if (audio_is_linear_pcm(mFormat)) { - time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2; - } else { - time = 10000; - } - return time; -} - -uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const -{ - uint32_t time; - if (audio_is_linear_pcm(mFormat)) { - time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000); - } else { - time = 10000; - } - return time; -} - -void AudioFlinger::DirectOutputThread::cacheParameters_l() -{ - PlaybackThread::cacheParameters_l(); - - // use shorter standby delay as on normal output to release - // hardware resources as soon as possible - standbyDelay = microseconds(activeSleepTime*2); -} - -// ---------------------------------------------------------------------------- - -AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, - AudioFlinger::MixerThread* mainThread, audio_io_handle_t id) - : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(), DUPLICATING), - mWaitTimeMs(UINT_MAX) -{ - addOutputTrack(mainThread); -} - -AudioFlinger::DuplicatingThread::~DuplicatingThread() -{ - for (size_t i = 0; i < mOutputTracks.size(); i++) { - mOutputTracks[i]->destroy(); - } -} - -void AudioFlinger::DuplicatingThread::threadLoop_mix() -{ - // mix buffers... 
- if (outputsReady(outputTracks)) { - mAudioMixer->process(AudioBufferProvider::kInvalidPTS); - } else { - memset(mMixBuffer, 0, mixBufferSize); - } - sleepTime = 0; - writeFrames = mNormalFrameCount; - standbyTime = systemTime() + standbyDelay; -} - -void AudioFlinger::DuplicatingThread::threadLoop_sleepTime() -{ - if (sleepTime == 0) { - if (mMixerStatus == MIXER_TRACKS_ENABLED) { - sleepTime = activeSleepTime; - } else { - sleepTime = idleSleepTime; - } - } else if (mBytesWritten != 0) { - if (mMixerStatus == MIXER_TRACKS_ENABLED) { - writeFrames = mNormalFrameCount; - memset(mMixBuffer, 0, mixBufferSize); - } else { - // flush remaining overflow buffers in output tracks - writeFrames = 0; - } - sleepTime = 0; - } -} - -void AudioFlinger::DuplicatingThread::threadLoop_write() -{ - for (size_t i = 0; i < outputTracks.size(); i++) { - outputTracks[i]->write(mMixBuffer, writeFrames); - } - mBytesWritten += mixBufferSize; -} - -void AudioFlinger::DuplicatingThread::threadLoop_standby() -{ - // DuplicatingThread implements standby by stopping all tracks - for (size_t i = 0; i < outputTracks.size(); i++) { - outputTracks[i]->stop(); - } -} - -void AudioFlinger::DuplicatingThread::saveOutputTracks() -{ - outputTracks = mOutputTracks; -} - -void AudioFlinger::DuplicatingThread::clearOutputTracks() -{ - outputTracks.clear(); -} - -void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread) -{ - Mutex::Autolock _l(mLock); - // FIXME explain this formula - int frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate(); - OutputTrack *outputTrack = new OutputTrack(thread, - this, - mSampleRate, - mFormat, - mChannelMask, - frameCount); - if (outputTrack->cblk() != NULL) { - thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f); - mOutputTracks.add(outputTrack); - ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread); - updateWaitTime_l(); - } -} - -void AudioFlinger::DuplicatingThread::removeOutputTrack(MixerThread *thread) -{ - 
Mutex::Autolock _l(mLock); - for (size_t i = 0; i < mOutputTracks.size(); i++) { - if (mOutputTracks[i]->thread() == thread) { - mOutputTracks[i]->destroy(); - mOutputTracks.removeAt(i); - updateWaitTime_l(); - return; - } - } - ALOGV("removeOutputTrack(): unkonwn thread: %p", thread); -} - -// caller must hold mLock -void AudioFlinger::DuplicatingThread::updateWaitTime_l() -{ - mWaitTimeMs = UINT_MAX; - for (size_t i = 0; i < mOutputTracks.size(); i++) { - sp<ThreadBase> strong = mOutputTracks[i]->thread().promote(); - if (strong != 0) { - uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate(); - if (waitTimeMs < mWaitTimeMs) { - mWaitTimeMs = waitTimeMs; - } - } - } -} - - -bool AudioFlinger::DuplicatingThread::outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks) -{ - for (size_t i = 0; i < outputTracks.size(); i++) { - sp<ThreadBase> thread = outputTracks[i]->thread().promote(); - if (thread == 0) { - ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p", outputTracks[i].get()); - return false; - } - PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - // see note at standby() declaration - if (playbackThread->standby() && !playbackThread->isSuspended()) { - ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(), thread.get()); - return false; - } - } - return true; -} - -uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const -{ - return (mWaitTimeMs * 1000) / 2; -} - -void AudioFlinger::DuplicatingThread::cacheParameters_l() -{ - // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first - updateWaitTime_l(); - - MixerThread::cacheParameters_l(); -} - -// ---------------------------------------------------------------------------- - -// TrackBase constructor must be called with AudioFlinger::mLock held -AudioFlinger::ThreadBase::TrackBase::TrackBase( - ThreadBase *thread, - const sp<Client>& client, - 
uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId) - : RefBase(), - mThread(thread), - mClient(client), - mCblk(NULL), - // mBuffer - // mBufferEnd - mFrameCount(0), - mState(IDLE), - mSampleRate(sampleRate), - mFormat(format), - mStepServerFailed(false), - mSessionId(sessionId) - // mChannelCount - // mChannelMask -{ - ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size()); - - // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize); - size_t size = sizeof(audio_track_cblk_t); - uint8_t channelCount = popcount(channelMask); - size_t bufferSize = frameCount*channelCount*sizeof(int16_t); - if (sharedBuffer == 0) { - size += bufferSize; - } - - if (client != NULL) { - mCblkMemory = client->heap()->allocate(size); - if (mCblkMemory != 0) { - mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer()); - if (mCblk != NULL) { // construct the shared structure in-place. 
- new(mCblk) audio_track_cblk_t(); - // clear all buffers - mCblk->frameCount = frameCount; - mCblk->sampleRate = sampleRate; -// uncomment the following lines to quickly test 32-bit wraparound -// mCblk->user = 0xffff0000; -// mCblk->server = 0xffff0000; -// mCblk->userBase = 0xffff0000; -// mCblk->serverBase = 0xffff0000; - mChannelCount = channelCount; - mChannelMask = channelMask; - if (sharedBuffer == 0) { - mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); - memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t)); - // Force underrun condition to avoid false underrun callback until first data is - // written to buffer (other flags are cleared) - mCblk->flags = CBLK_UNDERRUN_ON; - } else { - mBuffer = sharedBuffer->pointer(); - } - mBufferEnd = (uint8_t *)mBuffer + bufferSize; - } - } else { - ALOGE("not enough memory for AudioTrack size=%u", size); - client->heap()->dump("AudioTrack"); - return; - } - } else { - mCblk = (audio_track_cblk_t *)(new uint8_t[size]); - // construct the shared structure in-place. - new(mCblk) audio_track_cblk_t(); - // clear all buffers - mCblk->frameCount = frameCount; - mCblk->sampleRate = sampleRate; -// uncomment the following lines to quickly test 32-bit wraparound -// mCblk->user = 0xffff0000; -// mCblk->server = 0xffff0000; -// mCblk->userBase = 0xffff0000; -// mCblk->serverBase = 0xffff0000; - mChannelCount = channelCount; - mChannelMask = channelMask; - mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); - memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t)); - // Force underrun condition to avoid false underrun callback until first data is - // written to buffer (other flags are cleared) - mCblk->flags = CBLK_UNDERRUN_ON; - mBufferEnd = (uint8_t *)mBuffer + bufferSize; - } -} - -AudioFlinger::ThreadBase::TrackBase::~TrackBase() -{ - if (mCblk != NULL) { - if (mClient == 0) { - delete mCblk; - } else { - mCblk->~audio_track_cblk_t(); // destroy our shared-structure. 
- } - } - mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to - if (mClient != 0) { - // Client destructor must run with AudioFlinger mutex locked - Mutex::Autolock _l(mClient->audioFlinger()->mLock); - // If the client's reference count drops to zero, the associated destructor - // must run with AudioFlinger lock held. Thus the explicit clear() rather than - // relying on the automatic clear() at end of scope. - mClient.clear(); - } -} - -// AudioBufferProvider interface -// getNextBuffer() = 0; -// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack -void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer) -{ - buffer->raw = NULL; - mFrameCount = buffer->frameCount; - // FIXME See note at getNextBuffer() - (void) step(); // ignore return value of step() - buffer->frameCount = 0; -} - -bool AudioFlinger::ThreadBase::TrackBase::step() { - bool result; - audio_track_cblk_t* cblk = this->cblk(); - - result = cblk->stepServer(mFrameCount); - if (!result) { - ALOGV("stepServer failed acquiring cblk mutex"); - mStepServerFailed = true; - } - return result; -} - -void AudioFlinger::ThreadBase::TrackBase::reset() { - audio_track_cblk_t* cblk = this->cblk(); - - cblk->user = 0; - cblk->server = 0; - cblk->userBase = 0; - cblk->serverBase = 0; - mStepServerFailed = false; - ALOGV("TrackBase::reset"); -} - -int AudioFlinger::ThreadBase::TrackBase::sampleRate() const { - return (int)mCblk->sampleRate; -} - -void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const { - audio_track_cblk_t* cblk = this->cblk(); - size_t frameSize = cblk->frameSize; - int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase)*frameSize; - int8_t *bufferEnd = bufferStart + frames * frameSize; - - // Check validity of returned pointer in case the track control block would have been corrupted. 
- ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd), - "TrackBase::getBuffer buffer out of range:\n" - " start: %p, end %p , mBuffer %p mBufferEnd %p\n" - " server %u, serverBase %u, user %u, userBase %u, frameSize %d", - bufferStart, bufferEnd, mBuffer, mBufferEnd, - cblk->server, cblk->serverBase, cblk->user, cblk->userBase, frameSize); - - return bufferStart; -} - -status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event) -{ - mSyncEvents.add(event); - return NO_ERROR; -} - -// ---------------------------------------------------------------------------- - -// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held -AudioFlinger::PlaybackThread::Track::Track( - PlaybackThread *thread, - const sp<Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId, - IAudioFlinger::track_flags_t flags) - : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId), - mMute(false), - mFillingUpStatus(FS_INVALID), - // mRetryCount initialized later when needed - mSharedBuffer(sharedBuffer), - mStreamType(streamType), - mName(-1), // see note below - mMainBuffer(thread->mixBuffer()), - mAuxBuffer(NULL), - mAuxEffectId(0), mHasVolumeController(false), - mPresentationCompleteFrames(0), - mFlags(flags), - mFastIndex(-1), - mUnderrunCount(0), - mCachedVolume(1.0) -{ - if (mCblk != NULL) { - // NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of - // 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack - mCblk->frameSize = audio_is_linear_pcm(format) ? 
mChannelCount * sizeof(int16_t) : sizeof(uint8_t); - // to avoid leaking a track name, do not allocate one unless there is an mCblk - mName = thread->getTrackName_l(channelMask, sessionId); - mCblk->mName = mName; - if (mName < 0) { - ALOGE("no more track names available"); - return; - } - // only allocate a fast track index if we were able to allocate a normal track name - if (flags & IAudioFlinger::TRACK_FAST) { - mCblk->flags |= CBLK_FAST; // atomic op not needed yet - ALOG_ASSERT(thread->mFastTrackAvailMask != 0); - int i = __builtin_ctz(thread->mFastTrackAvailMask); - ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks); - // FIXME This is too eager. We allocate a fast track index before the - // fast track becomes active. Since fast tracks are a scarce resource, - // this means we are potentially denying other more important fast tracks from - // being created. It would be better to allocate the index dynamically. - mFastIndex = i; - mCblk->mName = i; - // Read the initial underruns because this field is never cleared by the fast mixer - mObservedUnderruns = thread->getFastTrackUnderruns(i); - thread->mFastTrackAvailMask &= ~(1 << i); - } - } - ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid()); -} - -AudioFlinger::PlaybackThread::Track::~Track() -{ - ALOGV("PlaybackThread::Track destructor"); -} - -void AudioFlinger::PlaybackThread::Track::destroy() -{ - // NOTE: destroyTrack_l() can remove a strong reference to this Track - // by removing it from mTracks vector, so there is a risk that this Tracks's - // destructor is called. As the destructor needs to lock mLock, - // we must acquire a strong reference on this Track before locking mLock - // here so that the destructor is called only when exiting this function. - // On the other hand, as long as Track::destroy() is only called by - // TrackHandle destructor, the TrackHandle still holds a strong ref on - // this Track with its member mTrack. 
- sp<Track> keep(this); - { // scope for mLock - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - if (!isOutputTrack()) { - if (mState == ACTIVE || mState == RESUMING) { - AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId); - -#ifdef ADD_BATTERY_DATA - // to track the speaker usage - addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop); -#endif - } - AudioSystem::releaseOutput(thread->id()); - } - Mutex::Autolock _l(thread->mLock); - PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - playbackThread->destroyTrack_l(this); - } - } -} - -/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result) -{ - result.append(" Name Client Type Fmt Chn mask Session mFrCnt fCount S M F SRate L dB R dB " - " Server User Main buf Aux Buf Flags Underruns\n"); -} - -void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) -{ - uint32_t vlr = mCblk->getVolumeLR(); - if (isFastTrack()) { - sprintf(buffer, " F %2d", mFastIndex); - } else { - sprintf(buffer, " %4d", mName - AudioMixer::TRACK0); - } - track_state state = mState; - char stateChar; - switch (state) { - case IDLE: - stateChar = 'I'; - break; - case TERMINATED: - stateChar = 'T'; - break; - case STOPPING_1: - stateChar = 's'; - break; - case STOPPING_2: - stateChar = '5'; - break; - case STOPPED: - stateChar = 'S'; - break; - case RESUMING: - stateChar = 'R'; - break; - case ACTIVE: - stateChar = 'A'; - break; - case PAUSING: - stateChar = 'p'; - break; - case PAUSED: - stateChar = 'P'; - break; - case FLUSHED: - stateChar = 'F'; - break; - default: - stateChar = '?'; - break; - } - char nowInUnderrun; - switch (mObservedUnderruns.mBitFields.mMostRecent) { - case UNDERRUN_FULL: - nowInUnderrun = ' '; - break; - case UNDERRUN_PARTIAL: - nowInUnderrun = '<'; - break; - case UNDERRUN_EMPTY: - nowInUnderrun = '*'; - break; - default: - nowInUnderrun = '?'; - break; - } - snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u 
%6u %1c %1d %1d %5u %5.2g %5.2g " - "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n", - (mClient == 0) ? getpid_cached : mClient->pid(), - mStreamType, - mFormat, - mChannelMask, - mSessionId, - mFrameCount, - mCblk->frameCount, - stateChar, - mMute, - mFillingUpStatus, - mCblk->sampleRate, - 20.0 * log10((vlr & 0xFFFF) / 4096.0), - 20.0 * log10((vlr >> 16) / 4096.0), - mCblk->server, - mCblk->user, - (int)mMainBuffer, - (int)mAuxBuffer, - mCblk->flags, - mUnderrunCount, - nowInUnderrun); -} - -// AudioBufferProvider interface -status_t AudioFlinger::PlaybackThread::Track::getNextBuffer( - AudioBufferProvider::Buffer* buffer, int64_t pts) -{ - audio_track_cblk_t* cblk = this->cblk(); - uint32_t framesReady; - uint32_t framesReq = buffer->frameCount; - - // Check if last stepServer failed, try to step now - if (mStepServerFailed) { - // FIXME When called by fast mixer, this takes a mutex with tryLock(). - // Since the fast mixer is higher priority than client callback thread, - // it does not result in priority inversion for client. - // But a non-blocking solution would be preferable to avoid - // fast mixer being unable to tryLock(), and - // to avoid the extra context switches if the client wakes up, - // discovers the mutex is locked, then has to wait for fast mixer to unlock. - if (!step()) goto getNextBuffer_exit; - ALOGV("stepServer recovered"); - mStepServerFailed = false; - } - - // FIXME Same as above - framesReady = cblk->framesReady(); - - if (CC_LIKELY(framesReady)) { - uint32_t s = cblk->server; - uint32_t bufferEnd = cblk->serverBase + cblk->frameCount; - - bufferEnd = (cblk->loopEnd < bufferEnd) ? 
cblk->loopEnd : bufferEnd; - if (framesReq > framesReady) { - framesReq = framesReady; - } - if (framesReq > bufferEnd - s) { - framesReq = bufferEnd - s; - } - - buffer->raw = getBuffer(s, framesReq); - buffer->frameCount = framesReq; - return NO_ERROR; - } - -getNextBuffer_exit: - buffer->raw = NULL; - buffer->frameCount = 0; - ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get()); - return NOT_ENOUGH_DATA; -} - -// Note that framesReady() takes a mutex on the control block using tryLock(). -// This could result in priority inversion if framesReady() is called by the normal mixer, -// as the normal mixer thread runs at lower -// priority than the client's callback thread: there is a short window within framesReady() -// during which the normal mixer could be preempted, and the client callback would block. -// Another problem can occur if framesReady() is called by the fast mixer: -// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer. -// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue. 
-size_t AudioFlinger::PlaybackThread::Track::framesReady() const { - return mCblk->framesReady(); -} - -// Don't call for fast tracks; the framesReady() could result in priority inversion -bool AudioFlinger::PlaybackThread::Track::isReady() const { - if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true; - - if (framesReady() >= mCblk->frameCount || - (mCblk->flags & CBLK_FORCEREADY_MSK)) { - mFillingUpStatus = FS_FILLED; - android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags); - return true; - } - return false; -} - -status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event, - int triggerSession) -{ - status_t status = NO_ERROR; - ALOGV("start(%d), calling pid %d session %d", - mName, IPCThreadState::self()->getCallingPid(), mSessionId); - - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - Mutex::Autolock _l(thread->mLock); - track_state state = mState; - // here the track could be either new, or restarted - // in both cases "unstop" the track - if (mState == PAUSED) { - mState = TrackBase::RESUMING; - ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this); - } else { - mState = TrackBase::ACTIVE; - ALOGV("? 
=> ACTIVE (%d) on thread %p", mName, this); - } - - if (!isOutputTrack() && state != ACTIVE && state != RESUMING) { - thread->mLock.unlock(); - status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId); - thread->mLock.lock(); - -#ifdef ADD_BATTERY_DATA - // to track the speaker usage - if (status == NO_ERROR) { - addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart); - } -#endif - } - if (status == NO_ERROR) { - PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - playbackThread->addTrack_l(this); - } else { - mState = state; - triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); - } - } else { - status = BAD_VALUE; - } - return status; -} - -void AudioFlinger::PlaybackThread::Track::stop() -{ - ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid()); - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - Mutex::Autolock _l(thread->mLock); - track_state state = mState; - if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) { - // If the track is not active (PAUSED and buffers full), flush buffers - PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - if (playbackThread->mActiveTracks.indexOf(this) < 0) { - reset(); - mState = STOPPED; - } else if (!isFastTrack()) { - mState = STOPPED; - } else { - // prepareTracks_l() will set state to STOPPING_2 after next underrun, - // and then to STOPPED and reset() when presentation is complete - mState = STOPPING_1; - } - ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName, playbackThread); - } - if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) { - thread->mLock.unlock(); - AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId); - thread->mLock.lock(); - -#ifdef ADD_BATTERY_DATA - // to track the speaker usage - addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop); -#endif - } - } -} - -void AudioFlinger::PlaybackThread::Track::pause() -{ 
- ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid()); - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - Mutex::Autolock _l(thread->mLock); - if (mState == ACTIVE || mState == RESUMING) { - mState = PAUSING; - ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get()); - if (!isOutputTrack()) { - thread->mLock.unlock(); - AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId); - thread->mLock.lock(); - -#ifdef ADD_BATTERY_DATA - // to track the speaker usage - addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop); -#endif - } - } - } -} - -void AudioFlinger::PlaybackThread::Track::flush() -{ - ALOGV("flush(%d)", mName); - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - Mutex::Autolock _l(thread->mLock); - if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED && - mState != PAUSING && mState != IDLE && mState != FLUSHED) { - return; - } - // No point remaining in PAUSED state after a flush => go to - // FLUSHED state - mState = FLUSHED; - // do not reset the track if it is still in the process of being stopped or paused. - // this will be done by prepareTracks_l() when the track is stopped. - // prepareTracks_l() will see mState == FLUSHED, then - // remove from active track list, reset(), and trigger presentation complete - PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - if (playbackThread->mActiveTracks.indexOf(this) < 0) { - reset(); - } - } -} - -void AudioFlinger::PlaybackThread::Track::reset() -{ - // Do not reset twice to avoid discarding data written just after a flush and before - // the audioflinger thread detects the track is stopped. 
- if (!mResetDone) { - TrackBase::reset(); - // Force underrun condition to avoid false underrun callback until first data is - // written to buffer - android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags); - android_atomic_or(CBLK_UNDERRUN_ON, &mCblk->flags); - mFillingUpStatus = FS_FILLING; - mResetDone = true; - if (mState == FLUSHED) { - mState = IDLE; - } - } -} - -void AudioFlinger::PlaybackThread::Track::mute(bool muted) -{ - mMute = muted; -} - -status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId) -{ - status_t status = DEAD_OBJECT; - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - sp<AudioFlinger> af = mClient->audioFlinger(); - - Mutex::Autolock _l(af->mLock); - - sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId); - - if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) { - Mutex::Autolock _dl(playbackThread->mLock); - Mutex::Autolock _sl(srcThread->mLock); - sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); - if (chain == 0) { - return INVALID_OPERATION; - } - - sp<EffectModule> effect = chain->getEffectFromId_l(EffectId); - if (effect == 0) { - return INVALID_OPERATION; - } - srcThread->removeEffect_l(effect); - playbackThread->addEffect_l(effect); - // removeEffect_l() has stopped the effect if it was active so it must be restarted - if (effect->state() == EffectModule::ACTIVE || - effect->state() == EffectModule::STOPPING) { - effect->start(); - } - - sp<EffectChain> dstChain = effect->chain().promote(); - if (dstChain == 0) { - srcThread->addEffect_l(effect); - return INVALID_OPERATION; - } - AudioSystem::unregisterEffect(effect->id()); - AudioSystem::registerEffect(&effect->desc(), - srcThread->id(), - dstChain->strategy(), - AUDIO_SESSION_OUTPUT_MIX, - effect->id()); - } - status = playbackThread->attachAuxEffect(this, EffectId); - } - return status; -} 
- -void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer) -{ - mAuxEffectId = EffectId; - mAuxBuffer = buffer; -} - -bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten, - size_t audioHalFrames) -{ - // a track is considered presented when the total number of frames written to audio HAL - // corresponds to the number of frames written when presentationComplete() is called for the - // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time. - if (mPresentationCompleteFrames == 0) { - mPresentationCompleteFrames = framesWritten + audioHalFrames; - ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d", - mPresentationCompleteFrames, audioHalFrames); - } - if (framesWritten >= mPresentationCompleteFrames) { - ALOGV("presentationComplete() session %d complete: framesWritten %d", - mSessionId, framesWritten); - triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); - return true; - } - return false; -} - -void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type) -{ - for (int i = 0; i < (int)mSyncEvents.size(); i++) { - if (mSyncEvents[i]->type() == type) { - mSyncEvents[i]->trigger(); - mSyncEvents.removeAt(i); - i--; - } - } -} - -// implement VolumeBufferProvider interface - -uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR() -{ - // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs - ALOG_ASSERT(isFastTrack() && (mCblk != NULL)); - uint32_t vlr = mCblk->getVolumeLR(); - uint32_t vl = vlr & 0xFFFF; - uint32_t vr = vlr >> 16; - // track volumes come from shared memory, so can't be trusted and must be clamped - if (vl > MAX_GAIN_INT) { - vl = MAX_GAIN_INT; - } - if (vr > MAX_GAIN_INT) { - vr = MAX_GAIN_INT; - } - // now apply the cached master volume and stream type volume; - // this is trusted but lacks any synchronization or barrier so may be stale - float v 
= mCachedVolume; - vl *= v; - vr *= v; - // re-combine into U4.16 - vlr = (vr << 16) | (vl & 0xFFFF); - // FIXME look at mute, pause, and stop flags - return vlr; -} - -status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event) -{ - if (mState == TERMINATED || mState == PAUSED || - ((framesReady() == 0) && ((mSharedBuffer != 0) || - (mState == STOPPED)))) { - ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ", - mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady()); - event->cancel(); - return INVALID_OPERATION; - } - (void) TrackBase::setSyncEvent(event); - return NO_ERROR; -} - -// timed audio tracks - -sp<AudioFlinger::PlaybackThread::TimedTrack> -AudioFlinger::PlaybackThread::TimedTrack::create( - PlaybackThread *thread, - const sp<Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId) { - if (!client->reserveTimedTrack()) - return 0; - - return new TimedTrack( - thread, client, streamType, sampleRate, format, channelMask, frameCount, - sharedBuffer, sessionId); -} - -AudioFlinger::PlaybackThread::TimedTrack::TimedTrack( - PlaybackThread *thread, - const sp<Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId) - : Track(thread, client, streamType, sampleRate, format, channelMask, - frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED), - mQueueHeadInFlight(false), - mTrimQueueHeadOnRelease(false), - mFramesPendingInQueue(0), - mTimedSilenceBuffer(NULL), - mTimedSilenceBufferSize(0), - mTimedAudioOutputOnTime(false), - mMediaTimeTransformValid(false) -{ - LocalClock lc; - mLocalTimeFreq = lc.getLocalFreq(); - - mLocalTimeToSampleTransform.a_zero = 0; - 
mLocalTimeToSampleTransform.b_zero = 0; - mLocalTimeToSampleTransform.a_to_b_numer = sampleRate; - mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq; - LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer, - &mLocalTimeToSampleTransform.a_to_b_denom); - - mMediaTimeToSampleTransform.a_zero = 0; - mMediaTimeToSampleTransform.b_zero = 0; - mMediaTimeToSampleTransform.a_to_b_numer = sampleRate; - mMediaTimeToSampleTransform.a_to_b_denom = 1000000; - LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer, - &mMediaTimeToSampleTransform.a_to_b_denom); -} - -AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() { - mClient->releaseTimedTrack(); - delete [] mTimedSilenceBuffer; -} - -status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer( - size_t size, sp<IMemory>* buffer) { - - Mutex::Autolock _l(mTimedBufferQueueLock); - - trimTimedBufferQueue_l(); - - // lazily initialize the shared memory heap for timed buffers - if (mTimedMemoryDealer == NULL) { - const int kTimedBufferHeapSize = 512 << 10; - - mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize, - "AudioFlingerTimed"); - if (mTimedMemoryDealer == NULL) - return NO_MEMORY; - } - - sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) { - newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) - return NO_MEMORY; - } - - *buffer = newBuffer; - return NO_ERROR; -} - -// caller must hold mTimedBufferQueueLock -void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() { - int64_t mediaTimeNow; - { - Mutex::Autolock mttLock(mMediaTimeTransformLock); - if (!mMediaTimeTransformValid) - return; - - int64_t targetTimeNow; - status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) - ? 
mCCHelper.getCommonTime(&targetTimeNow) - : mCCHelper.getLocalTime(&targetTimeNow); - - if (OK != res) - return; - - if (!mMediaTimeTransform.doReverseTransform(targetTimeNow, - &mediaTimeNow)) { - return; - } - } - - size_t trimEnd; - for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) { - int64_t bufEnd; - - if ((trimEnd + 1) < mTimedBufferQueue.size()) { - // We have a next buffer. Just use its PTS as the PTS of the frame - // following the last frame in this buffer. If the stream is sparse - // (ie, there are deliberate gaps left in the stream which should be - // filled with silence by the TimedAudioTrack), then this can result - // in one extra buffer being left un-trimmed when it could have - // been. In general, this is not typical, and we would rather - // optimized away the TS calculation below for the more common case - // where PTSes are contiguous. - bufEnd = mTimedBufferQueue[trimEnd + 1].pts(); - } else { - // We have no next buffer. Compute the PTS of the frame following - // the last frame in this buffer by computing the duration of of - // this frame in media time units and adding it to the PTS of the - // buffer. - int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size() - / mCblk->frameSize; - - if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount, - &bufEnd)) { - ALOGE("Failed to convert frame count of %lld to media time" - " duration" " (scale factor %d/%u) in %s", - frameCount, - mMediaTimeToSampleTransform.a_to_b_numer, - mMediaTimeToSampleTransform.a_to_b_denom, - __PRETTY_FUNCTION__); - break; - } - bufEnd += mTimedBufferQueue[trimEnd].pts(); - } - - if (bufEnd > mediaTimeNow) - break; - - // Is the buffer we want to use in the middle of a mix operation right - // now? If so, don't actually trim it. Just wait for the releaseBuffer - // from the mixer which should be coming back shortly. 
- if (!trimEnd && mQueueHeadInFlight) { - mTrimQueueHeadOnRelease = true; - } - } - - size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0; - if (trimStart < trimEnd) { - // Update the bookkeeping for framesReady() - for (size_t i = trimStart; i < trimEnd; ++i) { - updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim"); - } - - // Now actually remove the buffers from the queue. - mTimedBufferQueue.removeItemsAt(trimStart, trimEnd); - } -} - -void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l( - const char* logTag) { - ALOG_ASSERT(mTimedBufferQueue.size() > 0, - "%s called (reason \"%s\"), but timed buffer queue has no" - " elements to trim.", __FUNCTION__, logTag); - - updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag); - mTimedBufferQueue.removeAt(0); -} - -void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l( - const TimedBuffer& buf, - const char* logTag) { - uint32_t bufBytes = buf.buffer()->size(); - uint32_t consumedAlready = buf.position(); - - ALOG_ASSERT(consumedAlready <= bufBytes, - "Bad bookkeeping while updating frames pending. Timed buffer is" - " only %u bytes long, but claims to have consumed %u" - " bytes. (update reason: \"%s\")", - bufBytes, consumedAlready, logTag); - - uint32_t bufFrames = (bufBytes - consumedAlready) / mCblk->frameSize; - ALOG_ASSERT(mFramesPendingInQueue >= bufFrames, - "Bad bookkeeping while updating frames pending. Should have at" - " least %u queued frames, but we think we have only %u. 
(update" - " reason: \"%s\")", - bufFrames, mFramesPendingInQueue, logTag); - - mFramesPendingInQueue -= bufFrames; -} - -status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer( - const sp<IMemory>& buffer, int64_t pts) { - - { - Mutex::Autolock mttLock(mMediaTimeTransformLock); - if (!mMediaTimeTransformValid) - return INVALID_OPERATION; - } - - Mutex::Autolock _l(mTimedBufferQueueLock); - - uint32_t bufFrames = buffer->size() / mCblk->frameSize; - mFramesPendingInQueue += bufFrames; - mTimedBufferQueue.add(TimedBuffer(buffer, pts)); - - return NO_ERROR; -} - -status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform( - const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) { - - ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d", - xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom, - target); - - if (!(target == TimedAudioTrack::LOCAL_TIME || - target == TimedAudioTrack::COMMON_TIME)) { - return BAD_VALUE; - } - - Mutex::Autolock lock(mMediaTimeTransformLock); - mMediaTimeTransform = xform; - mMediaTimeTransformTarget = target; - mMediaTimeTransformValid = true; - - return NO_ERROR; -} - -#define min(a, b) ((a) < (b) ? 
(a) : (b)) - -// implementation of getNextBuffer for tracks whose buffers have timestamps -status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer( - AudioBufferProvider::Buffer* buffer, int64_t pts) -{ - if (pts == AudioBufferProvider::kInvalidPTS) { - buffer->raw = NULL; - buffer->frameCount = 0; - mTimedAudioOutputOnTime = false; - return INVALID_OPERATION; - } - - Mutex::Autolock _l(mTimedBufferQueueLock); - - ALOG_ASSERT(!mQueueHeadInFlight, - "getNextBuffer called without releaseBuffer!"); - - while (true) { - - // if we have no timed buffers, then fail - if (mTimedBufferQueue.isEmpty()) { - buffer->raw = NULL; - buffer->frameCount = 0; - return NOT_ENOUGH_DATA; - } - - TimedBuffer& head = mTimedBufferQueue.editItemAt(0); - - // calculate the PTS of the head of the timed buffer queue expressed in - // local time - int64_t headLocalPTS; - { - Mutex::Autolock mttLock(mMediaTimeTransformLock); - - ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid"); - - if (mMediaTimeTransform.a_to_b_denom == 0) { - // the transform represents a pause, so yield silence - timedYieldSilence_l(buffer->frameCount, buffer); - return NO_ERROR; - } - - int64_t transformedPTS; - if (!mMediaTimeTransform.doForwardTransform(head.pts(), - &transformedPTS)) { - // the transform failed. 
this shouldn't happen, but if it does - // then just drop this buffer - ALOGW("timedGetNextBuffer transform failed"); - buffer->raw = NULL; - buffer->frameCount = 0; - trimTimedBufferQueueHead_l("getNextBuffer; no transform"); - return NO_ERROR; - } - - if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) { - if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS, - &headLocalPTS)) { - buffer->raw = NULL; - buffer->frameCount = 0; - return INVALID_OPERATION; - } - } else { - headLocalPTS = transformedPTS; - } - } - - // adjust the head buffer's PTS to reflect the portion of the head buffer - // that has already been consumed - int64_t effectivePTS = headLocalPTS + - ((head.position() / mCblk->frameSize) * mLocalTimeFreq / sampleRate()); - - // Calculate the delta in samples between the head of the input buffer - // queue and the start of the next output buffer that will be written. - // If the transformation fails because of over or underflow, it means - // that the sample's position in the output stream is so far out of - // whack that it should just be dropped. - int64_t sampleDelta; - if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) { - ALOGV("*** head buffer is too far from PTS: dropped buffer"); - trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from" - " mix"); - continue; - } - if (!mLocalTimeToSampleTransform.doForwardTransform( - (effectivePTS - pts) << 32, &sampleDelta)) { - ALOGV("*** too late during sample rate transform: dropped buffer"); - trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample"); - continue; - } - - ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld" - " sampleDelta=[%d.%08x]", - head.pts(), head.position(), pts, - static_cast<int32_t>((sampleDelta >= 0 ? 
0 : 1) - + (sampleDelta >> 32)), - static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF)); - - // if the delta between the ideal placement for the next input sample and - // the current output position is within this threshold, then we will - // concatenate the next input samples to the previous output - const int64_t kSampleContinuityThreshold = - (static_cast<int64_t>(sampleRate()) << 32) / 250; - - // if this is the first buffer of audio that we're emitting from this track - // then it should be almost exactly on time. - const int64_t kSampleStartupThreshold = 1LL << 32; - - if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) || - (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) { - // the next input is close enough to being on time, so concatenate it - // with the last output - timedYieldSamples_l(buffer); - - ALOGVV("*** on time: head.pos=%d frameCount=%u", - head.position(), buffer->frameCount); - return NO_ERROR; - } - - // Looks like our output is not on time. Reset our on timed status. - // Next time we mix samples from our input queue, then should be within - // the StartupThreshold. 
- mTimedAudioOutputOnTime = false; - if (sampleDelta > 0) { - // the gap between the current output position and the proper start of - // the next input sample is too big, so fill it with silence - uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32; - - timedYieldSilence_l(framesUntilNextInput, buffer); - ALOGV("*** silence: frameCount=%u", buffer->frameCount); - return NO_ERROR; - } else { - // the next input sample is late - uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32)); - size_t onTimeSamplePosition = - head.position() + lateFrames * mCblk->frameSize; - - if (onTimeSamplePosition > head.buffer()->size()) { - // all the remaining samples in the head are too late, so - // drop it and move on - ALOGV("*** too late: dropped buffer"); - trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer"); - continue; - } else { - // skip over the late samples - head.setPosition(onTimeSamplePosition); - - // yield the available samples - timedYieldSamples_l(buffer); - - ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount); - return NO_ERROR; - } - } - } -} - -// Yield samples from the timed buffer queue head up to the given output -// buffer's capacity. 
-// -// Caller must hold mTimedBufferQueueLock -void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l( - AudioBufferProvider::Buffer* buffer) { - - const TimedBuffer& head = mTimedBufferQueue[0]; - - buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) + - head.position()); - - uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) / - mCblk->frameSize); - size_t framesRequested = buffer->frameCount; - buffer->frameCount = min(framesLeftInHead, framesRequested); - - mQueueHeadInFlight = true; - mTimedAudioOutputOnTime = true; -} - -// Yield samples of silence up to the given output buffer's capacity -// -// Caller must hold mTimedBufferQueueLock -void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l( - uint32_t numFrames, AudioBufferProvider::Buffer* buffer) { - - // lazily allocate a buffer filled with silence - if (mTimedSilenceBufferSize < numFrames * mCblk->frameSize) { - delete [] mTimedSilenceBuffer; - mTimedSilenceBufferSize = numFrames * mCblk->frameSize; - mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize]; - memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize); - } - - buffer->raw = mTimedSilenceBuffer; - size_t framesRequested = buffer->frameCount; - buffer->frameCount = min(numFrames, framesRequested); - - mTimedAudioOutputOnTime = false; -} - -// AudioBufferProvider interface -void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer( - AudioBufferProvider::Buffer* buffer) { - - Mutex::Autolock _l(mTimedBufferQueueLock); - - // If the buffer which was just released is part of the buffer at the head - // of the queue, be sure to update the amt of the buffer which has been - // consumed. If the buffer being returned is not part of the head of the - // queue, its either because the buffer is part of the silence buffer, or - // because the head of the timed queue was trimmed after the mixer called - // getNextBuffer but before the mixer called releaseBuffer. 
- if (buffer->raw == mTimedSilenceBuffer) { - ALOG_ASSERT(!mQueueHeadInFlight, - "Queue head in flight during release of silence buffer!"); - goto done; - } - - ALOG_ASSERT(mQueueHeadInFlight, - "TimedTrack::releaseBuffer of non-silence buffer, but no queue" - " head in flight."); - - if (mTimedBufferQueue.size()) { - TimedBuffer& head = mTimedBufferQueue.editItemAt(0); - - void* start = head.buffer()->pointer(); - void* end = reinterpret_cast<void*>( - reinterpret_cast<uint8_t*>(head.buffer()->pointer()) - + head.buffer()->size()); - - ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end), - "released buffer not within the head of the timed buffer" - " queue; qHead = [%p, %p], released buffer = %p", - start, end, buffer->raw); - - head.setPosition(head.position() + - (buffer->frameCount * mCblk->frameSize)); - mQueueHeadInFlight = false; - - ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount, - "Bad bookkeeping during releaseBuffer! Should have at" - " least %u queued frames, but we think we have only %u", - buffer->frameCount, mFramesPendingInQueue); - - mFramesPendingInQueue -= buffer->frameCount; - - if ((static_cast<size_t>(head.position()) >= head.buffer()->size()) - || mTrimQueueHeadOnRelease) { - trimTimedBufferQueueHead_l("releaseBuffer"); - mTrimQueueHeadOnRelease = false; - } - } else { - LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" - " buffers in the timed buffer queue"); - } - -done: - buffer->raw = 0; - buffer->frameCount = 0; -} - -size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const { - Mutex::Autolock _l(mTimedBufferQueueLock); - return mFramesPendingInQueue; -} - -AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer() - : mPTS(0), mPosition(0) {} - -AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer( - const sp<IMemory>& buffer, int64_t pts) - : mBuffer(buffer), mPTS(pts), mPosition(0) {} - -// ---------------------------------------------------------------------------- 
- -// RecordTrack constructor must be called with AudioFlinger::mLock held -AudioFlinger::RecordThread::RecordTrack::RecordTrack( - RecordThread *thread, - const sp<Client>& client, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - int sessionId) - : TrackBase(thread, client, sampleRate, format, - channelMask, frameCount, 0 /*sharedBuffer*/, sessionId), - mOverflow(false) -{ - if (mCblk != NULL) { - ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer); - if (format == AUDIO_FORMAT_PCM_16_BIT) { - mCblk->frameSize = mChannelCount * sizeof(int16_t); - } else if (format == AUDIO_FORMAT_PCM_8_BIT) { - mCblk->frameSize = mChannelCount * sizeof(int8_t); - } else { - mCblk->frameSize = sizeof(int8_t); - } - } -} - -AudioFlinger::RecordThread::RecordTrack::~RecordTrack() -{ - ALOGV("%s", __func__); -} - -// AudioBufferProvider interface -status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) -{ - audio_track_cblk_t* cblk = this->cblk(); - uint32_t framesAvail; - uint32_t framesReq = buffer->frameCount; - - // Check if last stepServer failed, try to step now - if (mStepServerFailed) { - if (!step()) goto getNextBuffer_exit; - ALOGV("stepServer recovered"); - mStepServerFailed = false; - } - - framesAvail = cblk->framesAvailable_l(); - - if (CC_LIKELY(framesAvail)) { - uint32_t s = cblk->server; - uint32_t bufferEnd = cblk->serverBase + cblk->frameCount; - - if (framesReq > framesAvail) { - framesReq = framesAvail; - } - if (framesReq > bufferEnd - s) { - framesReq = bufferEnd - s; - } - - buffer->raw = getBuffer(s, framesReq); - buffer->frameCount = framesReq; - return NO_ERROR; - } - -getNextBuffer_exit: - buffer->raw = NULL; - buffer->frameCount = 0; - return NOT_ENOUGH_DATA; -} - -status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event, - int triggerSession) -{ - sp<ThreadBase> thread = mThread.promote(); - 
if (thread != 0) { - RecordThread *recordThread = (RecordThread *)thread.get(); - return recordThread->start(this, event, triggerSession); - } else { - return BAD_VALUE; - } -} - -void AudioFlinger::RecordThread::RecordTrack::stop() -{ - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - RecordThread *recordThread = (RecordThread *)thread.get(); - recordThread->mLock.lock(); - bool doStop = recordThread->stop_l(this); - if (doStop) { - TrackBase::reset(); - // Force overrun condition to avoid false overrun callback until first data is - // read from buffer - android_atomic_or(CBLK_UNDERRUN_ON, &mCblk->flags); - } - recordThread->mLock.unlock(); - if (doStop) { - AudioSystem::stopInput(recordThread->id()); - } - } -} - -/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result) -{ - result.append(" Clien Fmt Chn mask Session Buf S SRate Serv User FrameCount\n"); -} - -void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size) -{ - snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %05u %08x %08x %05d\n", - (mClient == 0) ? 
getpid_cached : mClient->pid(), - mFormat, - mChannelMask, - mSessionId, - mFrameCount, - mState, - mCblk->sampleRate, - mCblk->server, - mCblk->user, - mCblk->frameCount); -} - - -// ---------------------------------------------------------------------------- - -AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( - PlaybackThread *playbackThread, - DuplicatingThread *sourceThread, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount) - : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, - NULL, 0, IAudioFlinger::TRACK_DEFAULT), - mActive(false), mSourceThread(sourceThread) -{ - - if (mCblk != NULL) { - mCblk->flags |= CBLK_DIRECTION_OUT; - mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t); - mOutBuffer.frameCount = 0; - playbackThread->mTracks.add(this); - ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, " \ - "mCblk->frameCount %d, mCblk->sampleRate %d, mChannelMask 0x%08x mBufferEnd %p", - mCblk, mBuffer, mCblk->buffers, - mCblk->frameCount, mCblk->sampleRate, mChannelMask, mBufferEnd); - } else { - ALOGW("Error creating output track on thread %p", playbackThread); - } -} - -AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack() -{ - clearBufferQueue(); -} - -status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event, - int triggerSession) -{ - status_t status = Track::start(event, triggerSession); - if (status != NO_ERROR) { - return status; - } - - mActive = true; - mRetryCount = 127; - return status; -} - -void AudioFlinger::PlaybackThread::OutputTrack::stop() -{ - Track::stop(); - clearBufferQueue(); - mOutBuffer.frameCount = 0; - mActive = false; -} - -bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames) -{ - Buffer *pInBuffer; - Buffer inBuffer; - uint32_t channelCount = mChannelCount; - bool outputBufferFull = false; - inBuffer.frameCount = frames; - inBuffer.i16 = 
data; - - uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs(); - - if (!mActive && frames != 0) { - start(); - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - MixerThread *mixerThread = (MixerThread *)thread.get(); - if (mCblk->frameCount > frames){ - if (mBufferQueue.size() < kMaxOverFlowBuffers) { - uint32_t startFrames = (mCblk->frameCount - frames); - pInBuffer = new Buffer; - pInBuffer->mBuffer = new int16_t[startFrames * channelCount]; - pInBuffer->frameCount = startFrames; - pInBuffer->i16 = pInBuffer->mBuffer; - memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t)); - mBufferQueue.add(pInBuffer); - } else { - ALOGW ("OutputTrack::write() %p no more buffers in queue", this); - } - } - } - } - - while (waitTimeLeftMs) { - // First write pending buffers, then new data - if (mBufferQueue.size()) { - pInBuffer = mBufferQueue.itemAt(0); - } else { - pInBuffer = &inBuffer; - } - - if (pInBuffer->frameCount == 0) { - break; - } - - if (mOutBuffer.frameCount == 0) { - mOutBuffer.frameCount = pInBuffer->frameCount; - nsecs_t startTime = systemTime(); - if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) { - ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this, mThread.unsafe_get()); - outputBufferFull = true; - break; - } - uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime); - if (waitTimeLeftMs >= waitTimeMs) { - waitTimeLeftMs -= waitTimeMs; - } else { - waitTimeLeftMs = 0; - } - } - - uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? 
mOutBuffer.frameCount : pInBuffer->frameCount; - memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t)); - mCblk->stepUser(outFrames); - pInBuffer->frameCount -= outFrames; - pInBuffer->i16 += outFrames * channelCount; - mOutBuffer.frameCount -= outFrames; - mOutBuffer.i16 += outFrames * channelCount; - - if (pInBuffer->frameCount == 0) { - if (mBufferQueue.size()) { - mBufferQueue.removeAt(0); - delete [] pInBuffer->mBuffer; - delete pInBuffer; - ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this, mThread.unsafe_get(), mBufferQueue.size()); - } else { - break; - } - } - } - - // If we could not write all frames, allocate a buffer and queue it for next time. - if (inBuffer.frameCount) { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0 && !thread->standby()) { - if (mBufferQueue.size() < kMaxOverFlowBuffers) { - pInBuffer = new Buffer; - pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount]; - pInBuffer->frameCount = inBuffer.frameCount; - pInBuffer->i16 = pInBuffer->mBuffer; - memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount * sizeof(int16_t)); - mBufferQueue.add(pInBuffer); - ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this, mThread.unsafe_get(), mBufferQueue.size()); - } else { - ALOGW("OutputTrack::write() %p thread %p no more overflow buffers", mThread.unsafe_get(), this); - } - } - } - - // Calling write() with a 0 length buffer, means that no more data will be written: - // If no more buffers are pending, fill output track buffer to make sure it is started - // by output mixer. 
- if (frames == 0 && mBufferQueue.size() == 0) { - if (mCblk->user < mCblk->frameCount) { - frames = mCblk->frameCount - mCblk->user; - pInBuffer = new Buffer; - pInBuffer->mBuffer = new int16_t[frames * channelCount]; - pInBuffer->frameCount = frames; - pInBuffer->i16 = pInBuffer->mBuffer; - memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t)); - mBufferQueue.add(pInBuffer); - } else if (mActive) { - stop(); - } - } - - return outputBufferFull; -} - -status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs) -{ - int active; - status_t result; - audio_track_cblk_t* cblk = mCblk; - uint32_t framesReq = buffer->frameCount; - -// ALOGV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server); - buffer->frameCount = 0; - - uint32_t framesAvail = cblk->framesAvailable(); - - - if (framesAvail == 0) { - Mutex::Autolock _l(cblk->lock); - goto start_loop_here; - while (framesAvail == 0) { - active = mActive; - if (CC_UNLIKELY(!active)) { - ALOGV("Not active and NO_MORE_BUFFERS"); - return NO_MORE_BUFFERS; - } - result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs)); - if (result != NO_ERROR) { - return NO_MORE_BUFFERS; - } - // read the server count again - start_loop_here: - framesAvail = cblk->framesAvailable_l(); - } - } - -// if (framesAvail < framesReq) { -// return NO_MORE_BUFFERS; -// } - - if (framesReq > framesAvail) { - framesReq = framesAvail; - } - - uint32_t u = cblk->user; - uint32_t bufferEnd = cblk->userBase + cblk->frameCount; - - if (framesReq > bufferEnd - u) { - framesReq = bufferEnd - u; - } - - buffer->frameCount = framesReq; - buffer->raw = (void *)cblk->buffer(u); - return NO_ERROR; -} - - -void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue() -{ - size_t size = mBufferQueue.size(); - - for (size_t i = 0; i < size; i++) { - Buffer *pBuffer = mBufferQueue.itemAt(i); - delete [] pBuffer->mBuffer; - delete pBuffer; - } - 
mBufferQueue.clear(); -} // ---------------------------------------------------------------------------- @@ -5790,98 +1198,15 @@ void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who) mAudioFlinger->removeNotificationClient(mPid); } -// ---------------------------------------------------------------------------- - -AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track) - : BnAudioTrack(), - mTrack(track) -{ -} - -AudioFlinger::TrackHandle::~TrackHandle() { - // just stop the track on deletion, associated resources - // will be freed from the main thread once all pending buffers have - // been played. Unless it's not in the active track list, in which - // case we free everything now... - mTrack->destroy(); -} - -sp<IMemory> AudioFlinger::TrackHandle::getCblk() const { - return mTrack->getCblk(); -} - -status_t AudioFlinger::TrackHandle::start() { - return mTrack->start(); -} - -void AudioFlinger::TrackHandle::stop() { - mTrack->stop(); -} - -void AudioFlinger::TrackHandle::flush() { - mTrack->flush(); -} - -void AudioFlinger::TrackHandle::mute(bool e) { - mTrack->mute(e); -} - -void AudioFlinger::TrackHandle::pause() { - mTrack->pause(); -} - -status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId) -{ - return mTrack->attachAuxEffect(EffectId); -} - -status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size, - sp<IMemory>* buffer) { - if (!mTrack->isTimedTrack()) - return INVALID_OPERATION; - - PlaybackThread::TimedTrack* tt = - reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); - return tt->allocateTimedBuffer(size, buffer); -} - -status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer, - int64_t pts) { - if (!mTrack->isTimedTrack()) - return INVALID_OPERATION; - - PlaybackThread::TimedTrack* tt = - reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); - return tt->queueTimedBuffer(buffer, pts); -} - -status_t 
AudioFlinger::TrackHandle::setMediaTimeTransform( - const LinearTransform& xform, int target) { - - if (!mTrack->isTimedTrack()) - return INVALID_OPERATION; - - PlaybackThread::TimedTrack* tt = - reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); - return tt->setMediaTimeTransform( - xform, static_cast<TimedAudioTrack::TargetTimeline>(target)); -} - -status_t AudioFlinger::TrackHandle::onTransact( - uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) -{ - return BnAudioTrack::onTransact(code, data, reply, flags); -} // ---------------------------------------------------------------------------- sp<IAudioRecord> AudioFlinger::openRecord( - pid_t pid, audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, + size_t frameCount, IAudioFlinger::track_flags_t flags, pid_t tid, int *sessionId, @@ -5910,6 +1235,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( goto Exit; } + pid_t pid = IPCThreadState::self()->getCallingPid(); client = registerPid_l(pid); // If no audio session id is provided, create one here @@ -5921,13 +1247,14 @@ sp<IAudioRecord> AudioFlinger::openRecord( *sessionId = lSessionId; } } - // create new record track. The record track uses one track in mHardwareMixerThread by convention. + // create new record track. + // The record track uses one track in mHardwareMixerThread by convention. 
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask, frameCount, lSessionId, flags, tid, &lStatus); } if (lStatus != NO_ERROR) { - // remove local strong reference to Client before deleting the RecordTrack so that the Client - // destructor is called by the TrackBase destructor with mLock held + // remove local strong reference to Client before deleting the RecordTrack so that the + // Client destructor is called by the TrackBase destructor with mLock held client.clear(); recordTrack.clear(); goto Exit; @@ -5944,891 +1271,6 @@ Exit: return recordHandle; } -// ---------------------------------------------------------------------------- - -AudioFlinger::RecordHandle::RecordHandle(const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack) - : BnAudioRecord(), - mRecordTrack(recordTrack) -{ -} - -AudioFlinger::RecordHandle::~RecordHandle() { - stop_nonvirtual(); - mRecordTrack->destroy(); -} - -sp<IMemory> AudioFlinger::RecordHandle::getCblk() const { - return mRecordTrack->getCblk(); -} - -status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event, int triggerSession) { - ALOGV("RecordHandle::start()"); - return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession); -} - -void AudioFlinger::RecordHandle::stop() { - stop_nonvirtual(); -} - -void AudioFlinger::RecordHandle::stop_nonvirtual() { - ALOGV("RecordHandle::stop()"); - mRecordTrack->stop(); -} - -status_t AudioFlinger::RecordHandle::onTransact( - uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) -{ - return BnAudioRecord::onTransact(code, data, reply, flags); -} - -// ---------------------------------------------------------------------------- - -AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, - AudioStreamIn *input, - uint32_t sampleRate, - audio_channel_mask_t channelMask, - audio_io_handle_t id, - audio_devices_t device) : - ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD), 
- mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL), - // mRsmpInIndex and mInputBytes set by readInputParameters() - mReqChannelCount(popcount(channelMask)), - mReqSampleRate(sampleRate) - // mBytesRead is only meaningful while active, and so is cleared in start() - // (but might be better to also clear here for dump?) -{ - snprintf(mName, kNameLength, "AudioIn_%X", id); - - readInputParameters(); -} - - -AudioFlinger::RecordThread::~RecordThread() -{ - delete[] mRsmpInBuffer; - delete mResampler; - delete[] mRsmpOutBuffer; -} - -void AudioFlinger::RecordThread::onFirstRef() -{ - run(mName, PRIORITY_URGENT_AUDIO); -} - -status_t AudioFlinger::RecordThread::readyToRun() -{ - status_t status = initCheck(); - ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this); - return status; -} - -bool AudioFlinger::RecordThread::threadLoop() -{ - AudioBufferProvider::Buffer buffer; - sp<RecordTrack> activeTrack; - Vector< sp<EffectChain> > effectChains; - - nsecs_t lastWarning = 0; - - inputStandBy(); - acquireWakeLock(); - - // used to verify we've read at least once before evaluating how many bytes were read - bool readOnce = false; - - // start recording - while (!exitPending()) { - - processConfigEvents(); - - { // scope for mLock - Mutex::Autolock _l(mLock); - checkForNewParameters_l(); - if (mActiveTrack == 0 && mConfigEvents.isEmpty()) { - standby(); - - if (exitPending()) break; - - releaseWakeLock_l(); - ALOGV("RecordThread: loop stopping"); - // go to sleep - mWaitWorkCV.wait(mLock); - ALOGV("RecordThread: loop starting"); - acquireWakeLock_l(); - continue; - } - if (mActiveTrack != 0) { - if (mActiveTrack->mState == TrackBase::PAUSING) { - standby(); - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (mActiveTrack->mState == TrackBase::RESUMING) { - if (mReqChannelCount != mActiveTrack->channelCount()) { - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (readOnce) { - // record start succeeds 
only if first read from audio input - // succeeds - if (mBytesRead >= 0) { - mActiveTrack->mState = TrackBase::ACTIVE; - } else { - mActiveTrack.clear(); - } - mStartStopCond.broadcast(); - } - mStandby = false; - } else if (mActiveTrack->mState == TrackBase::TERMINATED) { - removeTrack_l(mActiveTrack); - mActiveTrack.clear(); - } - } - lockEffectChains_l(effectChains); - } - - if (mActiveTrack != 0) { - if (mActiveTrack->mState != TrackBase::ACTIVE && - mActiveTrack->mState != TrackBase::RESUMING) { - unlockEffectChains(effectChains); - usleep(kRecordThreadSleepUs); - continue; - } - for (size_t i = 0; i < effectChains.size(); i ++) { - effectChains[i]->process_l(); - } - - buffer.frameCount = mFrameCount; - if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) { - readOnce = true; - size_t framesOut = buffer.frameCount; - if (mResampler == NULL) { - // no resampling - while (framesOut) { - size_t framesIn = mFrameCount - mRsmpInIndex; - if (framesIn) { - int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize; - int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * mActiveTrack->mCblk->frameSize; - if (framesIn > framesOut) - framesIn = framesOut; - mRsmpInIndex += framesIn; - framesOut -= framesIn; - if ((int)mChannelCount == mReqChannelCount || - mFormat != AUDIO_FORMAT_PCM_16_BIT) { - memcpy(dst, src, framesIn * mFrameSize); - } else { - if (mChannelCount == 1) { - upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } else { - downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } - } - } - if (framesOut && mFrameCount == mRsmpInIndex) { - if (framesOut == mFrameCount && - ((int)mChannelCount == mReqChannelCount || mFormat != AUDIO_FORMAT_PCM_16_BIT)) { - mBytesRead = mInput->stream->read(mInput->stream, buffer.raw, mInputBytes); - framesOut = 0; - } else { - mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes); - mRsmpInIndex = 0; - } - if 
(mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) - { - ALOGE("Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - mRsmpInIndex = mFrameCount; - framesOut = 0; - buffer.frameCount = 0; - } - } - } - } else { - // resampling - - memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t)); - // alter output frame count as if we were expecting stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - framesOut >>= 1; - } - mResampler->resample(mRsmpOutBuffer, framesOut, this /* AudioBufferProvider* */); - // ditherAndClamp() works as long as all buffers returned by mActiveTrack->getNextBuffer() - // are 32 bit aligned which should be always true. - if (mChannelCount == 2 && mReqChannelCount == 1) { - ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut); - // the resampler always outputs stereo samples: do post stereo to mono conversion - downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer, - framesOut); - } else { - ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut); - } - - } - if (mFramestoDrop == 0) { - mActiveTrack->releaseBuffer(&buffer); - } else { - if (mFramestoDrop > 0) { - mFramestoDrop -= buffer.frameCount; - if (mFramestoDrop <= 0) { - clearSyncStartEvent(); - } - } else { - mFramestoDrop += buffer.frameCount; - if (mFramestoDrop >= 0 || mSyncStartEvent == 0 || - mSyncStartEvent->isCancelled()) { - ALOGW("Synced record %s, session %d, trigger session %d", - (mFramestoDrop >= 0) ? "timed out" : "cancelled", - mActiveTrack->sessionId(), - (mSyncStartEvent != 0) ? 
mSyncStartEvent->triggerSession() : 0); - clearSyncStartEvent(); - } - } - } - mActiveTrack->clearOverflow(); - } - // client isn't retrieving buffers fast enough - else { - if (!mActiveTrack->setOverflow()) { - nsecs_t now = systemTime(); - if ((now - lastWarning) > kWarningThrottleNs) { - ALOGW("RecordThread: buffer overflow"); - lastWarning = now; - } - } - // Release the processor for a while before asking for a new buffer. - // This will give the application more chance to read from the buffer and - // clear the overflow. - usleep(kRecordThreadSleepUs); - } - } - // enable changes in effect chain - unlockEffectChains(effectChains); - effectChains.clear(); - } - - standby(); - - { - Mutex::Autolock _l(mLock); - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } - - releaseWakeLock(); - - ALOGV("RecordThread %p exiting", this); - return false; -} - -void AudioFlinger::RecordThread::standby() -{ - if (!mStandby) { - inputStandBy(); - mStandby = true; - } -} - -void AudioFlinger::RecordThread::inputStandBy() -{ - mInput->stream->common.standby(&mInput->stream->common); -} - -sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( - const sp<AudioFlinger::Client>& client, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - int sessionId, - IAudioFlinger::track_flags_t flags, - pid_t tid, - status_t *status) -{ - sp<RecordTrack> track; - status_t lStatus; - - lStatus = initCheck(); - if (lStatus != NO_ERROR) { - ALOGE("Audio driver not initialized."); - goto Exit; - } - - // FIXME use flags and tid similar to createTrack_l() - - { // scope for mLock - Mutex::Autolock _l(mLock); - - track = new RecordTrack(this, client, sampleRate, - format, channelMask, frameCount, sessionId); - - if (track->getCblk() == 0) { - lStatus = NO_MEMORY; - goto Exit; - } - mTracks.add(track); - - // disable AEC and NS if the device is a BT SCO headset supporting those pre processings - bool 
suspend = audio_is_bluetooth_sco_device(mInDevice) && - mAudioFlinger->btNrecIsOff(); - setEffectSuspended_l(FX_IID_AEC, suspend, sessionId); - setEffectSuspended_l(FX_IID_NS, suspend, sessionId); - } - lStatus = NO_ERROR; - -Exit: - if (status) { - *status = lStatus; - } - return track; -} - -status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack, - AudioSystem::sync_event_t event, - int triggerSession) -{ - ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession); - sp<ThreadBase> strongMe = this; - status_t status = NO_ERROR; - - if (event == AudioSystem::SYNC_EVENT_NONE) { - clearSyncStartEvent(); - } else if (event != AudioSystem::SYNC_EVENT_SAME) { - mSyncStartEvent = mAudioFlinger->createSyncEvent(event, - triggerSession, - recordTrack->sessionId(), - syncStartEventCallback, - this); - // Sync event can be cancelled by the trigger session if the track is not in a - // compatible state in which case we start record immediately - if (mSyncStartEvent->isCancelled()) { - clearSyncStartEvent(); - } else { - // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs - mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000); - } - } - - { - AutoMutex lock(mLock); - if (mActiveTrack != 0) { - if (recordTrack != mActiveTrack.get()) { - status = -EBUSY; - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - mActiveTrack->mState = TrackBase::ACTIVE; - } - return status; - } - - recordTrack->mState = TrackBase::IDLE; - mActiveTrack = recordTrack; - mLock.unlock(); - status_t status = AudioSystem::startInput(mId); - mLock.lock(); - if (status != NO_ERROR) { - mActiveTrack.clear(); - clearSyncStartEvent(); - return status; - } - mRsmpInIndex = mFrameCount; - mBytesRead = 0; - if (mResampler != NULL) { - mResampler->reset(); - } - mActiveTrack->mState = TrackBase::RESUMING; - // signal thread to start - ALOGV("Signal record thread"); - mWaitWorkCV.broadcast(); - // 
do not wait for mStartStopCond if exiting - if (exitPending()) { - mActiveTrack.clear(); - status = INVALID_OPERATION; - goto startError; - } - mStartStopCond.wait(mLock); - if (mActiveTrack == 0) { - ALOGV("Record failed to start"); - status = BAD_VALUE; - goto startError; - } - ALOGV("Record started OK"); - return status; - } -startError: - AudioSystem::stopInput(mId); - clearSyncStartEvent(); - return status; -} - -void AudioFlinger::RecordThread::clearSyncStartEvent() -{ - if (mSyncStartEvent != 0) { - mSyncStartEvent->cancel(); - } - mSyncStartEvent.clear(); - mFramestoDrop = 0; -} - -void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event) -{ - sp<SyncEvent> strongEvent = event.promote(); - - if (strongEvent != 0) { - RecordThread *me = (RecordThread *)strongEvent->cookie(); - me->handleSyncStartEvent(strongEvent); - } -} - -void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event) -{ - if (event == mSyncStartEvent) { - // TODO: use actual buffer filling status instead of 2 buffers when info is available - // from audio HAL - mFramestoDrop = mFrameCount * 2; - } -} - -bool AudioFlinger::RecordThread::stop_l(RecordThread::RecordTrack* recordTrack) { - ALOGV("RecordThread::stop"); - if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) { - return false; - } - recordTrack->mState = TrackBase::PAUSING; - // do not wait for mStartStopCond if exiting - if (exitPending()) { - return true; - } - mStartStopCond.wait(mLock); - // if we have been restarted, recordTrack == mActiveTrack.get() here - if (exitPending() || recordTrack != mActiveTrack.get()) { - ALOGV("Record stopped OK"); - return true; - } - return false; -} - -bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const -{ - return false; -} - -status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event) -{ -#if 0 // This branch is currently dead code, but is preserved in case it will be 
needed in future - if (!isValidSyncEvent(event)) { - return BAD_VALUE; - } - - int eventSession = event->triggerSession(); - status_t ret = NAME_NOT_FOUND; - - Mutex::Autolock _l(mLock); - - for (size_t i = 0; i < mTracks.size(); i++) { - sp<RecordTrack> track = mTracks[i]; - if (eventSession == track->sessionId()) { - (void) track->setSyncEvent(event); - ret = NO_ERROR; - } - } - return ret; -#else - return BAD_VALUE; -#endif -} - -void AudioFlinger::RecordThread::RecordTrack::destroy() -{ - // see comments at AudioFlinger::PlaybackThread::Track::destroy() - sp<RecordTrack> keep(this); - { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - if (mState == ACTIVE || mState == RESUMING) { - AudioSystem::stopInput(thread->id()); - } - AudioSystem::releaseInput(thread->id()); - Mutex::Autolock _l(thread->mLock); - RecordThread *recordThread = (RecordThread *) thread.get(); - recordThread->destroyTrack_l(this); - } - } -} - -// destroyTrack_l() must be called with ThreadBase::mLock held -void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track) -{ - track->mState = TrackBase::TERMINATED; - // active tracks are removed by threadLoop() - if (mActiveTrack != track) { - removeTrack_l(track); - } -} - -void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track) -{ - mTracks.remove(track); - // need anything related to effects here? 
-} - -void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args) -{ - dumpInternals(fd, args); - dumpTracks(fd, args); - dumpEffectChains(fd, args); -} - -void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "\nInput thread %p internals\n", this); - result.append(buffer); - - if (mActiveTrack != 0) { - snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex); - result.append(buffer); - snprintf(buffer, SIZE, "In size: %d\n", mInputBytes); - result.append(buffer); - snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL)); - result.append(buffer); - snprintf(buffer, SIZE, "Out channel count: %d\n", mReqChannelCount); - result.append(buffer); - snprintf(buffer, SIZE, "Out sample rate: %d\n", mReqSampleRate); - result.append(buffer); - } else { - result.append("No active record client\n"); - } - - write(fd, result.string(), result.size()); - - dumpBase(fd, args); -} - -void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "Input thread %p tracks\n", this); - result.append(buffer); - RecordTrack::appendDumpHeader(result); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<RecordTrack> track = mTracks[i]; - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); - } - } - - if (mActiveTrack != 0) { - snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this); - result.append(buffer); - RecordTrack::appendDumpHeader(result); - mActiveTrack->dump(buffer, SIZE); - result.append(buffer); - - } - write(fd, result.string(), result.size()); -} - -// AudioBufferProvider interface -status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) -{ - size_t framesReq = buffer->frameCount; - size_t framesReady = mFrameCount - mRsmpInIndex; - int 
channelCount; - - if (framesReady == 0) { - mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) { - ALOGE("RecordThread::getNextBuffer() Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - buffer->raw = NULL; - buffer->frameCount = 0; - return NOT_ENOUGH_DATA; - } - mRsmpInIndex = 0; - framesReady = mFrameCount; - } - - if (framesReq > framesReady) { - framesReq = framesReady; - } - - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount; - buffer->frameCount = framesReq; - return NO_ERROR; -} - -// AudioBufferProvider interface -void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer) -{ - mRsmpInIndex += buffer->frameCount; - buffer->frameCount = 0; -} - -bool AudioFlinger::RecordThread::checkForNewParameters_l() -{ - bool reconfig = false; - - while (!mNewParameters.isEmpty()) { - status_t status = NO_ERROR; - String8 keyValuePair = mNewParameters[0]; - AudioParameter param = AudioParameter(keyValuePair); - int value; - audio_format_t reqFormat = mFormat; - int reqSamplingRate = mReqSampleRate; - int reqChannelCount = mReqChannelCount; - - if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { - reqSamplingRate = value; - reconfig = true; - } - if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { - reqFormat = (audio_format_t) value; - reconfig = true; - } - if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { - reqChannelCount = popcount(value); - reconfig = true; - } - if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { - // do not accept frame count changes if tracks are open as the track 
buffer - // size depends on frame count and correct behavior would not be guaranteed - // if frame count is changed after track creation - if (mActiveTrack != 0) { - status = INVALID_OPERATION; - } else { - reconfig = true; - } - } - if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { - // forward device change to effects that have requested to be - // aware of attached audio device. - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->setDevice_l(value); - } - - // store input device and output device but do not forward output device to audio HAL. - // Note that status is ignored by the caller for output device - // (see AudioFlinger::setParameters() - if (audio_is_output_devices(value)) { - mOutDevice = value; - status = BAD_VALUE; - } else { - mInDevice = value; - // disable AEC and NS if the device is a BT SCO headset supporting those pre processings - if (mTracks.size() > 0) { - bool suspend = audio_is_bluetooth_sco_device(mInDevice) && - mAudioFlinger->btNrecIsOff(); - for (size_t i = 0; i < mTracks.size(); i++) { - sp<RecordTrack> track = mTracks[i]; - setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId()); - setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId()); - } - } - } - } - if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR && - mAudioSource != (audio_source_t)value) { - // forward device change to effects that have requested to be - // aware of attached audio device. 
- for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->setAudioSource_l((audio_source_t)value); - } - mAudioSource = (audio_source_t)value; - } - if (status == NO_ERROR) { - status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string()); - if (status == INVALID_OPERATION) { - inputStandBy(); - status = mInput->stream->common.set_parameters(&mInput->stream->common, - keyValuePair.string()); - } - if (reconfig) { - if (status == BAD_VALUE && - reqFormat == mInput->stream->common.get_format(&mInput->stream->common) && - reqFormat == AUDIO_FORMAT_PCM_16_BIT && - ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) && - popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 && - (reqChannelCount <= FCC_2)) { - status = NO_ERROR; - } - if (status == NO_ERROR) { - readInputParameters(); - sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED); - } - } - } - - mNewParameters.removeAt(0); - - mParamStatus = status; - mParamCond.signal(); - // wait for condition with time out in case the thread calling ThreadBase::setParameters() - // already timed out waiting for the status and will never signal the condition. 
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); - } - return reconfig; -} - -String8 AudioFlinger::RecordThread::getParameters(const String8& keys) -{ - char *s; - String8 out_s8 = String8(); - - Mutex::Autolock _l(mLock); - if (initCheck() != NO_ERROR) { - return out_s8; - } - - s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string()); - out_s8 = String8(s); - free(s); - return out_s8; -} - -void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { - AudioSystem::OutputDescriptor desc; - void *param2 = NULL; - - switch (event) { - case AudioSystem::INPUT_OPENED: - case AudioSystem::INPUT_CONFIG_CHANGED: - desc.channels = mChannelMask; - desc.samplingRate = mSampleRate; - desc.format = mFormat; - desc.frameCount = mFrameCount; - desc.latency = 0; - param2 = &desc; - break; - - case AudioSystem::INPUT_CLOSED: - default: - break; - } - mAudioFlinger->audioConfigChanged_l(event, mId, param2); -} - -void AudioFlinger::RecordThread::readInputParameters() -{ - delete mRsmpInBuffer; - // mRsmpInBuffer is always assigned a new[] below - delete mRsmpOutBuffer; - mRsmpOutBuffer = NULL; - delete mResampler; - mResampler = NULL; - - mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common); - mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common); - mChannelCount = (uint16_t)popcount(mChannelMask); - mFormat = mInput->stream->common.get_format(&mInput->stream->common); - mFrameSize = audio_stream_frame_size(&mInput->stream->common); - mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common); - mFrameCount = mInputBytes / mFrameSize; - mNormalFrameCount = mFrameCount; // not used by record, but used by input effects - mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount]; - - if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) - { - int channelCount; - // optimization: if mono to mono, use the resampler in stereo to 
stereo mode to avoid - // stereo to mono post process as the resampler always outputs stereo. - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - mResampler = AudioResampler::create(16, channelCount, mReqSampleRate); - mResampler->setSampleRate(mSampleRate); - mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); - mRsmpOutBuffer = new int32_t[mFrameCount * 2]; - - // optmization: if mono to mono, alter input frame count as if we were inputing stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - mFrameCount >>= 1; - } - - } - mRsmpInIndex = mFrameCount; -} - -unsigned int AudioFlinger::RecordThread::getInputFramesLost() -{ - Mutex::Autolock _l(mLock); - if (initCheck() != NO_ERROR) { - return 0; - } - - return mInput->stream->get_input_frames_lost(mInput->stream); -} - -uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId) const -{ - Mutex::Autolock _l(mLock); - uint32_t result = 0; - if (getEffectChain_l(sessionId) != 0) { - result = EFFECT_SESSION; - } - - for (size_t i = 0; i < mTracks.size(); ++i) { - if (sessionId == mTracks[i]->sessionId()) { - result |= TRACK_SESSION; - break; - } - } - - return result; -} - -KeyedVector<int, bool> AudioFlinger::RecordThread::sessionIds() const -{ - KeyedVector<int, bool> ids; - Mutex::Autolock _l(mLock); - for (size_t j = 0; j < mTracks.size(); ++j) { - sp<RecordThread::RecordTrack> track = mTracks[j]; - int sessionId = track->sessionId(); - if (ids.indexOfKey(sessionId) < 0) { - ids.add(sessionId, true); - } - } - return ids; -} - -AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::clearInput() -{ - Mutex::Autolock _l(mLock); - AudioStreamIn *input = mInput; - mInput = NULL; - return input; -} - -// this method must always be called either with ThreadBase mLock held or inside the thread loop -audio_stream_t* AudioFlinger::RecordThread::stream() const -{ - if (mInput == NULL) { - return NULL; - } - return 
&mInput->stream->common; -} // ---------------------------------------------------------------------------- @@ -6924,14 +1366,14 @@ audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name) // ---------------------------------------------------------------------------- -int32_t AudioFlinger::getPrimaryOutputSamplingRate() +uint32_t AudioFlinger::getPrimaryOutputSamplingRate() { Mutex::Autolock _l(mLock); PlaybackThread *thread = primaryPlaybackThread_l(); return thread != NULL ? thread->sampleRate() : 0; } -int32_t AudioFlinger::getPrimaryOutputFrameCount() +size_t AudioFlinger::getPrimaryOutputFrameCount() { Mutex::Autolock _l(mLock); PlaybackThread *thread = primaryPlaybackThread_l(); @@ -6989,7 +1431,8 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, &outStream); mHardwareStatus = AUDIO_HW_IDLE; - ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d", + ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, " + "Channels %x, status %d", outStream, config.sample_rate, config.format, @@ -7042,7 +1485,8 @@ audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1, MixerThread *thread2 = checkMixerThread_l(output2); if (thread1 == NULL || thread2 == NULL) { - ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, output2); + ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, + output2); return 0; } @@ -7077,7 +1521,8 @@ status_t AudioFlinger::closeOutput_nonvirtual(audio_io_handle_t output) if (thread->type() == ThreadBase::MIXER) { for (size_t i = 0; i < mPlaybackThreads.size(); i++) { if (mPlaybackThreads.valueAt(i)->type() == ThreadBase::DUPLICATING) { - DuplicatingThread *dupThread = (DuplicatingThread *)mPlaybackThreads.valueAt(i).get(); + DuplicatingThread *dupThread = + (DuplicatingThread *)mPlaybackThreads.valueAt(i).get(); 
dupThread->removeOutputTrack((MixerThread *)thread.get()); } } @@ -7164,16 +1609,17 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream); - ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, status %d", + ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, " + "status %d", inStream, config.sample_rate, config.format, config.channel_mask, status); - // If the input could not be opened with the requested parameters and we can handle the conversion internally, - // try to open again with the proposed parameters. The AudioFlinger can resample the input and do mono to stereo - // or stereo to mono conversions on 16 bit PCM inputs. + // If the input could not be opened with the requested parameters and we can handle the + // conversion internally, try to open again with the proposed parameters. The AudioFlinger can + // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs. 
if (status == BAD_VALUE && reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT && (config.sample_rate <= 2 * reqSamplingRate) && @@ -7184,18 +1630,72 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, } if (status == NO_ERROR && inStream != NULL) { + +#ifdef TEE_SINK + // Try to re-use most recently used Pipe to archive a copy of input for dumpsys, + // or (re-)create if current Pipe is idle and does not match the new format + sp<NBAIO_Sink> teeSink; + enum { + TEE_SINK_NO, // don't copy input + TEE_SINK_NEW, // copy input using a new pipe + TEE_SINK_OLD, // copy input using an existing pipe + } kind; + NBAIO_Format format = Format_from_SR_C(inStream->common.get_sample_rate(&inStream->common), + popcount(inStream->common.get_channels(&inStream->common))); + if (!mTeeSinkInputEnabled) { + kind = TEE_SINK_NO; + } else if (format == Format_Invalid) { + kind = TEE_SINK_NO; + } else if (mRecordTeeSink == 0) { + kind = TEE_SINK_NEW; + } else if (mRecordTeeSink->getStrongCount() != 1) { + kind = TEE_SINK_NO; + } else if (format == mRecordTeeSink->format()) { + kind = TEE_SINK_OLD; + } else { + kind = TEE_SINK_NEW; + } + switch (kind) { + case TEE_SINK_NEW: { + Pipe *pipe = new Pipe(mTeeSinkInputFrames, format); + size_t numCounterOffers = 0; + const NBAIO_Format offers[1] = {format}; + ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + PipeReader *pipeReader = new PipeReader(*pipe); + numCounterOffers = 0; + index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mRecordTeeSink = pipe; + mRecordTeeSource = pipeReader; + teeSink = pipe; + } + break; + case TEE_SINK_OLD: + teeSink = mRecordTeeSink; + break; + case TEE_SINK_NO: + default: + break; + } +#endif + AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream); // Start record thread // RecorThread require both input and output device indication to forward to audio // pre processing 
modules - audio_devices_t device = (*pDevices) | primaryOutputDevice_l(); thread = new RecordThread(this, input, reqSamplingRate, reqChannels, id, - device); + primaryOutputDevice_l(), + *pDevices +#ifdef TEE_SINK + , teeSink +#endif + ); mRecordThreads.add(id, thread); ALOGV("openInput() created record thread: ID %d thread %p", id, thread); if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate; @@ -7465,7 +1965,7 @@ status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid, } -sp<IEffect> AudioFlinger::createEffect(pid_t pid, +sp<IEffect> AudioFlinger::createEffect( effect_descriptor_t *pDesc, const sp<IEffectClient>& effectClient, int32_t priority, @@ -7479,6 +1979,7 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid, sp<EffectHandle> handle; effect_descriptor_t desc; + pid_t pid = IPCThreadState::self()->getCallingPid(); ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d", pid, effectClient.get(), priority, sessionId, io); @@ -7736,2024 +2237,133 @@ status_t AudioFlinger::moveEffectChain_l(int sessionId, return NO_ERROR; } +struct Entry { +#define MAX_NAME 32 // %Y%m%d%H%M%S_%d.wav + char mName[MAX_NAME]; +}; -// PlaybackThread::createEffect_l() must be called with AudioFlinger::mLock held -sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( - const sp<AudioFlinger::Client>& client, - const sp<IEffectClient>& effectClient, - int32_t priority, - int sessionId, - effect_descriptor_t *desc, - int *enabled, - status_t *status - ) -{ - sp<EffectModule> effect; - sp<EffectHandle> handle; - status_t lStatus; - sp<EffectChain> chain; - bool chainCreated = false; - bool effectCreated = false; - bool effectRegistered = false; - - lStatus = initCheck(); - if (lStatus != NO_ERROR) { - ALOGW("createEffect_l() Audio driver not initialized."); - goto Exit; - } - - // Do not allow effects with session ID 0 on direct output or duplicating threads - // TODO: add rule for hw accelerated effects on direct outputs 
with non PCM format - if (sessionId == AUDIO_SESSION_OUTPUT_MIX && mType != MIXER) { - ALOGW("createEffect_l() Cannot add auxiliary effect %s to session %d", - desc->name, sessionId); - lStatus = BAD_VALUE; - goto Exit; - } - // Only Pre processor effects are allowed on input threads and only on input threads - if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) { - ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d", - desc->name, desc->flags, mType); - lStatus = BAD_VALUE; - goto Exit; - } - - ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId); - - { // scope for mLock - Mutex::Autolock _l(mLock); - - // check for existing effect chain with the requested audio session - chain = getEffectChain_l(sessionId); - if (chain == 0) { - // create a new chain for this session - ALOGV("createEffect_l() new effect chain for session %d", sessionId); - chain = new EffectChain(this, sessionId); - addEffectChain_l(chain); - chain->setStrategy(getStrategyForSession_l(sessionId)); - chainCreated = true; - } else { - effect = chain->getEffectFromDesc_l(desc); - } - - ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get()); - - if (effect == 0) { - int id = mAudioFlinger->nextUniqueId(); - // Check CPU and memory usage - lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id); - if (lStatus != NO_ERROR) { - goto Exit; - } - effectRegistered = true; - // create a new effect module if none present in the chain - effect = new EffectModule(this, chain, desc, id, sessionId); - lStatus = effect->status(); - if (lStatus != NO_ERROR) { - goto Exit; - } - lStatus = chain->addEffect_l(effect); - if (lStatus != NO_ERROR) { - goto Exit; - } - effectCreated = true; - - effect->setDevice(mOutDevice); - effect->setDevice(mInDevice); - effect->setMode(mAudioFlinger->getMode()); - effect->setAudioSource(mAudioSource); - } - // create effect 
handle and connect it to effect module - handle = new EffectHandle(effect, client, effectClient, priority); - lStatus = effect->addHandle(handle.get()); - if (enabled != NULL) { - *enabled = (int)effect->isEnabled(); - } - } - -Exit: - if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) { - Mutex::Autolock _l(mLock); - if (effectCreated) { - chain->removeEffect_l(effect); - } - if (effectRegistered) { - AudioSystem::unregisterEffect(effect->id()); - } - if (chainCreated) { - removeEffectChain_l(chain); - } - handle.clear(); - } - - if (status != NULL) { - *status = lStatus; - } - return handle; -} - -sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId) -{ - Mutex::Autolock _l(mLock); - return getEffect_l(sessionId, effectId); -} - -sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId) -{ - sp<EffectChain> chain = getEffectChain_l(sessionId); - return chain != 0 ? chain->getEffectFromId_l(effectId) : 0; -} - -// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and -// PlaybackThread::mLock held -status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect) -{ - // check for existing effect chain with the requested audio session - int sessionId = effect->sessionId(); - sp<EffectChain> chain = getEffectChain_l(sessionId); - bool chainCreated = false; - - if (chain == 0) { - // create a new chain for this session - ALOGV("addEffect_l() new effect chain for session %d", sessionId); - chain = new EffectChain(this, sessionId); - addEffectChain_l(chain); - chain->setStrategy(getStrategyForSession_l(sessionId)); - chainCreated = true; - } - ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get()); - - if (chain->getEffectFromId_l(effect->id()) != 0) { - ALOGW("addEffect_l() %p effect %s already present in chain %p", - this, effect->desc().name, chain.get()); - return BAD_VALUE; - } - - status_t status = 
chain->addEffect_l(effect); - if (status != NO_ERROR) { - if (chainCreated) { - removeEffectChain_l(chain); - } - return status; - } - - effect->setDevice(mOutDevice); - effect->setDevice(mInDevice); - effect->setMode(mAudioFlinger->getMode()); - effect->setAudioSource(mAudioSource); - return NO_ERROR; -} - -void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) { - - ALOGV("removeEffect_l() %p effect %p", this, effect.get()); - effect_descriptor_t desc = effect->desc(); - if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - detachAuxEffect_l(effect->id()); - } - - sp<EffectChain> chain = effect->chain().promote(); - if (chain != 0) { - // remove effect chain if removing last effect - if (chain->removeEffect_l(effect) == 0) { - removeEffectChain_l(chain); - } - } else { - ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get()); - } -} - -void AudioFlinger::ThreadBase::lockEffectChains_l( - Vector< sp<AudioFlinger::EffectChain> >& effectChains) -{ - effectChains = mEffectChains; - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->lock(); - } -} - -void AudioFlinger::ThreadBase::unlockEffectChains( - const Vector< sp<AudioFlinger::EffectChain> >& effectChains) -{ - for (size_t i = 0; i < effectChains.size(); i++) { - effectChains[i]->unlock(); - } -} - -sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId) -{ - Mutex::Autolock _l(mLock); - return getEffectChain_l(sessionId); -} - -sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const -{ - size_t size = mEffectChains.size(); - for (size_t i = 0; i < size; i++) { - if (mEffectChains[i]->sessionId() == sessionId) { - return mEffectChains[i]; - } - } - return 0; -} - -void AudioFlinger::ThreadBase::setMode(audio_mode_t mode) -{ - Mutex::Autolock _l(mLock); - size_t size = mEffectChains.size(); - for (size_t i = 0; i < size; i++) { - 
mEffectChains[i]->setMode_l(mode); - } -} - -void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect, - EffectHandle *handle, - bool unpinIfLast) { - - Mutex::Autolock _l(mLock); - ALOGV("disconnectEffect() %p effect %p", this, effect.get()); - // delete the effect module if removing last handle on it - if (effect->removeHandle(handle) == 0) { - if (!effect->isPinned() || unpinIfLast) { - removeEffect_l(effect); - AudioSystem::unregisterEffect(effect->id()); - } - } -} - -status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain) -{ - int session = chain->sessionId(); - int16_t *buffer = mMixBuffer; - bool ownsBuffer = false; - - ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session); - if (session > 0) { - // Only one effect chain can be present in direct output thread and it uses - // the mix buffer as input - if (mType != DIRECT) { - size_t numSamples = mNormalFrameCount * mChannelCount; - buffer = new int16_t[numSamples]; - memset(buffer, 0, numSamples * sizeof(int16_t)); - ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session); - ownsBuffer = true; - } - - // Attach all tracks with same session ID to this chain. 
- for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (session == track->sessionId()) { - ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(), buffer); - track->setMainBuffer(buffer); - chain->incTrackCnt(); - } - } - - // indicate all active tracks in the chain - for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) { - sp<Track> track = mActiveTracks[i].promote(); - if (track == 0) continue; - if (session == track->sessionId()) { - ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session); - chain->incActiveTrackCnt(); - } - } - } - - chain->setInBuffer(buffer, ownsBuffer); - chain->setOutBuffer(mMixBuffer); - // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect - // chains list in order to be processed last as it contains output stage effects - // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before - // session AUDIO_SESSION_OUTPUT_STAGE to be processed - // after track specific effects and before output stage - // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and - // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX - // Effect chain for other sessions are inserted at beginning of effect - // chains list to be processed before output mix effects. 
Relative order between other - // sessions is not important - size_t size = mEffectChains.size(); - size_t i = 0; - for (i = 0; i < size; i++) { - if (mEffectChains[i]->sessionId() < session) break; - } - mEffectChains.insertAt(chain, i); - checkSuspendOnAddEffectChain_l(chain); - - return NO_ERROR; -} - -size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain) -{ - int session = chain->sessionId(); - - ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session); - - for (size_t i = 0; i < mEffectChains.size(); i++) { - if (chain == mEffectChains[i]) { - mEffectChains.removeAt(i); - // detach all active tracks from the chain - for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) { - sp<Track> track = mActiveTracks[i].promote(); - if (track == 0) continue; - if (session == track->sessionId()) { - ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d", - chain.get(), session); - chain->decActiveTrackCnt(); - } - } - - // detach all tracks with same session ID from this chain - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (session == track->sessionId()) { - track->setMainBuffer(mMixBuffer); - chain->decTrackCnt(); - } - } - break; - } - } - return mEffectChains.size(); -} - -status_t AudioFlinger::PlaybackThread::attachAuxEffect( - const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) -{ - Mutex::Autolock _l(mLock); - return attachAuxEffect_l(track, EffectId); -} - -status_t AudioFlinger::PlaybackThread::attachAuxEffect_l( - const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) -{ - status_t status = NO_ERROR; - - if (EffectId == 0) { - track->setAuxBuffer(0, NULL); - } else { - // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX - sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId); - if (effect != 0) { - if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == 
EFFECT_FLAG_TYPE_AUXILIARY) { - track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer()); - } else { - status = INVALID_OPERATION; - } - } else { - status = BAD_VALUE; - } - } - return status; -} - -void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId) -{ - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (track->auxEffectId() == effectId) { - attachAuxEffect_l(track, 0); - } - } -} - -status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain) -{ - // only one chain per input thread - if (mEffectChains.size() != 0) { - return INVALID_OPERATION; - } - ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this); - - chain->setInBuffer(NULL); - chain->setOutBuffer(NULL); - - checkSuspendOnAddEffectChain_l(chain); - - mEffectChains.add(chain); - - return NO_ERROR; -} - -size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& chain) -{ - ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this); - ALOGW_IF(mEffectChains.size() != 1, - "removeEffectChain_l() %p invalid chain size %d on thread %p", - chain.get(), mEffectChains.size(), this); - if (mEffectChains.size() == 1) { - mEffectChains.removeAt(0); - } - return 0; -} - -// ---------------------------------------------------------------------------- -// EffectModule implementation -// ---------------------------------------------------------------------------- - -#undef LOG_TAG -#define LOG_TAG "AudioFlinger::EffectModule" - -AudioFlinger::EffectModule::EffectModule(ThreadBase *thread, - const wp<AudioFlinger::EffectChain>& chain, - effect_descriptor_t *desc, - int id, - int sessionId) - : mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX), - mThread(thread), mChain(chain), mId(id), mSessionId(sessionId), - mDescriptor(*desc), - // mConfig is set by configure() and not used before then - mEffectInterface(NULL), - mStatus(NO_INIT), mState(IDLE), - // mMaxDisableWaitCnt is set by configure() and not used before 
then - // mDisableWaitCnt is set by process() and updateState() and not used before then - mSuspended(false) -{ - ALOGV("Constructor %p", this); - int lStatus; - - // create effect engine from effect factory - mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface); - - if (mStatus != NO_ERROR) { - return; - } - lStatus = init(); - if (lStatus < 0) { - mStatus = lStatus; - goto Error; - } - - ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface); - return; -Error: - EffectRelease(mEffectInterface); - mEffectInterface = NULL; - ALOGV("Constructor Error %d", mStatus); -} - -AudioFlinger::EffectModule::~EffectModule() -{ - ALOGV("Destructor %p", this); - if (mEffectInterface != NULL) { - if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || - (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - audio_stream_t *stream = thread->stream(); - if (stream != NULL) { - stream->remove_audio_effect(stream, mEffectInterface); - } - } - } - // release effect engine - EffectRelease(mEffectInterface); - } -} - -status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle) -{ - status_t status; - - Mutex::Autolock _l(mLock); - int priority = handle->priority(); - size_t size = mHandles.size(); - EffectHandle *controlHandle = NULL; - size_t i; - for (i = 0; i < size; i++) { - EffectHandle *h = mHandles[i]; - if (h == NULL || h->destroyed_l()) continue; - // first non destroyed handle is considered in control - if (controlHandle == NULL) - controlHandle = h; - if (h->priority() <= priority) break; - } - // if inserted in first place, move effect control from previous owner to this handle - if (i == 0) { - bool enabled = false; - if (controlHandle != NULL) { - enabled = controlHandle->enabled(); - controlHandle->setControl(false/*hasControl*/, true /*signal*/, enabled /*enabled*/); - } - 
handle->setControl(true /*hasControl*/, false /*signal*/, enabled /*enabled*/); - status = NO_ERROR; - } else { - status = ALREADY_EXISTS; - } - ALOGV("addHandle() %p added handle %p in position %d", this, handle, i); - mHandles.insertAt(handle, i); - return status; -} - -size_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle) -{ - Mutex::Autolock _l(mLock); - size_t size = mHandles.size(); - size_t i; - for (i = 0; i < size; i++) { - if (mHandles[i] == handle) break; - } - if (i == size) { - return size; - } - ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i); - - mHandles.removeAt(i); - // if removed from first place, move effect control from this handle to next in line - if (i == 0) { - EffectHandle *h = controlHandle_l(); - if (h != NULL) { - h->setControl(true /*hasControl*/, true /*signal*/ , handle->enabled() /*enabled*/); - } - } - - // Prevent calls to process() and other functions on effect interface from now on. - // The effect engine will be released by the destructor when the last strong reference on - // this object is released which can happen after next process is called. 
- if (mHandles.size() == 0 && !mPinned) { - mState = DESTROYED; - } - - return mHandles.size(); -} - -// must be called with EffectModule::mLock held -AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l() -{ - // the first valid handle in the list has control over the module - for (size_t i = 0; i < mHandles.size(); i++) { - EffectHandle *h = mHandles[i]; - if (h != NULL && !h->destroyed_l()) { - return h; - } - } - - return NULL; -} - -size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast) -{ - ALOGV("disconnect() %p handle %p", this, handle); - // keep a strong reference on this EffectModule to avoid calling the - // destructor before we exit - sp<EffectModule> keep(this); - { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - thread->disconnectEffect(keep, handle, unpinIfLast); - } - } - return mHandles.size(); -} - -void AudioFlinger::EffectModule::updateState() { - Mutex::Autolock _l(mLock); - - switch (mState) { - case RESTART: - reset_l(); - // FALL THROUGH - - case STARTING: - // clear auxiliary effect input buffer for next accumulation - if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - memset(mConfig.inputCfg.buffer.raw, - 0, - mConfig.inputCfg.buffer.frameCount*sizeof(int32_t)); - } - start_l(); - mState = ACTIVE; - break; - case STOPPING: - stop_l(); - mDisableWaitCnt = mMaxDisableWaitCnt; - mState = STOPPED; - break; - case STOPPED: - // mDisableWaitCnt is forced to 1 by process() when the engine indicates the end of the - // turn off sequence. 
- if (--mDisableWaitCnt == 0) { - reset_l(); - mState = IDLE; - } - break; - default: //IDLE , ACTIVE, DESTROYED - break; - } -} - -void AudioFlinger::EffectModule::process() -{ - Mutex::Autolock _l(mLock); - - if (mState == DESTROYED || mEffectInterface == NULL || - mConfig.inputCfg.buffer.raw == NULL || - mConfig.outputCfg.buffer.raw == NULL) { - return; - } - - if (isProcessEnabled()) { - // do 32 bit to 16 bit conversion for auxiliary effect input buffer - if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - ditherAndClamp(mConfig.inputCfg.buffer.s32, - mConfig.inputCfg.buffer.s32, - mConfig.inputCfg.buffer.frameCount/2); - } - - // do the actual processing in the effect engine - int ret = (*mEffectInterface)->process(mEffectInterface, - &mConfig.inputCfg.buffer, - &mConfig.outputCfg.buffer); - - // force transition to IDLE state when engine is ready - if (mState == STOPPED && ret == -ENODATA) { - mDisableWaitCnt = 1; - } - - // clear auxiliary effect input buffer for next accumulation - if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - memset(mConfig.inputCfg.buffer.raw, 0, - mConfig.inputCfg.buffer.frameCount*sizeof(int32_t)); - } - } else if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_INSERT && - mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) { - // If an insert effect is idle and input buffer is different from output buffer, - // accumulate input onto output - sp<EffectChain> chain = mChain.promote(); - if (chain != 0 && chain->activeTrackCnt() != 0) { - size_t frameCnt = mConfig.inputCfg.buffer.frameCount * 2; //always stereo here - int16_t *in = mConfig.inputCfg.buffer.s16; - int16_t *out = mConfig.outputCfg.buffer.s16; - for (size_t i = 0; i < frameCnt; i++) { - out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]); - } - } - } -} - -void AudioFlinger::EffectModule::reset_l() -{ - if (mEffectInterface == NULL) { - return; - } - 
(*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL); -} - -status_t AudioFlinger::EffectModule::configure() -{ - if (mEffectInterface == NULL) { - return NO_INIT; - } - - sp<ThreadBase> thread = mThread.promote(); - if (thread == 0) { - return DEAD_OBJECT; - } - - // TODO: handle configuration of effects replacing track process - audio_channel_mask_t channelMask = thread->channelMask(); - - if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO; - } else { - mConfig.inputCfg.channels = channelMask; - } - mConfig.outputCfg.channels = channelMask; - mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT; - mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT; - mConfig.inputCfg.samplingRate = thread->sampleRate(); - mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate; - mConfig.inputCfg.bufferProvider.cookie = NULL; - mConfig.inputCfg.bufferProvider.getBuffer = NULL; - mConfig.inputCfg.bufferProvider.releaseBuffer = NULL; - mConfig.outputCfg.bufferProvider.cookie = NULL; - mConfig.outputCfg.bufferProvider.getBuffer = NULL; - mConfig.outputCfg.bufferProvider.releaseBuffer = NULL; - mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; - // Insert effect: - // - in session AUDIO_SESSION_OUTPUT_MIX or AUDIO_SESSION_OUTPUT_STAGE, - // always overwrites output buffer: input buffer == output buffer - // - in other sessions: - // last effect in the chain accumulates in output buffer: input buffer != output buffer - // other effect: overwrites output buffer: input buffer == output buffer - // Auxiliary effect: - // accumulates in output buffer: input buffer != output buffer - // Therefore: accumulate <=> input buffer != output buffer - if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) { - mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE; - } else { - mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; - } - 
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL; - mConfig.outputCfg.mask = EFFECT_CONFIG_ALL; - mConfig.inputCfg.buffer.frameCount = thread->frameCount(); - mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount; - - ALOGV("configure() %p thread %p buffer %p framecount %d", - this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount); - - status_t cmdStatus; - uint32_t size = sizeof(int); - status_t status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_SET_CONFIG, - sizeof(effect_config_t), - &mConfig, - &size, - &cmdStatus); - if (status == 0) { - status = cmdStatus; - } - - if (status == 0 && - (memcmp(&mDescriptor.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0)) { - uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2]; - effect_param_t *p = (effect_param_t *)buf32; - - p->psize = sizeof(uint32_t); - p->vsize = sizeof(uint32_t); - size = sizeof(int); - *(int32_t *)p->data = VISUALIZER_PARAM_LATENCY; - - uint32_t latency = 0; - PlaybackThread *pbt = thread->mAudioFlinger->checkPlaybackThread_l(thread->mId); - if (pbt != NULL) { - latency = pbt->latency_l(); - } - - *((int32_t *)p->data + 1)= latency; - (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_SET_PARAM, - sizeof(effect_param_t) + 8, - &buf32, - &size, - &cmdStatus); - } - - mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) / - (1000 * mConfig.outputCfg.buffer.frameCount); - - return status; -} - -status_t AudioFlinger::EffectModule::init() -{ - Mutex::Autolock _l(mLock); - if (mEffectInterface == NULL) { - return NO_INIT; - } - status_t cmdStatus; - uint32_t size = sizeof(status_t); - status_t status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_INIT, - 0, - NULL, - &size, - &cmdStatus); - if (status == 0) { - status = cmdStatus; - } - return status; -} - -status_t AudioFlinger::EffectModule::start() -{ - Mutex::Autolock _l(mLock); - return start_l(); -} - -status_t 
AudioFlinger::EffectModule::start_l() -{ - if (mEffectInterface == NULL) { - return NO_INIT; - } - status_t cmdStatus; - uint32_t size = sizeof(status_t); - status_t status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_ENABLE, - 0, - NULL, - &size, - &cmdStatus); - if (status == 0) { - status = cmdStatus; - } - if (status == 0 && - ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || - (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - audio_stream_t *stream = thread->stream(); - if (stream != NULL) { - stream->add_audio_effect(stream, mEffectInterface); - } - } - } - return status; -} - -status_t AudioFlinger::EffectModule::stop() -{ - Mutex::Autolock _l(mLock); - return stop_l(); -} - -status_t AudioFlinger::EffectModule::stop_l() -{ - if (mEffectInterface == NULL) { - return NO_INIT; - } - status_t cmdStatus; - uint32_t size = sizeof(status_t); - status_t status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_DISABLE, - 0, - NULL, - &size, - &cmdStatus); - if (status == 0) { - status = cmdStatus; - } - if (status == 0 && - ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || - (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - audio_stream_t *stream = thread->stream(); - if (stream != NULL) { - stream->remove_audio_effect(stream, mEffectInterface); - } - } - } - return status; -} - -status_t AudioFlinger::EffectModule::command(uint32_t cmdCode, - uint32_t cmdSize, - void *pCmdData, - uint32_t *replySize, - void *pReplyData) -{ - Mutex::Autolock _l(mLock); -// ALOGV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface); - - if (mState == DESTROYED || mEffectInterface == NULL) { - return NO_INIT; - } - status_t status = (*mEffectInterface)->command(mEffectInterface, - cmdCode, - 
cmdSize, - pCmdData, - replySize, - pReplyData); - if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) { - uint32_t size = (replySize == NULL) ? 0 : *replySize; - for (size_t i = 1; i < mHandles.size(); i++) { - EffectHandle *h = mHandles[i]; - if (h != NULL && !h->destroyed_l()) { - h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData); - } - } - } - return status; -} - -status_t AudioFlinger::EffectModule::setEnabled(bool enabled) -{ - Mutex::Autolock _l(mLock); - return setEnabled_l(enabled); -} - -// must be called with EffectModule::mLock held -status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled) -{ - - ALOGV("setEnabled %p enabled %d", this, enabled); - - if (enabled != isEnabled()) { - status_t status = AudioSystem::setEffectEnabled(mId, enabled); - if (enabled && status != NO_ERROR) { - return status; - } - - switch (mState) { - // going from disabled to enabled - case IDLE: - mState = STARTING; - break; - case STOPPED: - mState = RESTART; - break; - case STOPPING: - mState = ACTIVE; - break; - - // going from enabled to disabled - case RESTART: - mState = STOPPED; - break; - case STARTING: - mState = IDLE; - break; - case ACTIVE: - mState = STOPPING; - break; - case DESTROYED: - return NO_ERROR; // simply ignore as we are being destroyed - } - for (size_t i = 1; i < mHandles.size(); i++) { - EffectHandle *h = mHandles[i]; - if (h != NULL && !h->destroyed_l()) { - h->setEnabled(enabled); - } - } - } - return NO_ERROR; -} - -bool AudioFlinger::EffectModule::isEnabled() const -{ - switch (mState) { - case RESTART: - case STARTING: - case ACTIVE: - return true; - case IDLE: - case STOPPING: - case STOPPED: - case DESTROYED: - default: - return false; - } -} - -bool AudioFlinger::EffectModule::isProcessEnabled() const -{ - switch (mState) { - case RESTART: - case ACTIVE: - case STOPPING: - case STOPPED: - return true; - case IDLE: - case STARTING: - case DESTROYED: - default: - return false; - } -} - -status_t 
AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller) -{ - Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - - // Send volume indication if EFFECT_FLAG_VOLUME_IND is set and read back altered volume - // if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set) - if (isProcessEnabled() && - ((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL || - (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) { - status_t cmdStatus; - uint32_t volume[2]; - uint32_t *pVolume = NULL; - uint32_t size = sizeof(volume); - volume[0] = *left; - volume[1] = *right; - if (controller) { - pVolume = volume; - } - status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_SET_VOLUME, - size, - volume, - &size, - pVolume); - if (controller && status == NO_ERROR && size == sizeof(volume)) { - *left = volume[0]; - *right = volume[1]; - } - } - return status; -} - -status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device) -{ - if (device == AUDIO_DEVICE_NONE) { - return NO_ERROR; - } - - Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) { - status_t cmdStatus; - uint32_t size = sizeof(status_t); - uint32_t cmd = audio_is_output_devices(device) ? 
EFFECT_CMD_SET_DEVICE : - EFFECT_CMD_SET_INPUT_DEVICE; - status = (*mEffectInterface)->command(mEffectInterface, - cmd, - sizeof(uint32_t), - &device, - &size, - &cmdStatus); - } - return status; -} - -status_t AudioFlinger::EffectModule::setMode(audio_mode_t mode) -{ - Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) { - status_t cmdStatus; - uint32_t size = sizeof(status_t); - status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_SET_AUDIO_MODE, - sizeof(audio_mode_t), - &mode, - &size, - &cmdStatus); - if (status == NO_ERROR) { - status = cmdStatus; - } - } - return status; -} - -status_t AudioFlinger::EffectModule::setAudioSource(audio_source_t source) -{ - Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) { - uint32_t size = 0; - status = (*mEffectInterface)->command(mEffectInterface, - EFFECT_CMD_SET_AUDIO_SOURCE, - sizeof(audio_source_t), - &source, - &size, - NULL); - } - return status; -} - -void AudioFlinger::EffectModule::setSuspended(bool suspended) -{ - Mutex::Autolock _l(mLock); - mSuspended = suspended; -} - -bool AudioFlinger::EffectModule::suspended() const -{ - Mutex::Autolock _l(mLock); - return mSuspended; -} - -bool AudioFlinger::EffectModule::purgeHandles() -{ - bool enabled = false; - Mutex::Autolock _l(mLock); - for (size_t i = 0; i < mHandles.size(); i++) { - EffectHandle *handle = mHandles[i]; - if (handle != NULL && !handle->destroyed_l()) { - handle->effect().clear(); - if (handle->hasControl()) { - enabled = handle->enabled(); - } - } - } - return enabled; -} - -void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "\tEffect ID %d:\n", mId); - result.append(buffer); - - bool locked = tryLock(mLock); - // failed to 
lock - AudioFlinger is probably deadlocked - if (!locked) { - result.append("\t\tCould not lock Fx mutex:\n"); - } - - result.append("\t\tSession Status State Engine:\n"); - snprintf(buffer, SIZE, "\t\t%05d %03d %03d 0x%08x\n", - mSessionId, mStatus, mState, (uint32_t)mEffectInterface); - result.append(buffer); - - result.append("\t\tDescriptor:\n"); - snprintf(buffer, SIZE, "\t\t- UUID: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n", - mDescriptor.uuid.timeLow, mDescriptor.uuid.timeMid, mDescriptor.uuid.timeHiAndVersion, - mDescriptor.uuid.clockSeq, mDescriptor.uuid.node[0], mDescriptor.uuid.node[1],mDescriptor.uuid.node[2], - mDescriptor.uuid.node[3],mDescriptor.uuid.node[4],mDescriptor.uuid.node[5]); - result.append(buffer); - snprintf(buffer, SIZE, "\t\t- TYPE: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n", - mDescriptor.type.timeLow, mDescriptor.type.timeMid, mDescriptor.type.timeHiAndVersion, - mDescriptor.type.clockSeq, mDescriptor.type.node[0], mDescriptor.type.node[1],mDescriptor.type.node[2], - mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]); - result.append(buffer); - snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n", - mDescriptor.apiVersion, - mDescriptor.flags); - result.append(buffer); - snprintf(buffer, SIZE, "\t\t- name: %s\n", - mDescriptor.name); - result.append(buffer); - snprintf(buffer, SIZE, "\t\t- implementor: %s\n", - mDescriptor.implementor); - result.append(buffer); - - result.append("\t\t- Input configuration:\n"); - result.append("\t\t\tBuffer Frames Smp rate Channels Format\n"); - snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n", - (uint32_t)mConfig.inputCfg.buffer.raw, - mConfig.inputCfg.buffer.frameCount, - mConfig.inputCfg.samplingRate, - mConfig.inputCfg.channels, - mConfig.inputCfg.format); - result.append(buffer); - - result.append("\t\t- Output configuration:\n"); - result.append("\t\t\tBuffer Frames Smp rate Channels Format\n"); - snprintf(buffer, SIZE, "\t\t\t0x%08x 
%05d %05d %08x %d\n", - (uint32_t)mConfig.outputCfg.buffer.raw, - mConfig.outputCfg.buffer.frameCount, - mConfig.outputCfg.samplingRate, - mConfig.outputCfg.channels, - mConfig.outputCfg.format); - result.append(buffer); - - snprintf(buffer, SIZE, "\t\t%d Clients:\n", mHandles.size()); - result.append(buffer); - result.append("\t\t\tPid Priority Ctrl Locked client server\n"); - for (size_t i = 0; i < mHandles.size(); ++i) { - EffectHandle *handle = mHandles[i]; - if (handle != NULL && !handle->destroyed_l()) { - handle->dump(buffer, SIZE); - result.append(buffer); - } - } - - result.append("\n"); - - write(fd, result.string(), result.length()); - - if (locked) { - mLock.unlock(); - } -} - -// ---------------------------------------------------------------------------- -// EffectHandle implementation -// ---------------------------------------------------------------------------- - -#undef LOG_TAG -#define LOG_TAG "AudioFlinger::EffectHandle" - -AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect, - const sp<AudioFlinger::Client>& client, - const sp<IEffectClient>& effectClient, - int32_t priority) - : BnEffect(), - mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL), - mPriority(priority), mHasControl(false), mEnabled(false), mDestroyed(false) -{ - ALOGV("constructor %p", this); - - if (client == 0) { - return; - } - int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int); - mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset); - if (mCblkMemory != 0) { - mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer()); - - if (mCblk != NULL) { - new(mCblk) effect_param_cblk_t(); - mBuffer = (uint8_t *)mCblk + bufOffset; - } - } else { - ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t)); - return; - } -} - -AudioFlinger::EffectHandle::~EffectHandle() -{ - ALOGV("Destructor %p", this); - - if (mEffect == 0) { - 
mDestroyed = true; - return; - } - mEffect->lock(); - mDestroyed = true; - mEffect->unlock(); - disconnect(false); -} - -status_t AudioFlinger::EffectHandle::enable() -{ - ALOGV("enable %p", this); - if (!mHasControl) return INVALID_OPERATION; - if (mEffect == 0) return DEAD_OBJECT; - - if (mEnabled) { - return NO_ERROR; - } - - mEnabled = true; - - sp<ThreadBase> thread = mEffect->thread().promote(); - if (thread != 0) { - thread->checkSuspendOnEffectEnabled(mEffect, true, mEffect->sessionId()); - } - - // checkSuspendOnEffectEnabled() can suspend this same effect when enabled - if (mEffect->suspended()) { - return NO_ERROR; - } - - status_t status = mEffect->setEnabled(true); - if (status != NO_ERROR) { - if (thread != 0) { - thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId()); - } - mEnabled = false; - } - return status; -} - -status_t AudioFlinger::EffectHandle::disable() -{ - ALOGV("disable %p", this); - if (!mHasControl) return INVALID_OPERATION; - if (mEffect == 0) return DEAD_OBJECT; - - if (!mEnabled) { - return NO_ERROR; - } - mEnabled = false; - - if (mEffect->suspended()) { - return NO_ERROR; - } - - status_t status = mEffect->setEnabled(false); - - sp<ThreadBase> thread = mEffect->thread().promote(); - if (thread != 0) { - thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId()); - } - - return status; -} - -void AudioFlinger::EffectHandle::disconnect() -{ - disconnect(true); -} - -void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast) -{ - ALOGV("disconnect(%s)", unpinIfLast ? "true" : "false"); - if (mEffect == 0) { - return; - } - // restore suspended effects if the disconnected handle was enabled and the last one. 
- if ((mEffect->disconnect(this, unpinIfLast) == 0) && mEnabled) { - sp<ThreadBase> thread = mEffect->thread().promote(); - if (thread != 0) { - thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId()); - } - } - - // release sp on module => module destructor can be called now - mEffect.clear(); - if (mClient != 0) { - if (mCblk != NULL) { - // unlike ~TrackBase(), mCblk is never a local new, so don't delete - mCblk->~effect_param_cblk_t(); // destroy our shared-structure. - } - mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to - // Client destructor must run with AudioFlinger mutex locked - Mutex::Autolock _l(mClient->audioFlinger()->mLock); - mClient.clear(); - } -} - -status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode, - uint32_t cmdSize, - void *pCmdData, - uint32_t *replySize, - void *pReplyData) -{ -// ALOGV("command(), cmdCode: %d, mHasControl: %d, mEffect: %p", -// cmdCode, mHasControl, (mEffect == 0) ? 0 : mEffect.get()); - - // only get parameter command is permitted for applications not controlling the effect - if (!mHasControl && cmdCode != EFFECT_CMD_GET_PARAM) { - return INVALID_OPERATION; - } - if (mEffect == 0) return DEAD_OBJECT; - if (mClient == 0) return INVALID_OPERATION; - - // handle commands that are not forwarded transparently to effect engine - if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) { - // No need to trylock() here as this function is executed in the binder thread serving a particular client process: - // no risk to block the whole media server process or mixer threads is we are stuck here - Mutex::Autolock _l(mCblk->lock); - if (mCblk->clientIndex > EFFECT_PARAM_BUFFER_SIZE || - mCblk->serverIndex > EFFECT_PARAM_BUFFER_SIZE) { - mCblk->serverIndex = 0; - mCblk->clientIndex = 0; - return BAD_VALUE; - } - status_t status = NO_ERROR; - while (mCblk->serverIndex < mCblk->clientIndex) { - int reply; - uint32_t rsize = sizeof(int); - int *p = (int *)(mBuffer + 
mCblk->serverIndex); - int size = *p++; - if (((uint8_t *)p + size) > mBuffer + mCblk->clientIndex) { - ALOGW("command(): invalid parameter block size"); - break; - } - effect_param_t *param = (effect_param_t *)p; - if (param->psize == 0 || param->vsize == 0) { - ALOGW("command(): null parameter or value size"); - mCblk->serverIndex += size; - continue; - } - uint32_t psize = sizeof(effect_param_t) + - ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + - param->vsize; - status_t ret = mEffect->command(EFFECT_CMD_SET_PARAM, - psize, - p, - &rsize, - &reply); - // stop at first error encountered - if (ret != NO_ERROR) { - status = ret; - *(int *)pReplyData = reply; - break; - } else if (reply != NO_ERROR) { - *(int *)pReplyData = reply; - break; - } - mCblk->serverIndex += size; - } - mCblk->serverIndex = 0; - mCblk->clientIndex = 0; - return status; - } else if (cmdCode == EFFECT_CMD_ENABLE) { - *(int *)pReplyData = NO_ERROR; - return enable(); - } else if (cmdCode == EFFECT_CMD_DISABLE) { - *(int *)pReplyData = NO_ERROR; - return disable(); - } - - return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData); -} - -void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled) -{ - ALOGV("setControl %p control %d", this, hasControl); - - mHasControl = hasControl; - mEnabled = enabled; - - if (signal && mEffectClient != 0) { - mEffectClient->controlStatusChanged(hasControl); - } -} - -void AudioFlinger::EffectHandle::commandExecuted(uint32_t cmdCode, - uint32_t cmdSize, - void *pCmdData, - uint32_t replySize, - void *pReplyData) -{ - if (mEffectClient != 0) { - mEffectClient->commandExecuted(cmdCode, cmdSize, pCmdData, replySize, pReplyData); - } -} - - - -void AudioFlinger::EffectHandle::setEnabled(bool enabled) -{ - if (mEffectClient != 0) { - mEffectClient->enableStatusChanged(enabled); - } -} - -status_t AudioFlinger::EffectHandle::onTransact( - uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) -{ - 
return BnEffect::onTransact(code, data, reply, flags); -} - - -void AudioFlinger::EffectHandle::dump(char* buffer, size_t size) -{ - bool locked = mCblk != NULL && tryLock(mCblk->lock); - - snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n", - (mClient == 0) ? getpid_cached : mClient->pid(), - mPriority, - mHasControl, - !locked, - mCblk ? mCblk->clientIndex : 0, - mCblk ? mCblk->serverIndex : 0 - ); - - if (locked) { - mCblk->lock.unlock(); - } -} - -#undef LOG_TAG -#define LOG_TAG "AudioFlinger::EffectChain" - -AudioFlinger::EffectChain::EffectChain(ThreadBase *thread, - int sessionId) - : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0), - mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX), - mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX) -{ - mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); - if (thread == NULL) { - return; - } - mMaxTailBuffers = ((kProcessTailDurationMs * thread->sampleRate()) / 1000) / - thread->frameCount(); -} - -AudioFlinger::EffectChain::~EffectChain() -{ - if (mOwnInBuffer) { - delete mInBuffer; - } - -} - -// getEffectFromDesc_l() must be called with ThreadBase::mLock held -sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l(effect_descriptor_t *descriptor) -{ - size_t size = mEffects.size(); - - for (size_t i = 0; i < size; i++) { - if (memcmp(&mEffects[i]->desc().uuid, &descriptor->uuid, sizeof(effect_uuid_t)) == 0) { - return mEffects[i]; - } - } - return 0; -} - -// getEffectFromId_l() must be called with ThreadBase::mLock held -sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int id) -{ - size_t size = mEffects.size(); - - for (size_t i = 0; i < size; i++) { - // by convention, return first effect if id provided is 0 (0 is never a valid id) - if (id == 0 || mEffects[i]->id() == id) { - return mEffects[i]; - } - } - return 0; -} - -// getEffectFromType_l() must be 
called with ThreadBase::mLock held -sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l( - const effect_uuid_t *type) -{ - size_t size = mEffects.size(); - - for (size_t i = 0; i < size; i++) { - if (memcmp(&mEffects[i]->desc().type, type, sizeof(effect_uuid_t)) == 0) { - return mEffects[i]; - } - } - return 0; -} - -void AudioFlinger::EffectChain::clearInputBuffer() -{ - Mutex::Autolock _l(mLock); - sp<ThreadBase> thread = mThread.promote(); - if (thread == 0) { - ALOGW("clearInputBuffer(): cannot promote mixer thread"); - return; - } - clearInputBuffer_l(thread); -} - -// Must be called with EffectChain::mLock locked -void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread) +int comparEntry(const void *p1, const void *p2) { - size_t numSamples = thread->frameCount() * thread->channelCount(); - memset(mInBuffer, 0, numSamples * sizeof(int16_t)); - + return strcmp(((const Entry *) p1)->mName, ((const Entry *) p2)->mName); } -// Must be called with EffectChain::mLock locked -void AudioFlinger::EffectChain::process_l() +#ifdef TEE_SINK +void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id) { - sp<ThreadBase> thread = mThread.promote(); - if (thread == 0) { - ALOGW("process_l(): cannot promote mixer thread"); - return; - } - bool isGlobalSession = (mSessionId == AUDIO_SESSION_OUTPUT_MIX) || - (mSessionId == AUDIO_SESSION_OUTPUT_STAGE); - // always process effects unless no more tracks are on the session and the effect tail - // has been rendered - bool doProcess = true; - if (!isGlobalSession) { - bool tracksOnSession = (trackCnt() != 0); - - if (!tracksOnSession && mTailBufferCount == 0) { - doProcess = false; - } - - if (activeTrackCnt() == 0) { - // if no track is active and the effect tail has not been rendered, - // the input buffer must be cleared here as the mixer process will not do it - if (tracksOnSession || mTailBufferCount > 0) { - clearInputBuffer_l(thread); - if 
(mTailBufferCount > 0) { - mTailBufferCount--; - } - } - } - } - - size_t size = mEffects.size(); - if (doProcess) { - for (size_t i = 0; i < size; i++) { - mEffects[i]->process(); - } - } - for (size_t i = 0; i < size; i++) { - mEffects[i]->updateState(); - } -} - -// addEffect_l() must be called with PlaybackThread::mLock held -status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect) -{ - effect_descriptor_t desc = effect->desc(); - uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK; - - Mutex::Autolock _l(mLock); - effect->setChain(this); - sp<ThreadBase> thread = mThread.promote(); - if (thread == 0) { - return NO_INIT; - } - effect->setThread(thread); - - if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - // Auxiliary effects are inserted at the beginning of mEffects vector as - // they are processed first and accumulated in chain input buffer - mEffects.insertAt(effect, 0); - - // the input buffer for auxiliary effect contains mono samples in - // 32 bit format. This is to avoid saturation in AudoMixer - // accumulation stage. Saturation is done in EffectModule::process() before - // calling the process in effect engine - size_t numSamples = thread->frameCount(); - int32_t *buffer = new int32_t[numSamples]; - memset(buffer, 0, numSamples * sizeof(int32_t)); - effect->setInBuffer((int16_t *)buffer); - // auxiliary effects output samples to chain input buffer for further processing - // by insert effects - effect->setOutBuffer(mInBuffer); - } else { - // Insert effects are inserted at the end of mEffects vector as they are processed - // after track and auxiliary effects. 
- // Insert effect order as a function of indicated preference: - // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if - // another effect is present - // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the - // last effect claiming first position - // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the - // first effect claiming last position - // else if EFFECT_FLAG_INSERT_ANY insert after first or before last - // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is - // already present - - size_t size = mEffects.size(); - size_t idx_insert = size; - ssize_t idx_insert_first = -1; - ssize_t idx_insert_last = -1; - - for (size_t i = 0; i < size; i++) { - effect_descriptor_t d = mEffects[i]->desc(); - uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK; - uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK; - if (iMode == EFFECT_FLAG_TYPE_INSERT) { - // check invalid effect chaining combinations - if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE || - iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) { - ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s", desc.name, d.name); - return INVALID_OPERATION; + NBAIO_Source *teeSource = source.get(); + if (teeSource != NULL) { + // .wav rotation + // There is a benign race condition if 2 threads call this simultaneously. + // They would both traverse the directory, but the result would simply be + // failures at unlink() which are ignored. It's also unlikely since + // normally dumpsys is only done by bugreport or from the command line. 
+ char teePath[32+256]; + strcpy(teePath, "/data/misc/media"); + size_t teePathLen = strlen(teePath); + DIR *dir = opendir(teePath); + teePath[teePathLen++] = '/'; + if (dir != NULL) { +#define MAX_SORT 20 // number of entries to sort +#define MAX_KEEP 10 // number of entries to keep + struct Entry entries[MAX_SORT]; + size_t entryCount = 0; + while (entryCount < MAX_SORT) { + struct dirent de; + struct dirent *result = NULL; + int rc = readdir_r(dir, &de, &result); + if (rc != 0) { + ALOGW("readdir_r failed %d", rc); + break; } - // remember position of first insert effect and by default - // select this as insert position for new effect - if (idx_insert == size) { - idx_insert = i; + if (result == NULL) { + break; } - // remember position of last insert effect claiming - // first position - if (iPref == EFFECT_FLAG_INSERT_FIRST) { - idx_insert_first = i; + if (result != &de) { + ALOGW("readdir_r returned unexpected result %p != %p", result, &de); + break; } - // remember position of first insert effect claiming - // last position - if (iPref == EFFECT_FLAG_INSERT_LAST && - idx_insert_last == -1) { - idx_insert_last = i; + // ignore non .wav file entries + size_t nameLen = strlen(de.d_name); + if (nameLen <= 4 || nameLen >= MAX_NAME || + strcmp(&de.d_name[nameLen - 4], ".wav")) { + continue; } + strcpy(entries[entryCount++].mName, de.d_name); } - } - - // modify idx_insert from first position if needed - if (insertPref == EFFECT_FLAG_INSERT_LAST) { - if (idx_insert_last != -1) { - idx_insert = idx_insert_last; - } else { - idx_insert = size; - } - } else { - if (idx_insert_first != -1) { - idx_insert = idx_insert_first + 1; - } - } - - // always read samples from chain input buffer - effect->setInBuffer(mInBuffer); - - // if last effect in the chain, output samples to chain - // output buffer, otherwise to chain input buffer - if (idx_insert == size) { - if (idx_insert != 0) { - mEffects[idx_insert-1]->setOutBuffer(mInBuffer); - 
mEffects[idx_insert-1]->configure(); - } - effect->setOutBuffer(mOutBuffer); - } else { - effect->setOutBuffer(mInBuffer); - } - mEffects.insertAt(effect, idx_insert); - - ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this, idx_insert); - } - effect->configure(); - return NO_ERROR; -} - -// removeEffect_l() must be called with PlaybackThread::mLock held -size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect) -{ - Mutex::Autolock _l(mLock); - size_t size = mEffects.size(); - uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK; - - for (size_t i = 0; i < size; i++) { - if (effect == mEffects[i]) { - // calling stop here will remove pre-processing effect from the audio HAL. - // This is safe as we hold the EffectChain mutex which guarantees that we are not in - // the middle of a read from audio HAL - if (mEffects[i]->state() == EffectModule::ACTIVE || - mEffects[i]->state() == EffectModule::STOPPING) { - mEffects[i]->stop(); - } - if (type == EFFECT_FLAG_TYPE_AUXILIARY) { - delete[] effect->inBuffer(); - } else { - if (i == size - 1 && i != 0) { - mEffects[i - 1]->setOutBuffer(mOutBuffer); - mEffects[i - 1]->configure(); + (void) closedir(dir); + if (entryCount > MAX_KEEP) { + qsort(entries, entryCount, sizeof(Entry), comparEntry); + for (size_t i = 0; i < entryCount - MAX_KEEP; ++i) { + strcpy(&teePath[teePathLen], entries[i].mName); + (void) unlink(teePath); } } - mEffects.removeAt(i); - ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(), this, i); - break; - } - } - - return mEffects.size(); -} - -// setDevice_l() must be called with PlaybackThread::mLock held -void AudioFlinger::EffectChain::setDevice_l(audio_devices_t device) -{ - size_t size = mEffects.size(); - for (size_t i = 0; i < size; i++) { - mEffects[i]->setDevice(device); - } -} - -// setMode_l() must be called with PlaybackThread::mLock held -void AudioFlinger::EffectChain::setMode_l(audio_mode_t mode) -{ 
- size_t size = mEffects.size(); - for (size_t i = 0; i < size; i++) { - mEffects[i]->setMode(mode); - } -} - -// setAudioSource_l() must be called with PlaybackThread::mLock held -void AudioFlinger::EffectChain::setAudioSource_l(audio_source_t source) -{ - size_t size = mEffects.size(); - for (size_t i = 0; i < size; i++) { - mEffects[i]->setAudioSource(source); - } -} - -// setVolume_l() must be called with PlaybackThread::mLock held -bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right) -{ - uint32_t newLeft = *left; - uint32_t newRight = *right; - bool hasControl = false; - int ctrlIdx = -1; - size_t size = mEffects.size(); - - // first update volume controller - for (size_t i = size; i > 0; i--) { - if (mEffects[i - 1]->isProcessEnabled() && - (mEffects[i - 1]->desc().flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL) { - ctrlIdx = i - 1; - hasControl = true; - break; - } - } - - if (ctrlIdx == mVolumeCtrlIdx && *left == mLeftVolume && *right == mRightVolume) { - if (hasControl) { - *left = mNewLeftVolume; - *right = mNewRightVolume; - } - return hasControl; - } - - mVolumeCtrlIdx = ctrlIdx; - mLeftVolume = newLeft; - mRightVolume = newRight; - - // second get volume update from volume controller - if (ctrlIdx >= 0) { - mEffects[ctrlIdx]->setVolume(&newLeft, &newRight, true); - mNewLeftVolume = newLeft; - mNewRightVolume = newRight; - } - // then indicate volume to all other effects in chain. 
- // Pass altered volume to effects before volume controller - // and requested volume to effects after controller - uint32_t lVol = newLeft; - uint32_t rVol = newRight; - - for (size_t i = 0; i < size; i++) { - if ((int)i == ctrlIdx) continue; - // this also works for ctrlIdx == -1 when there is no volume controller - if ((int)i > ctrlIdx) { - lVol = *left; - rVol = *right; - } - mEffects[i]->setVolume(&lVol, &rVol, false); - } - *left = newLeft; - *right = newRight; - - return hasControl; -} - -void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId); - result.append(buffer); - - bool locked = tryLock(mLock); - // failed to lock - AudioFlinger is probably deadlocked - if (!locked) { - result.append("\tCould not lock mutex:\n"); - } - - result.append("\tNum fx In buffer Out buffer Active tracks:\n"); - snprintf(buffer, SIZE, "\t%02d 0x%08x 0x%08x %d\n", - mEffects.size(), - (uint32_t)mInBuffer, - (uint32_t)mOutBuffer, - mActiveTrackCnt); - result.append(buffer); - write(fd, result.string(), result.size()); - - for (size_t i = 0; i < mEffects.size(); ++i) { - sp<EffectModule> effect = mEffects[i]; - if (effect != 0) { - effect->dump(fd, args); - } - } - - if (locked) { - mLock.unlock(); - } -} - -// must be called with ThreadBase::mLock held -void AudioFlinger::EffectChain::setEffectSuspended_l( - const effect_uuid_t *type, bool suspend) -{ - sp<SuspendedEffectDesc> desc; - // use effect type UUID timelow as key as there is no real risk of identical - // timeLow fields among effect type UUIDs. 
- ssize_t index = mSuspendedEffects.indexOfKey(type->timeLow); - if (suspend) { - if (index >= 0) { - desc = mSuspendedEffects.valueAt(index); } else { - desc = new SuspendedEffectDesc(); - desc->mType = *type; - mSuspendedEffects.add(type->timeLow, desc); - ALOGV("setEffectSuspended_l() add entry for %08x", type->timeLow); - } - if (desc->mRefCount++ == 0) { - sp<EffectModule> effect = getEffectIfEnabled(type); - if (effect != 0) { - desc->mEffect = effect; - effect->setSuspended(true); - effect->setEnabled(false); + if (fd >= 0) { + fdprintf(fd, "unable to rotate tees in %s: %s\n", teePath, strerror(errno)); } } - } else { - if (index < 0) { - return; - } - desc = mSuspendedEffects.valueAt(index); - if (desc->mRefCount <= 0) { - ALOGW("setEffectSuspended_l() restore refcount should not be 0 %d", desc->mRefCount); - desc->mRefCount = 1; - } - if (--desc->mRefCount == 0) { - ALOGV("setEffectSuspended_l() remove entry for %08x", mSuspendedEffects.keyAt(index)); - if (desc->mEffect != 0) { - sp<EffectModule> effect = desc->mEffect.promote(); - if (effect != 0) { - effect->setSuspended(false); - effect->lock(); - EffectHandle *handle = effect->controlHandle_l(); - if (handle != NULL && !handle->destroyed_l()) { - effect->setEnabled_l(handle->enabled()); + char teeTime[16]; + struct timeval tv; + gettimeofday(&tv, NULL); + struct tm tm; + localtime_r(&tv.tv_sec, &tm); + strftime(teeTime, sizeof(teeTime), "%Y%m%d%H%M%S", &tm); + snprintf(&teePath[teePathLen], sizeof(teePath) - teePathLen, "%s_%d.wav", teeTime, id); + // if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd + int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR); + if (teeFd >= 0) { + char wavHeader[44]; + memcpy(wavHeader, + "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0", + sizeof(wavHeader)); + NBAIO_Format format = teeSource->format(); + unsigned channelCount = Format_channelCount(format); + 
ALOG_ASSERT(channelCount <= FCC_2); + uint32_t sampleRate = Format_sampleRate(format); + wavHeader[22] = channelCount; // number of channels + wavHeader[24] = sampleRate; // sample rate + wavHeader[25] = sampleRate >> 8; + wavHeader[32] = channelCount * 2; // block alignment + write(teeFd, wavHeader, sizeof(wavHeader)); + size_t total = 0; + bool firstRead = true; + for (;;) { +#define TEE_SINK_READ 1024 + short buffer[TEE_SINK_READ * FCC_2]; + size_t count = TEE_SINK_READ; + ssize_t actual = teeSource->read(buffer, count, + AudioBufferProvider::kInvalidPTS); + bool wasFirstRead = firstRead; + firstRead = false; + if (actual <= 0) { + if (actual == (ssize_t) OVERRUN && wasFirstRead) { + continue; } - effect->unlock(); - } - desc->mEffect.clear(); - } - mSuspendedEffects.removeItemsAt(index); - } - } -} - -// must be called with ThreadBase::mLock held -void AudioFlinger::EffectChain::setEffectSuspendedAll_l(bool suspend) -{ - sp<SuspendedEffectDesc> desc; - - ssize_t index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll); - if (suspend) { - if (index >= 0) { - desc = mSuspendedEffects.valueAt(index); - } else { - desc = new SuspendedEffectDesc(); - mSuspendedEffects.add((int)kKeyForSuspendAll, desc); - ALOGV("setEffectSuspendedAll_l() add entry for 0"); - } - if (desc->mRefCount++ == 0) { - Vector< sp<EffectModule> > effects; - getSuspendEligibleEffects(effects); - for (size_t i = 0; i < effects.size(); i++) { - setEffectSuspended_l(&effects[i]->desc().type, true); - } - } - } else { - if (index < 0) { - return; - } - desc = mSuspendedEffects.valueAt(index); - if (desc->mRefCount <= 0) { - ALOGW("setEffectSuspendedAll_l() restore refcount should not be 0 %d", desc->mRefCount); - desc->mRefCount = 1; - } - if (--desc->mRefCount == 0) { - Vector<const effect_uuid_t *> types; - for (size_t i = 0; i < mSuspendedEffects.size(); i++) { - if (mSuspendedEffects.keyAt(i) == (int)kKeyForSuspendAll) { - continue; + break; } - 
types.add(&mSuspendedEffects.valueAt(i)->mType); - } - for (size_t i = 0; i < types.size(); i++) { - setEffectSuspended_l(types[i], false); - } - ALOGV("setEffectSuspendedAll_l() remove entry for %08x", mSuspendedEffects.keyAt(index)); - mSuspendedEffects.removeItem((int)kKeyForSuspendAll); - } - } -} - - -// The volume effect is used for automated tests only -#ifndef OPENSL_ES_H_ -static const effect_uuid_t SL_IID_VOLUME_ = { 0x09e8ede0, 0xddde, 0x11db, 0xb4f6, - { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }; -const effect_uuid_t * const SL_IID_VOLUME = &SL_IID_VOLUME_; -#endif //OPENSL_ES_H_ - -bool AudioFlinger::EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc) -{ - // auxiliary effects and visualizer are never suspended on output mix - if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) && - (((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) || - (memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) || - (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) { - return false; - } - return true; -} - -void AudioFlinger::EffectChain::getSuspendEligibleEffects(Vector< sp<AudioFlinger::EffectModule> > &effects) -{ - effects.clear(); - for (size_t i = 0; i < mEffects.size(); i++) { - if (isEffectEligibleForSuspend(mEffects[i]->desc())) { - effects.add(mEffects[i]); - } - } -} - -sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectIfEnabled( - const effect_uuid_t *type) -{ - sp<EffectModule> effect = getEffectFromType_l(type); - return effect != 0 && effect->isEnabled() ? 
effect : 0; -} - -void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, - bool enabled) -{ - ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow); - if (enabled) { - if (index < 0) { - // if the effect is not suspend check if all effects are suspended - index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll); - if (index < 0) { - return; + ALOG_ASSERT(actual <= (ssize_t)count); + write(teeFd, buffer, actual * channelCount * sizeof(short)); + total += actual; } - if (!isEffectEligibleForSuspend(effect->desc())) { - return; + lseek(teeFd, (off_t) 4, SEEK_SET); + uint32_t temp = 44 + total * channelCount * sizeof(short) - 8; + write(teeFd, &temp, sizeof(temp)); + lseek(teeFd, (off_t) 40, SEEK_SET); + temp = total * channelCount * sizeof(short); + write(teeFd, &temp, sizeof(temp)); + close(teeFd); + if (fd >= 0) { + fdprintf(fd, "tee copied to %s\n", teePath); } - setEffectSuspended_l(&effect->desc().type, enabled); - index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow); - if (index < 0) { - ALOGW("checkSuspendOnEffectEnabled() Fx should be suspended here!"); - return; + } else { + if (fd >= 0) { + fdprintf(fd, "unable to create tee %s: %s\n", teePath, strerror(errno)); } } - ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x", - effect->desc().type.timeLow); - sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index); - // if effect is requested to suspended but was not yet enabled, supend it now. 
- if (desc->mEffect == 0) { - desc->mEffect = effect; - effect->setEnabled(false); - effect->setSuspended(true); - } - } else { - if (index < 0) { - return; - } - ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x", - effect->desc().type.timeLow); - sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index); - desc->mEffect.clear(); - effect->setSuspended(false); } } - -#undef LOG_TAG -#define LOG_TAG "AudioFlinger" +#endif // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h index 49e2b2c..b0efef6 100644 --- a/services/audioflinger/AudioFlinger.h +++ b/services/audioflinger/AudioFlinger.h @@ -53,6 +53,8 @@ #include <powermanager/IPowerManager.h> +#include <media/nbaio/NBLog.h> + namespace android { class audio_track_cblk_t; @@ -61,6 +63,7 @@ class AudioMixer; class AudioBuffer; class AudioResampler; class FastMixer; +class ServerProxy; // ---------------------------------------------------------------------------- @@ -75,6 +78,11 @@ class FastMixer; static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3); +#define MAX_GAIN 4096.0f +#define MAX_GAIN_INT 0x1000 + +#define INCLUDING_FROM_AUDIOFLINGER_H + class AudioFlinger : public BinderService<AudioFlinger>, public BnAudioFlinger @@ -87,13 +95,12 @@ public: // IAudioFlinger interface, in binder opcode order virtual sp<IAudioTrack> createTrack( - pid_t pid, audio_stream_type_t streamType, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, - IAudioFlinger::track_flags_t flags, + size_t frameCount, + IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, @@ -101,12 +108,11 @@ public: status_t *status); virtual sp<IAudioRecord> openRecord( - pid_t pid, audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - int frameCount, + size_t 
frameCount, IAudioFlinger::track_flags_t flags, pid_t tid, int *sessionId, @@ -174,7 +180,7 @@ public: virtual status_t setVoiceVolume(float volume); - virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, + virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, audio_io_handle_t output) const; virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const; @@ -192,7 +198,7 @@ public: virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid, effect_descriptor_t *descriptor) const; - virtual sp<IEffect> createEffect(pid_t pid, + virtual sp<IEffect> createEffect( effect_descriptor_t *pDesc, const sp<IEffectClient>& effectClient, int32_t priority, @@ -207,8 +213,8 @@ public: virtual audio_module_handle_t loadHwModule(const char *name); - virtual int32_t getPrimaryOutputSamplingRate(); - virtual int32_t getPrimaryOutputFrameCount(); + virtual uint32_t getPrimaryOutputSamplingRate(); + virtual size_t getPrimaryOutputFrameCount(); virtual status_t onTransact( uint32_t code, @@ -218,6 +224,13 @@ public: // end of IAudioFlinger interface + sp<NBLog::Writer> newWriter_l(size_t size, const char *name); + void unregisterWriter(const sp<NBLog::Writer>& writer); +private: + static const size_t kLogMemorySize = 10 * 1024; + sp<MemoryDealer> mLogMemoryDealer; // == 0 when NBLog is disabled +public: + class SyncEvent; typedef void (*sync_event_callback_t)(const wp<SyncEvent>& event) ; @@ -269,19 +282,28 @@ private: virtual ~AudioFlinger(); // call in any IAudioFlinger method that accesses mPrimaryHardwareDev - status_t initCheck() const { return mPrimaryHardwareDev == NULL ? NO_INIT : NO_ERROR; } + status_t initCheck() const { return mPrimaryHardwareDev == NULL ? 
+ NO_INIT : NO_ERROR; } // RefBase virtual void onFirstRef(); - AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module, audio_devices_t devices); + AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module, + audio_devices_t devices); void purgeStaleEffects_l(); // standby delay for MIXER and DUPLICATING playback threads is read from property // ro.audio.flinger_standbytime_ms or defaults to kDefaultStandbyTimeInNsecs static nsecs_t mStandbyTimeInNsecs; + // incremented by 2 when screen state changes, bit 0 == 1 means "off" + // AudioFlinger::setParameters() updates, other threads read w/o lock + static uint32_t mScreenState; + // Internal dump utilities. + static const int kDumpLockRetries = 50; + static const int kDumpLockSleepUs = 20000; + static bool dumpTryLock(Mutex& mutex); void dumpPermissionDenial(int fd, const Vector<String16>& args); void dumpClients(int fd, const Vector<String16>& args); void dumpInternals(int fd, const Vector<String16>& args); @@ -346,409 +368,6 @@ private: struct AudioStreamOut; struct AudioStreamIn; - class ThreadBase : public Thread { - public: - - enum type_t { - MIXER, // Thread class is MixerThread - DIRECT, // Thread class is DirectOutputThread - DUPLICATING, // Thread class is DuplicatingThread - RECORD // Thread class is RecordThread - }; - - ThreadBase (const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id, - audio_devices_t outDevice, audio_devices_t inDevice, type_t type); - virtual ~ThreadBase(); - - void dumpBase(int fd, const Vector<String16>& args); - void dumpEffectChains(int fd, const Vector<String16>& args); - - void clearPowerManager(); - - // base for record and playback - class TrackBase : public ExtendedAudioBufferProvider, public RefBase { - - public: - enum track_state { - IDLE, - TERMINATED, - FLUSHED, - STOPPED, - // next 2 states are currently used for fast tracks only - STOPPING_1, // waiting for first underrun - STOPPING_2, // waiting for presentation complete - RESUMING, - ACTIVE, - 
PAUSING, - PAUSED - }; - - TrackBase(ThreadBase *thread, - const sp<Client>& client, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId); - virtual ~TrackBase(); - - virtual status_t start(AudioSystem::sync_event_t event, - int triggerSession) = 0; - virtual void stop() = 0; - sp<IMemory> getCblk() const { return mCblkMemory; } - audio_track_cblk_t* cblk() const { return mCblk; } - int sessionId() const { return mSessionId; } - virtual status_t setSyncEvent(const sp<SyncEvent>& event); - - protected: - TrackBase(const TrackBase&); - TrackBase& operator = (const TrackBase&); - - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0; - virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); - - // ExtendedAudioBufferProvider interface is only needed for Track, - // but putting it in TrackBase avoids the complexity of virtual inheritance - virtual size_t framesReady() const { return SIZE_MAX; } - - audio_format_t format() const { - return mFormat; - } - - int channelCount() const { return mChannelCount; } - - audio_channel_mask_t channelMask() const { return mChannelMask; } - - int sampleRate() const; // FIXME inline after cblk sr moved - - // Return a pointer to the start of a contiguous slice of the track buffer. - // Parameter 'offset' is the requested start position, expressed in - // monotonically increasing frame units relative to the track epoch. - // Parameter 'frames' is the requested length, also in frame units. - // Always returns non-NULL. It is the caller's responsibility to - // verify that this will be successful; the result of calling this - // function with invalid 'offset' or 'frames' is undefined. 
- void* getBuffer(uint32_t offset, uint32_t frames) const; - - bool isStopped() const { - return (mState == STOPPED || mState == FLUSHED); - } - - // for fast tracks only - bool isStopping() const { - return mState == STOPPING_1 || mState == STOPPING_2; - } - bool isStopping_1() const { - return mState == STOPPING_1; - } - bool isStopping_2() const { - return mState == STOPPING_2; - } - - bool isTerminated() const { - return mState == TERMINATED; - } - - bool step(); - void reset(); - - const wp<ThreadBase> mThread; - /*const*/ sp<Client> mClient; // see explanation at ~TrackBase() why not const - sp<IMemory> mCblkMemory; - audio_track_cblk_t* mCblk; - void* mBuffer; // start of track buffer, typically in shared memory - void* mBufferEnd; // &mBuffer[mFrameCount * frameSize], where frameSize - // is based on mChannelCount and 16-bit samples - uint32_t mFrameCount; - // we don't really need a lock for these - track_state mState; - const uint32_t mSampleRate; // initial sample rate only; for tracks which - // support dynamic rates, the current value is in control block - const audio_format_t mFormat; - bool mStepServerFailed; - const int mSessionId; - uint8_t mChannelCount; - audio_channel_mask_t mChannelMask; - Vector < sp<SyncEvent> >mSyncEvents; - }; - - enum { - CFG_EVENT_IO, - CFG_EVENT_PRIO - }; - - class ConfigEvent { - public: - ConfigEvent(int type) : mType(type) {} - virtual ~ConfigEvent() {} - - int type() const { return mType; } - - virtual void dump(char *buffer, size_t size) = 0; - - private: - const int mType; - }; - - class IoConfigEvent : public ConfigEvent { - public: - IoConfigEvent(int event, int param) : - ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(event) {} - virtual ~IoConfigEvent() {} - - int event() const { return mEvent; } - int param() const { return mParam; } - - virtual void dump(char *buffer, size_t size) { - snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam); - } - - private: - const int mEvent; - const int 
mParam; - }; - - class PrioConfigEvent : public ConfigEvent { - public: - PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) : - ConfigEvent(CFG_EVENT_PRIO), mPid(pid), mTid(tid), mPrio(prio) {} - virtual ~PrioConfigEvent() {} - - pid_t pid() const { return mPid; } - pid_t tid() const { return mTid; } - int32_t prio() const { return mPrio; } - - virtual void dump(char *buffer, size_t size) { - snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio); - } - - private: - const pid_t mPid; - const pid_t mTid; - const int32_t mPrio; - }; - - - class PMDeathRecipient : public IBinder::DeathRecipient { - public: - PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {} - virtual ~PMDeathRecipient() {} - - // IBinder::DeathRecipient - virtual void binderDied(const wp<IBinder>& who); - - private: - PMDeathRecipient(const PMDeathRecipient&); - PMDeathRecipient& operator = (const PMDeathRecipient&); - - wp<ThreadBase> mThread; - }; - - virtual status_t initCheck() const = 0; - - // static externally-visible - type_t type() const { return mType; } - audio_io_handle_t id() const { return mId;} - - // dynamic externally-visible - uint32_t sampleRate() const { return mSampleRate; } - int channelCount() const { return mChannelCount; } - audio_channel_mask_t channelMask() const { return mChannelMask; } - audio_format_t format() const { return mFormat; } - // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects, - // and returns the normal mix buffer's frame count. - size_t frameCount() const { return mNormalFrameCount; } - // Return's the HAL's frame count i.e. fast mixer buffer size. - size_t frameCountHAL() const { return mFrameCount; } - - // Should be "virtual status_t requestExitAndWait()" and override same - // method in Thread, but Thread::requestExitAndWait() is not yet virtual. 
- void exit(); - virtual bool checkForNewParameters_l() = 0; - virtual status_t setParameters(const String8& keyValuePairs); - virtual String8 getParameters(const String8& keys) = 0; - virtual void audioConfigChanged_l(int event, int param = 0) = 0; - void sendIoConfigEvent(int event, int param = 0); - void sendIoConfigEvent_l(int event, int param = 0); - void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio); - void processConfigEvents(); - - // see note at declaration of mStandby, mOutDevice and mInDevice - bool standby() const { return mStandby; } - audio_devices_t outDevice() const { return mOutDevice; } - audio_devices_t inDevice() const { return mInDevice; } - - virtual audio_stream_t* stream() const = 0; - - sp<EffectHandle> createEffect_l( - const sp<AudioFlinger::Client>& client, - const sp<IEffectClient>& effectClient, - int32_t priority, - int sessionId, - effect_descriptor_t *desc, - int *enabled, - status_t *status); - void disconnectEffect(const sp< EffectModule>& effect, - EffectHandle *handle, - bool unpinIfLast); - - // return values for hasAudioSession (bit field) - enum effect_state { - EFFECT_SESSION = 0x1, // the audio session corresponds to at least one - // effect - TRACK_SESSION = 0x2 // the audio session corresponds to at least one - // track - }; - - // get effect chain corresponding to session Id. - sp<EffectChain> getEffectChain(int sessionId); - // same as getEffectChain() but must be called with ThreadBase mutex locked - sp<EffectChain> getEffectChain_l(int sessionId) const; - // add an effect chain to the chain list (mEffectChains) - virtual status_t addEffectChain_l(const sp<EffectChain>& chain) = 0; - // remove an effect chain from the chain list (mEffectChains) - virtual size_t removeEffectChain_l(const sp<EffectChain>& chain) = 0; - // lock all effect chains Mutexes. Must be called before releasing the - // ThreadBase mutex before processing the mixer and effects. 
This guarantees the - // integrity of the chains during the process. - // Also sets the parameter 'effectChains' to current value of mEffectChains. - void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains); - // unlock effect chains after process - void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains); - // set audio mode to all effect chains - void setMode(audio_mode_t mode); - // get effect module with corresponding ID on specified audio session - sp<AudioFlinger::EffectModule> getEffect(int sessionId, int effectId); - sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId); - // add and effect module. Also creates the effect chain is none exists for - // the effects audio session - status_t addEffect_l(const sp< EffectModule>& effect); - // remove and effect module. Also removes the effect chain is this was the last - // effect - void removeEffect_l(const sp< EffectModule>& effect); - // detach all tracks connected to an auxiliary effect - virtual void detachAuxEffect_l(int effectId) {} - // returns either EFFECT_SESSION if effects on this audio session exist in one - // chain, or TRACK_SESSION if tracks on this audio session exist, or both - virtual uint32_t hasAudioSession(int sessionId) const = 0; - // the value returned by default implementation is not important as the - // strategy is only meaningful for PlaybackThread which implements this method - virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; } - - // suspend or restore effect according to the type of effect passed. 
a NULL - // type pointer means suspend all effects in the session - void setEffectSuspended(const effect_uuid_t *type, - bool suspend, - int sessionId = AUDIO_SESSION_OUTPUT_MIX); - // check if some effects must be suspended/restored when an effect is enabled - // or disabled - void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, - bool enabled, - int sessionId = AUDIO_SESSION_OUTPUT_MIX); - void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect, - bool enabled, - int sessionId = AUDIO_SESSION_OUTPUT_MIX); - - virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0; - virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0; - - - mutable Mutex mLock; - - protected: - - // entry describing an effect being suspended in mSuspendedSessions keyed vector - class SuspendedSessionDesc : public RefBase { - public: - SuspendedSessionDesc() : mRefCount(0) {} - - int mRefCount; // number of active suspend requests - effect_uuid_t mType; // effect type UUID - }; - - void acquireWakeLock(); - void acquireWakeLock_l(); - void releaseWakeLock(); - void releaseWakeLock_l(); - void setEffectSuspended_l(const effect_uuid_t *type, - bool suspend, - int sessionId); - // updated mSuspendedSessions when an effect suspended or restored - void updateSuspendedSessions_l(const effect_uuid_t *type, - bool suspend, - int sessionId); - // check if some effects must be suspended when an effect chain is added - void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain); - - virtual void preExit() { } - - friend class AudioFlinger; // for mEffectChains - - const type_t mType; - - // Used by parameters, config events, addTrack_l, exit - Condition mWaitWorkCV; - - const sp<AudioFlinger> mAudioFlinger; - uint32_t mSampleRate; - size_t mFrameCount; // output HAL, direct output, record - size_t mNormalFrameCount; // normal mixer and effects - audio_channel_mask_t mChannelMask; - uint16_t mChannelCount; - size_t mFrameSize; - audio_format_t mFormat; - - // 
Parameter sequence by client: binder thread calling setParameters(): - // 1. Lock mLock - // 2. Append to mNewParameters - // 3. mWaitWorkCV.signal - // 4. mParamCond.waitRelative with timeout - // 5. read mParamStatus - // 6. mWaitWorkCV.signal - // 7. Unlock - // - // Parameter sequence by server: threadLoop calling checkForNewParameters_l(): - // 1. Lock mLock - // 2. If there is an entry in mNewParameters proceed ... - // 2. Read first entry in mNewParameters - // 3. Process - // 4. Remove first entry from mNewParameters - // 5. Set mParamStatus - // 6. mParamCond.signal - // 7. mWaitWorkCV.wait with timeout (this is to avoid overwriting mParamStatus) - // 8. Unlock - Condition mParamCond; - Vector<String8> mNewParameters; - status_t mParamStatus; - - Vector<ConfigEvent *> mConfigEvents; - - // These fields are written and read by thread itself without lock or barrier, - // and read by other threads without lock or barrier via standby() , outDevice() - // and inDevice(). - // Because of the absence of a lock or barrier, any other thread that reads - // these fields must use the information in isolation, or be prepared to deal - // with possibility that it might be inconsistent with other information. - bool mStandby; // Whether thread is currently in standby. - audio_devices_t mOutDevice; // output device - audio_devices_t mInDevice; // input device - audio_source_t mAudioSource; // (see audio.h, audio_source_t) - - const audio_io_handle_t mId; - Vector< sp<EffectChain> > mEffectChains; - - static const int kNameLength = 16; // prctl(PR_SET_NAME) limit - char mName[kNameLength]; - sp<IPowerManager> mPowerManager; - sp<IBinder> mWakeLockToken; - const sp<PMDeathRecipient> mDeathRecipient; - // list of suspended effects per session and per type. 
The first vector is - // keyed by session ID, the second by type UUID timeLow field - KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > > mSuspendedSessions; - }; - struct stream_type_t { stream_type_t() : volume(1.0f), @@ -760,644 +379,10 @@ private: }; // --- PlaybackThread --- - class PlaybackThread : public ThreadBase { - public: - - enum mixer_state { - MIXER_IDLE, // no active tracks - MIXER_TRACKS_ENABLED, // at least one active track, but no track has any data ready - MIXER_TRACKS_READY // at least one active track, and at least one track has data - // standby mode does not have an enum value - // suspend by audio policy manager is orthogonal to mixer state - }; - - // playback track - class Track : public TrackBase, public VolumeProvider { - public: - Track( PlaybackThread *thread, - const sp<Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId, - IAudioFlinger::track_flags_t flags); - virtual ~Track(); - - static void appendDumpHeader(String8& result); - void dump(char* buffer, size_t size); - virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, - int triggerSession = 0); - virtual void stop(); - void pause(); - - void flush(); - void destroy(); - void mute(bool); - int name() const { return mName; } - - audio_stream_type_t streamType() const { - return mStreamType; - } - status_t attachAuxEffect(int EffectId); - void setAuxBuffer(int EffectId, int32_t *buffer); - int32_t *auxBuffer() const { return mAuxBuffer; } - void setMainBuffer(int16_t *buffer) { mMainBuffer = buffer; } - int16_t *mainBuffer() const { return mMainBuffer; } - int auxEffectId() const { return mAuxEffectId; } - - // implement FastMixerState::VolumeProvider interface - virtual uint32_t getVolumeLR(); - virtual status_t setSyncEvent(const sp<SyncEvent>& event); - - protected: - // for numerous 
- friend class PlaybackThread; - friend class MixerThread; - friend class DirectOutputThread; - - Track(const Track&); - Track& operator = (const Track&); - - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS); - // releaseBuffer() not overridden - - virtual size_t framesReady() const; - - bool isMuted() const { return mMute; } - bool isPausing() const { - return mState == PAUSING; - } - bool isPaused() const { - return mState == PAUSED; - } - bool isResuming() const { - return mState == RESUMING; - } - bool isReady() const; - void setPaused() { mState = PAUSED; } - void reset(); - - bool isOutputTrack() const { - return (mStreamType == AUDIO_STREAM_CNT); - } - - sp<IMemory> sharedBuffer() const { return mSharedBuffer; } - - bool presentationComplete(size_t framesWritten, size_t audioHalFrames); - - public: - void triggerEvents(AudioSystem::sync_event_t type); - virtual bool isTimedTrack() const { return false; } - bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; } - - protected: - - // written by Track::mute() called by binder thread(s), without a mutex or barrier. - // read by Track::isMuted() called by playback thread, also without a mutex or barrier. - // The lack of mutex or barrier is safe because the mute status is only used by itself. 
- bool mMute; - - // FILLED state is used for suppressing volume ramp at begin of playing - enum {FS_INVALID, FS_FILLING, FS_FILLED, FS_ACTIVE}; - mutable uint8_t mFillingUpStatus; - int8_t mRetryCount; - const sp<IMemory> mSharedBuffer; - bool mResetDone; - const audio_stream_type_t mStreamType; - int mName; // track name on the normal mixer, - // allocated statically at track creation time, - // and is even allocated (though unused) for fast tracks - // FIXME don't allocate track name for fast tracks - int16_t *mMainBuffer; - int32_t *mAuxBuffer; - int mAuxEffectId; - bool mHasVolumeController; - size_t mPresentationCompleteFrames; // number of frames written to the audio HAL - // when this track will be fully rendered - private: - IAudioFlinger::track_flags_t mFlags; - - // The following fields are only for fast tracks, and should be in a subclass - int mFastIndex; // index within FastMixerState::mFastTracks[]; - // either mFastIndex == -1 if not isFastTrack() - // or 0 < mFastIndex < FastMixerState::kMaxFast because - // index 0 is reserved for normal mixer's submix; - // index is allocated statically at track creation time - // but the slot is only used if track is active - FastTrackUnderruns mObservedUnderruns; // Most recently observed value of - // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns - uint32_t mUnderrunCount; // Counter of total number of underruns, never reset - volatile float mCachedVolume; // combined master volume and stream type volume; - // 'volatile' means accessed without lock or - // barrier, but is read/written atomically - }; // end of Track - - class TimedTrack : public Track { - public: - static sp<TimedTrack> create(PlaybackThread *thread, - const sp<Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId); - virtual ~TimedTrack(); - - class TimedBuffer { - public: - TimedBuffer(); 
- TimedBuffer(const sp<IMemory>& buffer, int64_t pts); - const sp<IMemory>& buffer() const { return mBuffer; } - int64_t pts() const { return mPTS; } - uint32_t position() const { return mPosition; } - void setPosition(uint32_t pos) { mPosition = pos; } - private: - sp<IMemory> mBuffer; - int64_t mPTS; - uint32_t mPosition; - }; - - // Mixer facing methods. - virtual bool isTimedTrack() const { return true; } - virtual size_t framesReady() const; - - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, - int64_t pts); - virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); - - // Client/App facing methods. - status_t allocateTimedBuffer(size_t size, - sp<IMemory>* buffer); - status_t queueTimedBuffer(const sp<IMemory>& buffer, - int64_t pts); - status_t setMediaTimeTransform(const LinearTransform& xform, - TimedAudioTrack::TargetTimeline target); - - private: - TimedTrack(PlaybackThread *thread, - const sp<Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId); - - void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer); - void timedYieldSilence_l(uint32_t numFrames, - AudioBufferProvider::Buffer* buffer); - void trimTimedBufferQueue_l(); - void trimTimedBufferQueueHead_l(const char* logTag); - void updateFramesPendingAfterTrim_l(const TimedBuffer& buf, - const char* logTag); - - uint64_t mLocalTimeFreq; - LinearTransform mLocalTimeToSampleTransform; - LinearTransform mMediaTimeToSampleTransform; - sp<MemoryDealer> mTimedMemoryDealer; - - Vector<TimedBuffer> mTimedBufferQueue; - bool mQueueHeadInFlight; - bool mTrimQueueHeadOnRelease; - uint32_t mFramesPendingInQueue; - - uint8_t* mTimedSilenceBuffer; - uint32_t mTimedSilenceBufferSize; - mutable Mutex mTimedBufferQueueLock; - bool mTimedAudioOutputOnTime; - CCHelper mCCHelper; - - Mutex 
mMediaTimeTransformLock; - LinearTransform mMediaTimeTransform; - bool mMediaTimeTransformValid; - TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget; - }; - - - // playback track - class OutputTrack : public Track { - public: - - class Buffer: public AudioBufferProvider::Buffer { - public: - int16_t *mBuffer; - }; - - OutputTrack(PlaybackThread *thread, - DuplicatingThread *sourceThread, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount); - virtual ~OutputTrack(); - - virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, - int triggerSession = 0); - virtual void stop(); - bool write(int16_t* data, uint32_t frames); - bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; } - bool isActive() const { return mActive; } - const wp<ThreadBase>& thread() const { return mThread; } - - private: - - enum { - NO_MORE_BUFFERS = 0x80000001, // same in AudioTrack.h, ok to be different value - }; - - status_t obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs); - void clearBufferQueue(); - - // Maximum number of pending buffers allocated by OutputTrack::write() - static const uint8_t kMaxOverFlowBuffers = 10; - - Vector < Buffer* > mBufferQueue; - AudioBufferProvider::Buffer mOutBuffer; - bool mActive; - DuplicatingThread* const mSourceThread; // for waitTimeMs() in write() - }; // end of OutputTrack - - PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, - audio_io_handle_t id, audio_devices_t device, type_t type); - virtual ~PlaybackThread(); - - void dump(int fd, const Vector<String16>& args); - - // Thread virtuals - virtual status_t readyToRun(); - virtual bool threadLoop(); - - // RefBase - virtual void onFirstRef(); - -protected: - // Code snippets that were lifted up out of threadLoop() - virtual void threadLoop_mix() = 0; - virtual void threadLoop_sleepTime() = 0; - virtual void threadLoop_write(); - virtual void 
threadLoop_standby(); - virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove); - // prepareTracks_l reads and writes mActiveTracks, and returns - // the pending set of tracks to remove via Vector 'tracksToRemove'. The caller - // is responsible for clearing or destroying this Vector later on, when it - // is safe to do so. That will drop the final ref count and destroy the tracks. - virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0; +#include "Threads.h" - // ThreadBase virtuals - virtual void preExit(); - -public: - - virtual status_t initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; } - - // return estimated latency in milliseconds, as reported by HAL - uint32_t latency() const; - // same, but lock must already be held - uint32_t latency_l() const; - - void setMasterVolume(float value); - void setMasterMute(bool muted); - - void setStreamVolume(audio_stream_type_t stream, float value); - void setStreamMute(audio_stream_type_t stream, bool muted); - - float streamVolume(audio_stream_type_t stream) const; - - sp<Track> createTrack_l( - const sp<AudioFlinger::Client>& client, - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - const sp<IMemory>& sharedBuffer, - int sessionId, - IAudioFlinger::track_flags_t flags, - pid_t tid, - status_t *status); - - AudioStreamOut* getOutput() const; - AudioStreamOut* clearOutput(); - virtual audio_stream_t* stream() const; - - // a very large number of suspend() will eventually wraparound, but unlikely - void suspend() { (void) android_atomic_inc(&mSuspended); } - void restore() - { - // if restore() is done without suspend(), get back into - // range so that the next suspend() will operate correctly - if (android_atomic_dec(&mSuspended) <= 0) { - android_atomic_release_store(0, &mSuspended); - } - } - bool isSuspended() const - { return android_atomic_acquire_load(&mSuspended) 
> 0; } - - virtual String8 getParameters(const String8& keys); - virtual void audioConfigChanged_l(int event, int param = 0); - status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames); - int16_t *mixBuffer() const { return mMixBuffer; }; - - virtual void detachAuxEffect_l(int effectId); - status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, - int EffectId); - status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, - int EffectId); - - virtual status_t addEffectChain_l(const sp<EffectChain>& chain); - virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); - virtual uint32_t hasAudioSession(int sessionId) const; - virtual uint32_t getStrategyForSession_l(int sessionId); - - - virtual status_t setSyncEvent(const sp<SyncEvent>& event); - virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const; - void invalidateTracks(audio_stream_type_t streamType); - - - protected: - int16_t* mMixBuffer; - - // suspend count, > 0 means suspended. While suspended, the thread continues to pull from - // tracks and mix, but doesn't write to HAL. A2DP and SCO HAL implementations can't handle - // concurrent use of both of them, so Audio Policy Service suspends one of the threads to - // workaround that restriction. - // 'volatile' means accessed via atomic operations and no lock. - volatile int32_t mSuspended; - - int mBytesWritten; - private: - // mMasterMute is in both PlaybackThread and in AudioFlinger. When a - // PlaybackThread needs to find out if master-muted, it checks it's local - // copy rather than the one in AudioFlinger. This optimization saves a lock. - bool mMasterMute; - void setMasterMute_l(bool muted) { mMasterMute = muted; } - protected: - SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<> - - // Allocate a track name for a given channel mask. - // Returns name >= 0 if successful, -1 on failure. 
- virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId) = 0; - virtual void deleteTrackName_l(int name) = 0; - - // Time to sleep between cycles when: - virtual uint32_t activeSleepTimeUs() const; // mixer state MIXER_TRACKS_ENABLED - virtual uint32_t idleSleepTimeUs() const = 0; // mixer state MIXER_IDLE - virtual uint32_t suspendSleepTimeUs() const = 0; // audio policy manager suspended us - // No sleep when mixer state == MIXER_TRACKS_READY; relies on audio HAL stream->write() - // No sleep in standby mode; waits on a condition - - // Code snippets that are temporarily lifted up out of threadLoop() until the merge - void checkSilentMode_l(); - - // Non-trivial for DUPLICATING only - virtual void saveOutputTracks() { } - virtual void clearOutputTracks() { } - - // Cache various calculated values, at threadLoop() entry and after a parameter change - virtual void cacheParameters_l(); - - virtual uint32_t correctLatency(uint32_t latency) const; - - private: - - friend class AudioFlinger; // for numerous - - PlaybackThread(const Client&); - PlaybackThread& operator = (const PlaybackThread&); - - status_t addTrack_l(const sp<Track>& track); - void destroyTrack_l(const sp<Track>& track); - void removeTrack_l(const sp<Track>& track); - - void readOutputParameters(); - - virtual void dumpInternals(int fd, const Vector<String16>& args); - void dumpTracks(int fd, const Vector<String16>& args); - - SortedVector< sp<Track> > mTracks; - // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by DuplicatingThread - stream_type_t mStreamTypes[AUDIO_STREAM_CNT + 1]; - AudioStreamOut *mOutput; - - float mMasterVolume; - nsecs_t mLastWriteTime; - int mNumWrites; - int mNumDelayedWrites; - bool mInWrite; - - // FIXME rename these former local variables of threadLoop to standard "m" names - nsecs_t standbyTime; - size_t mixBufferSize; - - // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l() - 
uint32_t activeSleepTime; - uint32_t idleSleepTime; - - uint32_t sleepTime; - - // mixer status returned by prepareTracks_l() - mixer_state mMixerStatus; // current cycle - // previous cycle when in prepareTracks_l() - mixer_state mMixerStatusIgnoringFastTracks; - // FIXME or a separate ready state per track - - // FIXME move these declarations into the specific sub-class that needs them - // MIXER only - uint32_t sleepTimeShift; - - // same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value - nsecs_t standbyDelay; - - // MIXER only - nsecs_t maxPeriod; - - // DUPLICATING only - uint32_t writeFrames; - - private: - // The HAL output sink is treated as non-blocking, but current implementation is blocking - sp<NBAIO_Sink> mOutputSink; - // If a fast mixer is present, the blocking pipe sink, otherwise clear - sp<NBAIO_Sink> mPipeSink; - // The current sink for the normal mixer to write it's (sub)mix, mOutputSink or mPipeSink - sp<NBAIO_Sink> mNormalSink; - // For dumpsys - sp<NBAIO_Sink> mTeeSink; - sp<NBAIO_Source> mTeeSource; - uint32_t mScreenState; // cached copy of gScreenState - public: - virtual bool hasFastMixer() const = 0; - virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const - { FastTrackUnderruns dummy; return dummy; } - - protected: - // accessed by both binder threads and within threadLoop(), lock on mutex needed - unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available - - }; - - class MixerThread : public PlaybackThread { - public: - MixerThread (const sp<AudioFlinger>& audioFlinger, - AudioStreamOut* output, - audio_io_handle_t id, - audio_devices_t device, - type_t type = MIXER); - virtual ~MixerThread(); - - // Thread virtuals - - virtual bool checkForNewParameters_l(); - virtual void dumpInternals(int fd, const Vector<String16>& args); - - protected: - virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); - virtual int getTrackName_l(audio_channel_mask_t 
channelMask, int sessionId); - virtual void deleteTrackName_l(int name); - virtual uint32_t idleSleepTimeUs() const; - virtual uint32_t suspendSleepTimeUs() const; - virtual void cacheParameters_l(); - - // threadLoop snippets - virtual void threadLoop_write(); - virtual void threadLoop_standby(); - virtual void threadLoop_mix(); - virtual void threadLoop_sleepTime(); - virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove); - virtual uint32_t correctLatency(uint32_t latency) const; - - AudioMixer* mAudioMixer; // normal mixer - private: - // one-time initialization, no locks required - FastMixer* mFastMixer; // non-NULL if there is also a fast mixer - sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread - - // contents are not guaranteed to be consistent, no locks required - FastMixerDumpState mFastMixerDumpState; -#ifdef STATE_QUEUE_DUMP - StateQueueObserverDump mStateQueueObserverDump; - StateQueueMutatorDump mStateQueueMutatorDump; -#endif - AudioWatchdogDump mAudioWatchdogDump; - - // accessible only within the threadLoop(), no locks required - // mFastMixer->sq() // for mutating and pushing state - int32_t mFastMixerFutex; // for cold idle - - public: - virtual bool hasFastMixer() const { return mFastMixer != NULL; } - virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const { - ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks); - return mFastMixerDumpState.mTracks[fastIndex].mUnderruns; - } - }; - - class DirectOutputThread : public PlaybackThread { - public: - - DirectOutputThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, - audio_io_handle_t id, audio_devices_t device); - virtual ~DirectOutputThread(); - - // Thread virtuals - - virtual bool checkForNewParameters_l(); - - protected: - virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId); - virtual void deleteTrackName_l(int name); - virtual uint32_t activeSleepTimeUs() const; - virtual 
uint32_t idleSleepTimeUs() const; - virtual uint32_t suspendSleepTimeUs() const; - virtual void cacheParameters_l(); - - // threadLoop snippets - virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); - virtual void threadLoop_mix(); - virtual void threadLoop_sleepTime(); - - // volumes last sent to audio HAL with stream->set_volume() - float mLeftVolFloat; - float mRightVolFloat; - -private: - // prepareTracks_l() tells threadLoop_mix() the name of the single active track - sp<Track> mActiveTrack; - public: - virtual bool hasFastMixer() const { return false; } - }; - - class DuplicatingThread : public MixerThread { - public: - DuplicatingThread (const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread, - audio_io_handle_t id); - virtual ~DuplicatingThread(); - - // Thread virtuals - void addOutputTrack(MixerThread* thread); - void removeOutputTrack(MixerThread* thread); - uint32_t waitTimeMs() const { return mWaitTimeMs; } - protected: - virtual uint32_t activeSleepTimeUs() const; - - private: - bool outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks); - protected: - // threadLoop snippets - virtual void threadLoop_mix(); - virtual void threadLoop_sleepTime(); - virtual void threadLoop_write(); - virtual void threadLoop_standby(); - virtual void cacheParameters_l(); - - private: - // called from threadLoop, addOutputTrack, removeOutputTrack - virtual void updateWaitTime_l(); - protected: - virtual void saveOutputTracks(); - virtual void clearOutputTracks(); - private: - - uint32_t mWaitTimeMs; - SortedVector < sp<OutputTrack> > outputTracks; - SortedVector < sp<OutputTrack> > mOutputTracks; - public: - virtual bool hasFastMixer() const { return false; } - }; - - PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const; - MixerThread *checkMixerThread_l(audio_io_handle_t output) const; - RecordThread *checkRecordThread_l(audio_io_handle_t input) const; - // no range check, AudioFlinger::mLock held - bool 
streamMute_l(audio_stream_type_t stream) const - { return mStreamTypes[stream].mute; } - // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held - float streamVolume_l(audio_stream_type_t stream) const - { return mStreamTypes[stream].volume; } - void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2); - - // allocate an audio_io_handle_t, session ID, or effect ID - uint32_t nextUniqueId(); - - status_t moveEffectChain_l(int sessionId, - PlaybackThread *srcThread, - PlaybackThread *dstThread, - bool reRegister); - // return thread associated with primary hardware device, or NULL - PlaybackThread *primaryPlaybackThread_l() const; - audio_devices_t primaryOutputDevice_l() const; - - sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId); +#include "Effects.h" // server side of the client's IAudioTrack class TrackHandle : public android::BnAudioTrack { @@ -1408,7 +393,6 @@ private: virtual status_t start(); virtual void stop(); virtual void flush(); - virtual void mute(bool); virtual void pause(); virtual status_t attachAuxEffect(int effectId); virtual status_t allocateTimedBuffer(size_t size, @@ -1423,157 +407,6 @@ private: const sp<PlaybackThread::Track> mTrack; }; - void removeClient_l(pid_t pid); - void removeNotificationClient(pid_t pid); - - - // record thread - class RecordThread : public ThreadBase, public AudioBufferProvider - // derives from AudioBufferProvider interface for use by resampler - { - public: - - // record track - class RecordTrack : public TrackBase { - public: - RecordTrack(RecordThread *thread, - const sp<Client>& client, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - int sessionId); - virtual ~RecordTrack(); - - virtual status_t start(AudioSystem::sync_event_t event, int triggerSession); - virtual void stop(); - - void destroy(); - - // clear the buffer overflow flag - void clearOverflow() { mOverflow = false; } - // 
set the buffer overflow flag and return previous value - bool setOverflow() { bool tmp = mOverflow; mOverflow = true; return tmp; } - - static void appendDumpHeader(String8& result); - void dump(char* buffer, size_t size); - - private: - friend class AudioFlinger; // for mState - - RecordTrack(const RecordTrack&); - RecordTrack& operator = (const RecordTrack&); - - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS); - // releaseBuffer() not overridden - - bool mOverflow; // overflow on most recent attempt to fill client buffer - }; - - RecordThread(const sp<AudioFlinger>& audioFlinger, - AudioStreamIn *input, - uint32_t sampleRate, - audio_channel_mask_t channelMask, - audio_io_handle_t id, - audio_devices_t device); - virtual ~RecordThread(); - - // no addTrack_l ? - void destroyTrack_l(const sp<RecordTrack>& track); - void removeTrack_l(const sp<RecordTrack>& track); - - void dumpInternals(int fd, const Vector<String16>& args); - void dumpTracks(int fd, const Vector<String16>& args); - - // Thread virtuals - virtual bool threadLoop(); - virtual status_t readyToRun(); - - // RefBase - virtual void onFirstRef(); - - virtual status_t initCheck() const { return (mInput == NULL) ? 
NO_INIT : NO_ERROR; } - sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l( - const sp<AudioFlinger::Client>& client, - uint32_t sampleRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int frameCount, - int sessionId, - IAudioFlinger::track_flags_t flags, - pid_t tid, - status_t *status); - - status_t start(RecordTrack* recordTrack, - AudioSystem::sync_event_t event, - int triggerSession); - - // ask the thread to stop the specified track, and - // return true if the caller should then do it's part of the stopping process - bool stop_l(RecordTrack* recordTrack); - - void dump(int fd, const Vector<String16>& args); - AudioStreamIn* clearInput(); - virtual audio_stream_t* stream() const; - - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); - virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); - - virtual bool checkForNewParameters_l(); - virtual String8 getParameters(const String8& keys); - virtual void audioConfigChanged_l(int event, int param = 0); - void readInputParameters(); - virtual unsigned int getInputFramesLost(); - - virtual status_t addEffectChain_l(const sp<EffectChain>& chain); - virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); - virtual uint32_t hasAudioSession(int sessionId) const; - - // Return the set of unique session IDs across all tracks. - // The keys are the session IDs, and the associated values are meaningless. - // FIXME replace by Set [and implement Bag/Multiset for other uses]. 
- KeyedVector<int, bool> sessionIds() const; - - virtual status_t setSyncEvent(const sp<SyncEvent>& event); - virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const; - - static void syncStartEventCallback(const wp<SyncEvent>& event); - void handleSyncStartEvent(const sp<SyncEvent>& event); - - private: - void clearSyncStartEvent(); - - // Enter standby if not already in standby, and set mStandby flag - void standby(); - - // Call the HAL standby method unconditionally, and don't change mStandby flag - void inputStandBy(); - - AudioStreamIn *mInput; - SortedVector < sp<RecordTrack> > mTracks; - // mActiveTrack has dual roles: it indicates the current active track, and - // is used together with mStartStopCond to indicate start()/stop() progress - sp<RecordTrack> mActiveTrack; - Condition mStartStopCond; - AudioResampler *mResampler; - int32_t *mRsmpOutBuffer; - int16_t *mRsmpInBuffer; - size_t mRsmpInIndex; - size_t mInputBytes; - const int mReqChannelCount; - const uint32_t mReqSampleRate; - ssize_t mBytesRead; - // sync event triggering actual audio capture. Frames read before this event will - // be dropped and therefore not read by the application. - sp<SyncEvent> mSyncStartEvent; - // number of captured frames to drop after the start sync event has been received. - // when < 0, maximum frames to drop before starting capture even if sync event is - // not received - ssize_t mFramestoDrop; - }; - // server side of the client's IAudioRecord class RecordHandle : public android::BnAudioRecord { public: @@ -1591,343 +424,33 @@ private: void stop_nonvirtual(); }; - //--- Audio Effect Management - - // EffectModule and EffectChain classes both have their own mutex to protect - // state changes or resource modifications. 
Always respect the following order - // if multiple mutexes must be acquired to avoid cross deadlock: - // AudioFlinger -> ThreadBase -> EffectChain -> EffectModule - - // The EffectModule class is a wrapper object controlling the effect engine implementation - // in the effect library. It prevents concurrent calls to process() and command() functions - // from different client threads. It keeps a list of EffectHandle objects corresponding - // to all client applications using this effect and notifies applications of effect state, - // control or parameter changes. It manages the activation state machine to send appropriate - // reset, enable, disable commands to effect engine and provide volume - // ramping when effects are activated/deactivated. - // When controlling an auxiliary effect, the EffectModule also provides an input buffer used by - // the attached track(s) to accumulate their auxiliary channel. - class EffectModule: public RefBase { - public: - EffectModule(ThreadBase *thread, - const wp<AudioFlinger::EffectChain>& chain, - effect_descriptor_t *desc, - int id, - int sessionId); - virtual ~EffectModule(); - - enum effect_state { - IDLE, - RESTART, - STARTING, - ACTIVE, - STOPPING, - STOPPED, - DESTROYED - }; - - int id() const { return mId; } - void process(); - void updateState(); - status_t command(uint32_t cmdCode, - uint32_t cmdSize, - void *pCmdData, - uint32_t *replySize, - void *pReplyData); - - void reset_l(); - status_t configure(); - status_t init(); - effect_state state() const { - return mState; - } - uint32_t status() { - return mStatus; - } - int sessionId() const { - return mSessionId; - } - status_t setEnabled(bool enabled); - status_t setEnabled_l(bool enabled); - bool isEnabled() const; - bool isProcessEnabled() const; - - void setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; } - int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; } - void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; 
} - int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; } - void setChain(const wp<EffectChain>& chain) { mChain = chain; } - void setThread(const wp<ThreadBase>& thread) { mThread = thread; } - const wp<ThreadBase>& thread() { return mThread; } - - status_t addHandle(EffectHandle *handle); - size_t disconnect(EffectHandle *handle, bool unpinIfLast); - size_t removeHandle(EffectHandle *handle); - - const effect_descriptor_t& desc() const { return mDescriptor; } - wp<EffectChain>& chain() { return mChain; } - - status_t setDevice(audio_devices_t device); - status_t setVolume(uint32_t *left, uint32_t *right, bool controller); - status_t setMode(audio_mode_t mode); - status_t setAudioSource(audio_source_t source); - status_t start(); - status_t stop(); - void setSuspended(bool suspended); - bool suspended() const; - - EffectHandle* controlHandle_l(); - - bool isPinned() const { return mPinned; } - void unPin() { mPinned = false; } - bool purgeHandles(); - void lock() { mLock.lock(); } - void unlock() { mLock.unlock(); } - - void dump(int fd, const Vector<String16>& args); - - protected: - friend class AudioFlinger; // for mHandles - bool mPinned; - - // Maximum time allocated to effect engines to complete the turn off sequence - static const uint32_t MAX_DISABLE_TIME_MS = 10000; - - EffectModule(const EffectModule&); - EffectModule& operator = (const EffectModule&); - - status_t start_l(); - status_t stop_l(); - -mutable Mutex mLock; // mutex for process, commands and handles list protection - wp<ThreadBase> mThread; // parent thread - wp<EffectChain> mChain; // parent effect chain - const int mId; // this instance unique ID - const int mSessionId; // audio session ID - const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine - effect_config_t mConfig; // input and output audio configuration - effect_handle_t mEffectInterface; // Effect module C API - status_t mStatus; // initialization status - effect_state mState; // current 
activation state - Vector<EffectHandle *> mHandles; // list of client handles - // First handle in mHandles has highest priority and controls the effect module - uint32_t mMaxDisableWaitCnt; // maximum grace period before forcing an effect off after - // sending disable command. - uint32_t mDisableWaitCnt; // current process() calls count during disable period. - bool mSuspended; // effect is suspended: temporarily disabled by framework - }; - - // The EffectHandle class implements the IEffect interface. It provides resources - // to receive parameter updates, keeps track of effect control - // ownership and state and has a pointer to the EffectModule object it is controlling. - // There is one EffectHandle object for each application controlling (or using) - // an effect module. - // The EffectHandle is obtained by calling AudioFlinger::createEffect(). - class EffectHandle: public android::BnEffect { - public: - - EffectHandle(const sp<EffectModule>& effect, - const sp<AudioFlinger::Client>& client, - const sp<IEffectClient>& effectClient, - int32_t priority); - virtual ~EffectHandle(); - - // IEffect - virtual status_t enable(); - virtual status_t disable(); - virtual status_t command(uint32_t cmdCode, - uint32_t cmdSize, - void *pCmdData, - uint32_t *replySize, - void *pReplyData); - virtual void disconnect(); - private: - void disconnect(bool unpinIfLast); - public: - virtual sp<IMemory> getCblk() const { return mCblkMemory; } - virtual status_t onTransact(uint32_t code, const Parcel& data, - Parcel* reply, uint32_t flags); - - - // Give or take control of effect module - // - hasControl: true if control is given, false if removed - // - signal: true client app should be signaled of change, false otherwise - // - enabled: state of the effect when control is passed - void setControl(bool hasControl, bool signal, bool enabled); - void commandExecuted(uint32_t cmdCode, - uint32_t cmdSize, - void *pCmdData, - uint32_t replySize, - void *pReplyData); - void 
setEnabled(bool enabled); - bool enabled() const { return mEnabled; } - - // Getters - int id() const { return mEffect->id(); } - int priority() const { return mPriority; } - bool hasControl() const { return mHasControl; } - sp<EffectModule> effect() const { return mEffect; } - // destroyed_l() must be called with the associated EffectModule mLock held - bool destroyed_l() const { return mDestroyed; } - - void dump(char* buffer, size_t size); - - protected: - friend class AudioFlinger; // for mEffect, mHasControl, mEnabled - EffectHandle(const EffectHandle&); - EffectHandle& operator =(const EffectHandle&); - - sp<EffectModule> mEffect; // pointer to controlled EffectModule - sp<IEffectClient> mEffectClient; // callback interface for client notifications - /*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect() - sp<IMemory> mCblkMemory; // shared memory for control block - effect_param_cblk_t* mCblk; // control block for deferred parameter setting via shared memory - uint8_t* mBuffer; // pointer to parameter area in shared memory - int mPriority; // client application priority to control the effect - bool mHasControl; // true if this handle is controlling the effect - bool mEnabled; // cached enable state: needed when the effect is - // restored after being suspended - bool mDestroyed; // Set to true by destructor. Access with EffectModule - // mLock held - }; - - // the EffectChain class represents a group of effects associated to one audio session. - // There can be any number of EffectChain objects per output mixer thread (PlaybackThread). - // The EffecChain with session ID 0 contains global effects applied to the output mix. - // Effects in this chain can be insert or auxiliary. Effects in other chains (attached to tracks) - // are insert only. The EffectChain maintains an ordered list of effect module, the order corresponding - // in the effect process order. 
When attached to a track (session ID != 0), it also provide it's own - // input buffer used by the track as accumulation buffer. - class EffectChain: public RefBase { - public: - EffectChain(const wp<ThreadBase>& wThread, int sessionId); - EffectChain(ThreadBase *thread, int sessionId); - virtual ~EffectChain(); - - // special key used for an entry in mSuspendedEffects keyed vector - // corresponding to a suspend all request. - static const int kKeyForSuspendAll = 0; - - // minimum duration during which we force calling effect process when last track on - // a session is stopped or removed to allow effect tail to be rendered - static const int kProcessTailDurationMs = 1000; - - void process_l(); - - void lock() { - mLock.lock(); - } - void unlock() { - mLock.unlock(); - } - - status_t addEffect_l(const sp<EffectModule>& handle); - size_t removeEffect_l(const sp<EffectModule>& handle); - - int sessionId() const { return mSessionId; } - void setSessionId(int sessionId) { mSessionId = sessionId; } - - sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor); - sp<EffectModule> getEffectFromId_l(int id); - sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type); - bool setVolume_l(uint32_t *left, uint32_t *right); - void setDevice_l(audio_devices_t device); - void setMode_l(audio_mode_t mode); - void setAudioSource_l(audio_source_t source); - - void setInBuffer(int16_t *buffer, bool ownsBuffer = false) { - mInBuffer = buffer; - mOwnInBuffer = ownsBuffer; - } - int16_t *inBuffer() const { - return mInBuffer; - } - void setOutBuffer(int16_t *buffer) { - mOutBuffer = buffer; - } - int16_t *outBuffer() const { - return mOutBuffer; - } - - void incTrackCnt() { android_atomic_inc(&mTrackCnt); } - void decTrackCnt() { android_atomic_dec(&mTrackCnt); } - int32_t trackCnt() const { return android_atomic_acquire_load(&mTrackCnt); } - - void incActiveTrackCnt() { android_atomic_inc(&mActiveTrackCnt); - mTailBufferCount = mMaxTailBuffers; } - void 
decActiveTrackCnt() { android_atomic_dec(&mActiveTrackCnt); } - int32_t activeTrackCnt() const { return android_atomic_acquire_load(&mActiveTrackCnt); } - - uint32_t strategy() const { return mStrategy; } - void setStrategy(uint32_t strategy) - { mStrategy = strategy; } - - // suspend effect of the given type - void setEffectSuspended_l(const effect_uuid_t *type, - bool suspend); - // suspend all eligible effects - void setEffectSuspendedAll_l(bool suspend); - // check if effects should be suspend or restored when a given effect is enable or disabled - void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, - bool enabled); - - void clearInputBuffer(); + PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const; + MixerThread *checkMixerThread_l(audio_io_handle_t output) const; + RecordThread *checkRecordThread_l(audio_io_handle_t input) const; + // no range check, AudioFlinger::mLock held + bool streamMute_l(audio_stream_type_t stream) const + { return mStreamTypes[stream].mute; } + // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held + float streamVolume_l(audio_stream_type_t stream) const + { return mStreamTypes[stream].volume; } + void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2); - void dump(int fd, const Vector<String16>& args); + // allocate an audio_io_handle_t, session ID, or effect ID + uint32_t nextUniqueId(); - protected: - friend class AudioFlinger; // for mThread, mEffects - EffectChain(const EffectChain&); - EffectChain& operator =(const EffectChain&); + status_t moveEffectChain_l(int sessionId, + PlaybackThread *srcThread, + PlaybackThread *dstThread, + bool reRegister); + // return thread associated with primary hardware device, or NULL + PlaybackThread *primaryPlaybackThread_l() const; + audio_devices_t primaryOutputDevice_l() const; - class SuspendedEffectDesc : public RefBase { - public: - SuspendedEffectDesc() : mRefCount(0) {} + sp<PlaybackThread> 
getEffectThread_l(int sessionId, int EffectId); - int mRefCount; - effect_uuid_t mType; - wp<EffectModule> mEffect; - }; - // get a list of effect modules to suspend when an effect of the type - // passed is enabled. - void getSuspendEligibleEffects(Vector< sp<EffectModule> > &effects); - - // get an effect module if it is currently enable - sp<EffectModule> getEffectIfEnabled(const effect_uuid_t *type); - // true if the effect whose descriptor is passed can be suspended - // OEMs can modify the rules implemented in this method to exclude specific effect - // types or implementations from the suspend/restore mechanism. - bool isEffectEligibleForSuspend(const effect_descriptor_t& desc); - - void clearInputBuffer_l(sp<ThreadBase> thread); - - wp<ThreadBase> mThread; // parent mixer thread - Mutex mLock; // mutex protecting effect list - Vector< sp<EffectModule> > mEffects; // list of effect modules - int mSessionId; // audio session ID - int16_t *mInBuffer; // chain input buffer - int16_t *mOutBuffer; // chain output buffer - - // 'volatile' here means these are accessed with atomic operations instead of mutex - volatile int32_t mActiveTrackCnt; // number of active tracks connected - volatile int32_t mTrackCnt; // number of tracks connected - - int32_t mTailBufferCount; // current effect tail buffer count - int32_t mMaxTailBuffers; // maximum effect tail buffers - bool mOwnInBuffer; // true if the chain owns its input buffer - int mVolumeCtrlIdx; // index of insert effect having control over volume - uint32_t mLeftVolume; // previous volume on left channel - uint32_t mRightVolume; // previous volume on right channel - uint32_t mNewLeftVolume; // new volume on left channel - uint32_t mNewRightVolume; // new volume on right channel - uint32_t mStrategy; // strategy for this effect chain - // mSuspendedEffects lists all effects currently suspended in the chain. - // Use effect type UUID timelow field as key. 
There is no real risk of identical - // timeLow fields among effect type UUIDs. - // Updated by updateSuspendedSessions_l() only. - KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects; - }; + void removeClient_l(pid_t pid); + void removeNotificationClient(pid_t pid); class AudioHwDevice { public: @@ -2064,8 +587,40 @@ private: // for use from destructor status_t closeOutput_nonvirtual(audio_io_handle_t output); status_t closeInput_nonvirtual(audio_io_handle_t input); + +// do not use #ifdef here, since AudioFlinger.h is included by more than one module +//#ifdef TEE_SINK + // all record threads serially share a common tee sink, which is re-created on format change + sp<NBAIO_Sink> mRecordTeeSink; + sp<NBAIO_Source> mRecordTeeSource; +//#endif + +public: + +#ifdef TEE_SINK + // tee sink, if enabled by property, allows dumpsys to write most recent audio to .wav file + static void dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id = 0); + + // whether tee sink is enabled by property + static bool mTeeSinkInputEnabled; + static bool mTeeSinkOutputEnabled; + static bool mTeeSinkTrackEnabled; + + // runtime configured size of each tee sink pipe, in frames + static size_t mTeeSinkInputFrames; + static size_t mTeeSinkOutputFrames; + static size_t mTeeSinkTrackFrames; + + // compile-time default size of tee sink pipes, in frames + // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes + static const size_t kTeeSinkInputFramesDefault = 0x200000; + static const size_t kTeeSinkOutputFramesDefault = 0x200000; + static const size_t kTeeSinkTrackFramesDefault = 0x1000; +#endif + }; +#undef INCLUDING_FROM_AUDIOFLINGER_H // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp index 1e4049a..7d38f80 100644 --- a/services/audioflinger/AudioMixer.cpp +++ b/services/audioflinger/AudioMixer.cpp @@ -106,14 +106,23 @@ 
AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u", maxNumTracks, MAX_NUM_TRACKS); + // AudioMixer is not yet capable of more than 32 active track inputs + ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS); + + // AudioMixer is not yet capable of multi-channel output beyond stereo + ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS); + LocalClock lc; + pthread_once(&sOnceControl, &sInitRoutine); + mState.enabledTracks= 0; mState.needsChanged = 0; mState.frameCount = frameCount; mState.hook = process__nop; mState.outputTemp = NULL; mState.resampleTemp = NULL; + mState.mLog = &mDummyLog; // mState.reserved // FIXME Most of the following initialization is probably redundant since @@ -121,8 +130,6 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr // and mTrackNames is initially 0. However, leave it here until that's verified. 
track_t* t = mState.tracks; for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) { - // FIXME redundant per track - t->localTimeFreq = lc.getLocalFreq(); t->resampler = NULL; t->downmixerBufferProvider = NULL; t++; @@ -163,6 +170,11 @@ AudioMixer::~AudioMixer() delete [] mState.resampleTemp; } +void AudioMixer::setLog(NBLog::Writer *log) +{ + mState.mLog = log; +} + int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) { uint32_t names = (~mTrackNames) & mConfiguredNames; @@ -192,7 +204,6 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) t->sessionId = sessionId; // setBufferProvider(name, AudioBufferProvider *) is required before enable(name) t->bufferProvider = NULL; - t->downmixerBufferProvider = NULL; t->buffer.raw = NULL; // no initialization needed // t->buffer.frameCount @@ -203,7 +214,7 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name) t->mainBuffer = NULL; t->auxBuffer = NULL; - // see t->localTimeFreq in constructor above + t->downmixerBufferProvider = NULL; status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask); if (status == OK) { @@ -556,7 +567,7 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate) // the resampler sees the number of channels after the downmixer, if any downmixerBufferProvider != NULL ? 
MAX_NUM_CHANNELS : channelCount, devSampleRate, quality); - resampler->setLocalTimeFreq(localTimeFreq); + resampler->setLocalTimeFreq(sLocalTimeFreq); } return true; } @@ -615,7 +626,6 @@ void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider } - void AudioMixer::process(int64_t pts) { mState.hook(&mState, pts); @@ -760,7 +770,8 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) } -void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux) +void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, + int32_t* temp, int32_t* aux) { t->resampler->setSampleRate(t->sampleRate); @@ -793,11 +804,13 @@ void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram } } -void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux) +void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, + int32_t* aux) { } -void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux) +void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux) { int32_t vl = t->prevVolume[0]; int32_t vr = t->prevVolume[1]; @@ -839,7 +852,8 @@ void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, i t->adjustVolumeRamp(aux != NULL); } -void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux) +void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux) { const int16_t vl = t->volume[0]; const int16_t vr = t->volume[1]; @@ -867,7 +881,8 @@ void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32 } } -void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux) +void AudioMixer::track__16BitsStereo(track_t* 
t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux) { const int16_t *in = static_cast<const int16_t *>(t->in); @@ -957,7 +972,8 @@ void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount t->in = in; } -void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux) +void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux) { const int16_t *in = static_cast<int16_t const *>(t->in); @@ -1053,33 +1069,37 @@ void AudioMixer::process__nop(state_t* state, int64_t pts) // avoid multiple memset() on same buffer uint32_t e1 = e0, e2 = e0; int i = 31 - __builtin_clz(e1); - track_t& t1 = state->tracks[i]; - e2 &= ~(1<<i); - while (e2) { - i = 31 - __builtin_clz(e2); + { + track_t& t1 = state->tracks[i]; e2 &= ~(1<<i); - track_t& t2 = state->tracks[i]; - if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) { - e1 &= ~(1<<i); + while (e2) { + i = 31 - __builtin_clz(e2); + e2 &= ~(1<<i); + track_t& t2 = state->tracks[i]; + if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) { + e1 &= ~(1<<i); + } } - } - e0 &= ~(e1); + e0 &= ~(e1); - memset(t1.mainBuffer, 0, bufSize); + memset(t1.mainBuffer, 0, bufSize); + } while (e1) { i = 31 - __builtin_clz(e1); e1 &= ~(1<<i); - t1 = state->tracks[i]; - size_t outFrames = state->frameCount; - while (outFrames) { - t1.buffer.frameCount = outFrames; - int64_t outputPTS = calculateOutputPTS( - t1, pts, state->frameCount - outFrames); - t1.bufferProvider->getNextBuffer(&t1.buffer, outputPTS); - if (t1.buffer.raw == NULL) break; - outFrames -= t1.buffer.frameCount; - t1.bufferProvider->releaseBuffer(&t1.buffer); + { + track_t& t3 = state->tracks[i]; + size_t outFrames = state->frameCount; + while (outFrames) { + t3.buffer.frameCount = outFrames; + int64_t outputPTS = calculateOutputPTS( + t3, pts, state->frameCount - outFrames); + t3.bufferProvider->getNextBuffer(&t3.buffer, outputPTS); + if (t3.buffer.raw == NULL) 
break; + outFrames -= t3.buffer.frameCount; + t3.bufferProvider->releaseBuffer(&t3.buffer); + } } } } @@ -1098,12 +1118,6 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) e0 &= ~(1<<i); track_t& t = state->tracks[i]; t.buffer.frameCount = state->frameCount; - int valid = t.bufferProvider->getValid(); - if (valid != AudioBufferProvider::kValid) { - ALOGE("invalid bufferProvider=%p name=%d frameCount=%d valid=%#x enabledTracks=%#x", - t.bufferProvider, i, t.buffer.frameCount, valid, enabledTracks); - // expect to crash - } t.bufferProvider->getNextBuffer(&t.buffer, pts); t.frameCount = t.buffer.frameCount; t.in = t.buffer.raw; @@ -1148,7 +1162,8 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) while (outFrames) { size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount; if (inFrames) { - t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux); + t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, + state->resampleTemp, aux); t.frameCount -= inFrames; outFrames -= inFrames; if (CC_UNLIKELY(aux != NULL)) { @@ -1157,7 +1172,8 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) } if (t.frameCount == 0 && outFrames) { t.bufferProvider->releaseBuffer(&t.buffer); - t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames); + t.buffer.frameCount = (state->frameCount - numFrames) - + (BLOCKSIZE - outFrames); int64_t outputPTS = calculateOutputPTS( t, pts, numFrames + (BLOCKSIZE - outFrames)); t.bufferProvider->getNextBuffer(&t.buffer, outputPTS); @@ -1247,7 +1263,8 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) if (CC_UNLIKELY(aux != NULL)) { aux += outFrames; } - t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux); + t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, + state->resampleTemp, aux); outFrames += 
t.buffer.frameCount; t.bufferProvider->releaseBuffer(&t.buffer); } @@ -1287,7 +1304,8 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, // been enabled for mixing. if (in == NULL || ((unsigned long)in & 3)) { memset(out, 0, numFrames*MAX_NUM_CHANNELS*sizeof(int16_t)); - ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: buffer %p track %d, channels %d, needs %08x", + ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: " + "buffer %p track %d, channels %d, needs %08x", in, i, t.channelCount, t.needs); return; } @@ -1429,7 +1447,16 @@ int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, if (AudioBufferProvider::kInvalidPTS == basePTS) return AudioBufferProvider::kInvalidPTS; - return basePTS + ((outputFrameIndex * t.localTimeFreq) / t.sampleRate); + return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate); +} + +/*static*/ uint64_t AudioMixer::sLocalTimeFreq; +/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT; + +/*static*/ void AudioMixer::sInitRoutine() +{ + LocalClock lc; + sLocalTimeFreq = lc.getLocalFreq(); } // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h index 6333357..43aeb86 100644 --- a/services/audioflinger/AudioMixer.h +++ b/services/audioflinger/AudioMixer.h @@ -28,6 +28,7 @@ #include <audio_effects/effect_downmix.h> #include <system/audio.h> +#include <media/nbaio/NBLog.h> namespace android { @@ -41,8 +42,15 @@ public: /*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed + + // This mixer has a hard-coded upper limit of 32 active track inputs. + // Adding support for > 32 tracks would require more than simply changing this value. 
static const uint32_t MAX_NUM_TRACKS = 32; // maximum number of channels supported by the mixer + + // This mixer has a hard-coded upper limit of 2 channels for output. + // There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect. + // Adding support for > 2 channel output would require more than simply changing this value. static const uint32_t MAX_NUM_CHANNELS = 2; // maximum number of channels supported for the content static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = 8; @@ -139,7 +147,8 @@ private: struct track_t; class DownmixerBufferProvider; - typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux); + typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, + int32_t* aux); static const int BLOCKSIZE = 16; // 4 cache lines struct track_t { @@ -188,12 +197,12 @@ private: // 16-byte boundary - uint64_t localTimeFreq; - DownmixerBufferProvider* downmixerBufferProvider; // 4 bytes int32_t sessionId; + int32_t padding[2]; + // 16-byte boundary bool setResampler(uint32_t sampleRate, uint32_t devSampleRate); @@ -212,7 +221,8 @@ private: void (*hook)(state_t* state, int64_t pts); // one of process__*, never NULL int32_t *outputTemp; int32_t *resampleTemp; - int32_t reserved[2]; + NBLog::Writer* mLog; + int32_t reserved[1]; // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS track_t tracks[MAX_NUM_TRACKS]; __attribute__((aligned(32))); }; @@ -239,6 +249,10 @@ private: const uint32_t mSampleRate; + NBLog::Writer mDummyLog; +public: + void setLog(NBLog::Writer* log); +private: state_t mState __attribute__((aligned(32))); // effect descriptor for the downmixer used by the mixer @@ -254,12 +268,17 @@ private: static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum); static void unprepareTrackForDownmix(track_t* pTrack, int trackName); - static void track__genericResample(track_t* t, int32_t* out, size_t 
numFrames, int32_t* temp, int32_t* aux); + static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, + int32_t* aux); static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux); - static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux); - static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux); - static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux); - static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux); + static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, + int32_t* aux); + static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, + int32_t* aux); + static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux); + static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux); static void process__validate(state_t* state, int64_t pts); static void process__nop(state_t* state, int64_t pts); @@ -274,6 +293,10 @@ private: static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS, int outputFrameIndex); + + static uint64_t sLocalTimeFreq; + static pthread_once_t sOnceControl; + static void sInitRoutine(); }; // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp index 55e0826..2706880 100644 --- a/services/audioflinger/AudioPolicyService.cpp +++ b/services/audioflinger/AudioPolicyService.cpp @@ -147,7 +147,7 @@ status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device, return BAD_VALUE; } - ALOGV("setDeviceConnectionState() tid %d", gettid()); + ALOGV("setDeviceConnectionState()"); Mutex::Autolock 
_l(mLock); return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device, state, device_address); @@ -176,7 +176,7 @@ status_t AudioPolicyService::setPhoneState(audio_mode_t state) return BAD_VALUE; } - ALOGV("setPhoneState() tid %d", gettid()); + ALOGV("setPhoneState()"); // TODO: check if it is more appropriate to do it in platform specific policy manager AudioSystem::setMode(state); @@ -201,7 +201,7 @@ status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage, if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) { return BAD_VALUE; } - ALOGV("setForceUse() tid %d", gettid()); + ALOGV("setForceUse()"); Mutex::Autolock _l(mLock); mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config); return NO_ERROR; @@ -227,9 +227,10 @@ audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, if (mpAudioPolicy == NULL) { return 0; } - ALOGV("getOutput() tid %d", gettid()); + ALOGV("getOutput()"); Mutex::Autolock _l(mLock); - return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask, flags); + return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask, + flags); } status_t AudioPolicyService::startOutput(audio_io_handle_t output, @@ -239,7 +240,7 @@ status_t AudioPolicyService::startOutput(audio_io_handle_t output, if (mpAudioPolicy == NULL) { return NO_INIT; } - ALOGV("startOutput() tid %d", gettid()); + ALOGV("startOutput()"); Mutex::Autolock _l(mLock); return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session); } @@ -251,7 +252,7 @@ status_t AudioPolicyService::stopOutput(audio_io_handle_t output, if (mpAudioPolicy == NULL) { return NO_INIT; } - ALOGV("stopOutput() tid %d", gettid()); + ALOGV("stopOutput()"); Mutex::Autolock _l(mLock); return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session); } @@ -261,7 +262,7 @@ void AudioPolicyService::releaseOutput(audio_io_handle_t output) if (mpAudioPolicy == NULL) { return; } - 
ALOGV("releaseOutput() tid %d", gettid()); + ALOGV("releaseOutput()"); Mutex::Autolock _l(mLock); mpAudioPolicy->release_output(mpAudioPolicy, output); } @@ -282,7 +283,7 @@ audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, Mutex::Autolock _l(mLock); // the audio_in_acoustics_t parameter is ignored by get_input() audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate, - format, channelMask, (audio_in_acoustics_t) 0); + format, channelMask, (audio_in_acoustics_t) 0); if (input == 0) { return input; @@ -485,6 +486,15 @@ bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inP return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs); } +bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const +{ + if (mpAudioPolicy == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs); +} + bool AudioPolicyService::isSourceActive(audio_source_t source) const { if (mpAudioPolicy == NULL) { @@ -535,7 +545,7 @@ status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession, } void AudioPolicyService::binderDied(const wp<IBinder>& who) { - ALOGW("binderDied() %p, tid %d, calling pid %d", who.unsafe_get(), gettid(), + ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(), IPCThreadState::self()->getCallingPid()); } diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h index 63f9549..35cf368 100644 --- a/services/audioflinger/AudioPolicyService.h +++ b/services/audioflinger/AudioPolicyService.h @@ -104,6 +104,7 @@ public: virtual status_t unregisterEffect(int id); virtual status_t setEffectEnabled(int id, bool enabled); virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const; + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const; virtual 
bool isSourceActive(audio_source_t source) const; virtual status_t queryDefaultPreProcessing(int audioSession, @@ -142,11 +143,11 @@ private: status_t dumpInternals(int fd); // Thread used for tone playback and to send audio config commands to audio flinger - // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because startTone() - // and stopTone() are normally called with mLock locked and requesting a tone start or stop will cause - // calls to AudioPolicyService and an attempt to lock mLock. - // For audio config commands, it is necessary because audio flinger requires that the calling process (user) - // has permission to modify audio settings. + // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because + // startTone() and stopTone() are normally called with mLock locked and requesting a tone start + // or stop will cause calls to AudioPolicyService and an attempt to lock mLock. + // For audio config commands, it is necessary because audio flinger requires that the calling + // process (user) has permission to modify audio settings. 
class AudioCommandThread : public Thread { class AudioCommand; public: diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp index 3f22ca6..207f26b 100644 --- a/services/audioflinger/AudioResamplerSinc.cpp +++ b/services/audioflinger/AudioResamplerSinc.cpp @@ -53,7 +53,7 @@ namespace android { * tools/resampler_tools * cmd-line: fir -l 7 -s 48000 -c 20478 */ -const int32_t AudioResamplerSinc::mFirCoefsUp[] __attribute__ ((aligned (32))) = { +const uint32_t AudioResamplerSinc::mFirCoefsUp[] __attribute__ ((aligned (32))) = { 0x6d374bc7, 0x111c6ba0, 0xf3240e61, 0x07d14a38, 0xfc509e64, 0x0139cee9, 0xffc8c866, 0xfffcc300, 0x6d35278a, 0x103e8192, 0xf36b9dfd, 0x07bdfaa5, 0xfc5102d0, 0x013d618d, 0xffc663b9, 0xfffd9592, 0x6d2ebafe, 0x0f62811a, 0xf3b3d8ac, 0x07a9f399, 0xfc51d9a6, 0x0140bea5, 0xffc41212, 0xfffe631e, @@ -189,7 +189,7 @@ const int32_t AudioResamplerSinc::mFirCoefsUp[] __attribute__ ((aligned (32))) = * These coefficients are optimized for 48KHz -> 44.1KHz * cmd-line: fir -l 7 -s 48000 -c 17189 */ -const int32_t AudioResamplerSinc::mFirCoefsDown[] __attribute__ ((aligned (32))) = { +const uint32_t AudioResamplerSinc::mFirCoefsDown[] __attribute__ ((aligned (32))) = { 0x5bacb6f4, 0x1ded1a1d, 0xf0398d56, 0x0394f674, 0x0193a5f9, 0xfe66dbeb, 0x00791043, 0xfffe6631, 0x5bab6c81, 0x1d3ddccd, 0xf0421d2c, 0x03af9995, 0x01818dc9, 0xfe6bb63e, 0x0079812a, 0xfffdc37d, 0x5ba78d37, 0x1c8f2cf9, 0xf04beb1d, 0x03c9a04a, 0x016f8aca, 0xfe70a511, 0x0079e34d, 0xfffd2545, @@ -512,7 +512,7 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, if (mConstants == &veryHighQualityConstants && readResampleCoefficients) { mFirCoefs = readResampleCoefficients( mInSampleRate <= mSampleRate ); } else { - mFirCoefs = (mInSampleRate <= mSampleRate) ? mFirCoefsUp : mFirCoefsDown; + mFirCoefs = (const int32_t *) ((mInSampleRate <= mSampleRate) ? 
mFirCoefsUp : mFirCoefsDown); } // select the appropriate resampler diff --git a/services/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h index 09c6866..1ea4474 100644 --- a/services/audioflinger/AudioResamplerSinc.h +++ b/services/audioflinger/AudioResamplerSinc.h @@ -70,8 +70,8 @@ private: int32_t mVolumeSIMD[2]; const int32_t * mFirCoefs; - static const int32_t mFirCoefsDown[]; - static const int32_t mFirCoefsUp[]; + static const uint32_t mFirCoefsDown[]; + static const uint32_t mFirCoefsUp[]; // ---------------------------------------------------------------------------- static const int32_t RESAMPLE_FIR_NUM_COEF = 8; diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp new file mode 100644 index 0000000..d66294c --- /dev/null +++ b/services/audioflinger/Effects.cpp @@ -0,0 +1,1723 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#define LOG_TAG "AudioFlinger" +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <audio_effects/effect_visualizer.h> +#include <audio_utils/primitives.h> +#include <private/media/AudioEffectShared.h> +#include <media/EffectsFactoryApi.h> + +#include "AudioFlinger.h" +#include "ServiceUtilities.h" + +// ---------------------------------------------------------------------------- + +// Note: the following macro is used for extremely verbose logging message. 
In +// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to +// 0; but one side effect of this is to turn all LOGV's as well. Some messages +// are so verbose that we want to suppress them even when we have ALOG_ASSERT +// turned on. Do not uncomment the #def below unless you really know what you +// are doing and want to see all of the extremely verbose messages. +//#define VERY_VERY_VERBOSE_LOGGING +#ifdef VERY_VERY_VERBOSE_LOGGING +#define ALOGVV ALOGV +#else +#define ALOGVV(a...) do { } while(0) +#endif + +namespace android { + +// ---------------------------------------------------------------------------- +// EffectModule implementation +// ---------------------------------------------------------------------------- + +#undef LOG_TAG +#define LOG_TAG "AudioFlinger::EffectModule" + +AudioFlinger::EffectModule::EffectModule(ThreadBase *thread, + const wp<AudioFlinger::EffectChain>& chain, + effect_descriptor_t *desc, + int id, + int sessionId) + : mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX), + mThread(thread), mChain(chain), mId(id), mSessionId(sessionId), + mDescriptor(*desc), + // mConfig is set by configure() and not used before then + mEffectInterface(NULL), + mStatus(NO_INIT), mState(IDLE), + // mMaxDisableWaitCnt is set by configure() and not used before then + // mDisableWaitCnt is set by process() and updateState() and not used before then + mSuspended(false) +{ + ALOGV("Constructor %p", this); + int lStatus; + + // create effect engine from effect factory + mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface); + + if (mStatus != NO_ERROR) { + return; + } + lStatus = init(); + if (lStatus < 0) { + mStatus = lStatus; + goto Error; + } + + ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface); + return; +Error: + EffectRelease(mEffectInterface); + mEffectInterface = NULL; + ALOGV("Constructor Error %d", mStatus); +} + +AudioFlinger::EffectModule::~EffectModule() +{ + 
ALOGV("Destructor %p", this); + if (mEffectInterface != NULL) { + if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || + (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + audio_stream_t *stream = thread->stream(); + if (stream != NULL) { + stream->remove_audio_effect(stream, mEffectInterface); + } + } + } + // release effect engine + EffectRelease(mEffectInterface); + } +} + +status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle) +{ + status_t status; + + Mutex::Autolock _l(mLock); + int priority = handle->priority(); + size_t size = mHandles.size(); + EffectHandle *controlHandle = NULL; + size_t i; + for (i = 0; i < size; i++) { + EffectHandle *h = mHandles[i]; + if (h == NULL || h->destroyed_l()) { + continue; + } + // first non destroyed handle is considered in control + if (controlHandle == NULL) + controlHandle = h; + if (h->priority() <= priority) { + break; + } + } + // if inserted in first place, move effect control from previous owner to this handle + if (i == 0) { + bool enabled = false; + if (controlHandle != NULL) { + enabled = controlHandle->enabled(); + controlHandle->setControl(false/*hasControl*/, true /*signal*/, enabled /*enabled*/); + } + handle->setControl(true /*hasControl*/, false /*signal*/, enabled /*enabled*/); + status = NO_ERROR; + } else { + status = ALREADY_EXISTS; + } + ALOGV("addHandle() %p added handle %p in position %d", this, handle, i); + mHandles.insertAt(handle, i); + return status; +} + +size_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle) +{ + Mutex::Autolock _l(mLock); + size_t size = mHandles.size(); + size_t i; + for (i = 0; i < size; i++) { + if (mHandles[i] == handle) { + break; + } + } + if (i == size) { + return size; + } + ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i); + + mHandles.removeAt(i); + // if removed from first place, move 
effect control from this handle to next in line + if (i == 0) { + EffectHandle *h = controlHandle_l(); + if (h != NULL) { + h->setControl(true /*hasControl*/, true /*signal*/ , handle->enabled() /*enabled*/); + } + } + + // Prevent calls to process() and other functions on effect interface from now on. + // The effect engine will be released by the destructor when the last strong reference on + // this object is released which can happen after next process is called. + if (mHandles.size() == 0 && !mPinned) { + mState = DESTROYED; + } + + return mHandles.size(); +} + +// must be called with EffectModule::mLock held +AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l() +{ + // the first valid handle in the list has control over the module + for (size_t i = 0; i < mHandles.size(); i++) { + EffectHandle *h = mHandles[i]; + if (h != NULL && !h->destroyed_l()) { + return h; + } + } + + return NULL; +} + +size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast) +{ + ALOGV("disconnect() %p handle %p", this, handle); + // keep a strong reference on this EffectModule to avoid calling the + // destructor before we exit + sp<EffectModule> keep(this); + { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + thread->disconnectEffect(keep, handle, unpinIfLast); + } + } + return mHandles.size(); +} + +void AudioFlinger::EffectModule::updateState() { + Mutex::Autolock _l(mLock); + + switch (mState) { + case RESTART: + reset_l(); + // FALL THROUGH + + case STARTING: + // clear auxiliary effect input buffer for next accumulation + if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { + memset(mConfig.inputCfg.buffer.raw, + 0, + mConfig.inputCfg.buffer.frameCount*sizeof(int32_t)); + } + if (start_l() == NO_ERROR) { + mState = ACTIVE; + } else { + mState = IDLE; + } + break; + case STOPPING: + if (stop_l() == NO_ERROR) { + mDisableWaitCnt = mMaxDisableWaitCnt; + } else { + mDisableWaitCnt = 1; 
// will cause immediate transition to IDLE + } + mState = STOPPED; + break; + case STOPPED: + // mDisableWaitCnt is forced to 1 by process() when the engine indicates the end of the + // turn off sequence. + if (--mDisableWaitCnt == 0) { + reset_l(); + mState = IDLE; + } + break; + default: //IDLE , ACTIVE, DESTROYED + break; + } +} + +void AudioFlinger::EffectModule::process() +{ + Mutex::Autolock _l(mLock); + + if (mState == DESTROYED || mEffectInterface == NULL || + mConfig.inputCfg.buffer.raw == NULL || + mConfig.outputCfg.buffer.raw == NULL) { + return; + } + + if (isProcessEnabled()) { + // do 32 bit to 16 bit conversion for auxiliary effect input buffer + if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { + ditherAndClamp(mConfig.inputCfg.buffer.s32, + mConfig.inputCfg.buffer.s32, + mConfig.inputCfg.buffer.frameCount/2); + } + + // do the actual processing in the effect engine + int ret = (*mEffectInterface)->process(mEffectInterface, + &mConfig.inputCfg.buffer, + &mConfig.outputCfg.buffer); + + // force transition to IDLE state when engine is ready + if (mState == STOPPED && ret == -ENODATA) { + mDisableWaitCnt = 1; + } + + // clear auxiliary effect input buffer for next accumulation + if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { + memset(mConfig.inputCfg.buffer.raw, 0, + mConfig.inputCfg.buffer.frameCount*sizeof(int32_t)); + } + } else if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_INSERT && + mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) { + // If an insert effect is idle and input buffer is different from output buffer, + // accumulate input onto output + sp<EffectChain> chain = mChain.promote(); + if (chain != 0 && chain->activeTrackCnt() != 0) { + size_t frameCnt = mConfig.inputCfg.buffer.frameCount * 2; //always stereo here + int16_t *in = mConfig.inputCfg.buffer.s16; + int16_t *out = mConfig.outputCfg.buffer.s16; + for (size_t i = 0; i < frameCnt; 
i++) { + out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]); + } + } + } +} + +void AudioFlinger::EffectModule::reset_l() +{ + if (mStatus != NO_ERROR || mEffectInterface == NULL) { + return; + } + (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL); +} + +status_t AudioFlinger::EffectModule::configure() +{ + status_t status; + sp<ThreadBase> thread; + uint32_t size; + audio_channel_mask_t channelMask; + + if (mEffectInterface == NULL) { + status = NO_INIT; + goto exit; + } + + thread = mThread.promote(); + if (thread == 0) { + status = DEAD_OBJECT; + goto exit; + } + + // TODO: handle configuration of effects replacing track process + channelMask = thread->channelMask(); + + if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { + mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO; + } else { + mConfig.inputCfg.channels = channelMask; + } + mConfig.outputCfg.channels = channelMask; + mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT; + mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT; + mConfig.inputCfg.samplingRate = thread->sampleRate(); + mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate; + mConfig.inputCfg.bufferProvider.cookie = NULL; + mConfig.inputCfg.bufferProvider.getBuffer = NULL; + mConfig.inputCfg.bufferProvider.releaseBuffer = NULL; + mConfig.outputCfg.bufferProvider.cookie = NULL; + mConfig.outputCfg.bufferProvider.getBuffer = NULL; + mConfig.outputCfg.bufferProvider.releaseBuffer = NULL; + mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; + // Insert effect: + // - in session AUDIO_SESSION_OUTPUT_MIX or AUDIO_SESSION_OUTPUT_STAGE, + // always overwrites output buffer: input buffer == output buffer + // - in other sessions: + // last effect in the chain accumulates in output buffer: input buffer != output buffer + // other effect: overwrites output buffer: input buffer == output buffer + // Auxiliary effect: + // accumulates in output buffer: input buffer != output buffer 
+ // Therefore: accumulate <=> input buffer != output buffer + if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) { + mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE; + } else { + mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; + } + mConfig.inputCfg.mask = EFFECT_CONFIG_ALL; + mConfig.outputCfg.mask = EFFECT_CONFIG_ALL; + mConfig.inputCfg.buffer.frameCount = thread->frameCount(); + mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount; + + ALOGV("configure() %p thread %p buffer %p framecount %d", + this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount); + + status_t cmdStatus; + size = sizeof(int); + status = (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_SET_CONFIG, + sizeof(effect_config_t), + &mConfig, + &size, + &cmdStatus); + if (status == 0) { + status = cmdStatus; + } + + if (status == 0 && + (memcmp(&mDescriptor.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0)) { + uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2]; + effect_param_t *p = (effect_param_t *)buf32; + + p->psize = sizeof(uint32_t); + p->vsize = sizeof(uint32_t); + size = sizeof(int); + *(int32_t *)p->data = VISUALIZER_PARAM_LATENCY; + + uint32_t latency = 0; + PlaybackThread *pbt = thread->mAudioFlinger->checkPlaybackThread_l(thread->mId); + if (pbt != NULL) { + latency = pbt->latency_l(); + } + + *((int32_t *)p->data + 1)= latency; + (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_SET_PARAM, + sizeof(effect_param_t) + 8, + &buf32, + &size, + &cmdStatus); + } + + mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) / + (1000 * mConfig.outputCfg.buffer.frameCount); + +exit: + mStatus = status; + return status; +} + +status_t AudioFlinger::EffectModule::init() +{ + Mutex::Autolock _l(mLock); + if (mEffectInterface == NULL) { + return NO_INIT; + } + status_t cmdStatus; + uint32_t size = sizeof(status_t); + status_t status = 
(*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_INIT, + 0, + NULL, + &size, + &cmdStatus); + if (status == 0) { + status = cmdStatus; + } + return status; +} + +status_t AudioFlinger::EffectModule::start() +{ + Mutex::Autolock _l(mLock); + return start_l(); +} + +status_t AudioFlinger::EffectModule::start_l() +{ + if (mEffectInterface == NULL) { + return NO_INIT; + } + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t cmdStatus; + uint32_t size = sizeof(status_t); + status_t status = (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_ENABLE, + 0, + NULL, + &size, + &cmdStatus); + if (status == 0) { + status = cmdStatus; + } + if (status == 0 && + ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || + (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + audio_stream_t *stream = thread->stream(); + if (stream != NULL) { + stream->add_audio_effect(stream, mEffectInterface); + } + } + } + return status; +} + +status_t AudioFlinger::EffectModule::stop() +{ + Mutex::Autolock _l(mLock); + return stop_l(); +} + +status_t AudioFlinger::EffectModule::stop_l() +{ + if (mEffectInterface == NULL) { + return NO_INIT; + } + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t cmdStatus; + uint32_t size = sizeof(status_t); + status_t status = (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_DISABLE, + 0, + NULL, + &size, + &cmdStatus); + if (status == 0) { + status = cmdStatus; + } + if (status == 0 && + ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || + (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + audio_stream_t *stream = thread->stream(); + if (stream != NULL) { + stream->remove_audio_effect(stream, mEffectInterface); + } + } + } + return status; +} + +status_t 
AudioFlinger::EffectModule::command(uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t *replySize, + void *pReplyData) +{ + Mutex::Autolock _l(mLock); + ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface); + + if (mState == DESTROYED || mEffectInterface == NULL) { + return NO_INIT; + } + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t status = (*mEffectInterface)->command(mEffectInterface, + cmdCode, + cmdSize, + pCmdData, + replySize, + pReplyData); + if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) { + uint32_t size = (replySize == NULL) ? 0 : *replySize; + for (size_t i = 1; i < mHandles.size(); i++) { + EffectHandle *h = mHandles[i]; + if (h != NULL && !h->destroyed_l()) { + h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData); + } + } + } + return status; +} + +status_t AudioFlinger::EffectModule::setEnabled(bool enabled) +{ + Mutex::Autolock _l(mLock); + return setEnabled_l(enabled); +} + +// must be called with EffectModule::mLock held +status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled) +{ + + ALOGV("setEnabled %p enabled %d", this, enabled); + + if (enabled != isEnabled()) { + status_t status = AudioSystem::setEffectEnabled(mId, enabled); + if (enabled && status != NO_ERROR) { + return status; + } + + switch (mState) { + // going from disabled to enabled + case IDLE: + mState = STARTING; + break; + case STOPPED: + mState = RESTART; + break; + case STOPPING: + mState = ACTIVE; + break; + + // going from enabled to disabled + case RESTART: + mState = STOPPED; + break; + case STARTING: + mState = IDLE; + break; + case ACTIVE: + mState = STOPPING; + break; + case DESTROYED: + return NO_ERROR; // simply ignore as we are being destroyed + } + for (size_t i = 1; i < mHandles.size(); i++) { + EffectHandle *h = mHandles[i]; + if (h != NULL && !h->destroyed_l()) { + h->setEnabled(enabled); + } + } + } + return NO_ERROR; +} + +bool AudioFlinger::EffectModule::isEnabled() const 
+{ + switch (mState) { + case RESTART: + case STARTING: + case ACTIVE: + return true; + case IDLE: + case STOPPING: + case STOPPED: + case DESTROYED: + default: + return false; + } +} + +bool AudioFlinger::EffectModule::isProcessEnabled() const +{ + if (mStatus != NO_ERROR) { + return false; + } + + switch (mState) { + case RESTART: + case ACTIVE: + case STOPPING: + case STOPPED: + return true; + case IDLE: + case STARTING: + case DESTROYED: + default: + return false; + } +} + +status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller) +{ + Mutex::Autolock _l(mLock); + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t status = NO_ERROR; + // Send volume indication if EFFECT_FLAG_VOLUME_IND is set and read back altered volume + // if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set) + if (isProcessEnabled() && + ((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL || + (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) { + status_t cmdStatus; + uint32_t volume[2]; + uint32_t *pVolume = NULL; + uint32_t size = sizeof(volume); + volume[0] = *left; + volume[1] = *right; + if (controller) { + pVolume = volume; + } + status = (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_SET_VOLUME, + size, + volume, + &size, + pVolume); + if (controller && status == NO_ERROR && size == sizeof(volume)) { + *left = volume[0]; + *right = volume[1]; + } + } + return status; +} + +status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device) +{ + if (device == AUDIO_DEVICE_NONE) { + return NO_ERROR; + } + + Mutex::Autolock _l(mLock); + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t status = NO_ERROR; + if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) { + status_t cmdStatus; + uint32_t size = sizeof(status_t); + uint32_t cmd = audio_is_output_devices(device) ? 
EFFECT_CMD_SET_DEVICE : + EFFECT_CMD_SET_INPUT_DEVICE; + status = (*mEffectInterface)->command(mEffectInterface, + cmd, + sizeof(uint32_t), + &device, + &size, + &cmdStatus); + } + return status; +} + +status_t AudioFlinger::EffectModule::setMode(audio_mode_t mode) +{ + Mutex::Autolock _l(mLock); + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t status = NO_ERROR; + if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) { + status_t cmdStatus; + uint32_t size = sizeof(status_t); + status = (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_SET_AUDIO_MODE, + sizeof(audio_mode_t), + &mode, + &size, + &cmdStatus); + if (status == NO_ERROR) { + status = cmdStatus; + } + } + return status; +} + +status_t AudioFlinger::EffectModule::setAudioSource(audio_source_t source) +{ + Mutex::Autolock _l(mLock); + if (mStatus != NO_ERROR) { + return mStatus; + } + status_t status = NO_ERROR; + if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) { + uint32_t size = 0; + status = (*mEffectInterface)->command(mEffectInterface, + EFFECT_CMD_SET_AUDIO_SOURCE, + sizeof(audio_source_t), + &source, + &size, + NULL); + } + return status; +} + +void AudioFlinger::EffectModule::setSuspended(bool suspended) +{ + Mutex::Autolock _l(mLock); + mSuspended = suspended; +} + +bool AudioFlinger::EffectModule::suspended() const +{ + Mutex::Autolock _l(mLock); + return mSuspended; +} + +bool AudioFlinger::EffectModule::purgeHandles() +{ + bool enabled = false; + Mutex::Autolock _l(mLock); + for (size_t i = 0; i < mHandles.size(); i++) { + EffectHandle *handle = mHandles[i]; + if (handle != NULL && !handle->destroyed_l()) { + handle->effect().clear(); + if (handle->hasControl()) { + enabled = handle->enabled(); + } + } + } + return enabled; +} + +void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, 
"\tEffect ID %d:\n", mId); + result.append(buffer); + + bool locked = AudioFlinger::dumpTryLock(mLock); + // failed to lock - AudioFlinger is probably deadlocked + if (!locked) { + result.append("\t\tCould not lock Fx mutex:\n"); + } + + result.append("\t\tSession Status State Engine:\n"); + snprintf(buffer, SIZE, "\t\t%05d %03d %03d 0x%08x\n", + mSessionId, mStatus, mState, (uint32_t)mEffectInterface); + result.append(buffer); + + result.append("\t\tDescriptor:\n"); + snprintf(buffer, SIZE, "\t\t- UUID: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n", + mDescriptor.uuid.timeLow, mDescriptor.uuid.timeMid, mDescriptor.uuid.timeHiAndVersion, + mDescriptor.uuid.clockSeq, mDescriptor.uuid.node[0], mDescriptor.uuid.node[1], + mDescriptor.uuid.node[2], + mDescriptor.uuid.node[3],mDescriptor.uuid.node[4],mDescriptor.uuid.node[5]); + result.append(buffer); + snprintf(buffer, SIZE, "\t\t- TYPE: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n", + mDescriptor.type.timeLow, mDescriptor.type.timeMid, + mDescriptor.type.timeHiAndVersion, + mDescriptor.type.clockSeq, mDescriptor.type.node[0], mDescriptor.type.node[1], + mDescriptor.type.node[2], + mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]); + result.append(buffer); + snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n", + mDescriptor.apiVersion, + mDescriptor.flags); + result.append(buffer); + snprintf(buffer, SIZE, "\t\t- name: %s\n", + mDescriptor.name); + result.append(buffer); + snprintf(buffer, SIZE, "\t\t- implementor: %s\n", + mDescriptor.implementor); + result.append(buffer); + + result.append("\t\t- Input configuration:\n"); + result.append("\t\t\tBuffer Frames Smp rate Channels Format\n"); + snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n", + (uint32_t)mConfig.inputCfg.buffer.raw, + mConfig.inputCfg.buffer.frameCount, + mConfig.inputCfg.samplingRate, + mConfig.inputCfg.channels, + mConfig.inputCfg.format); + result.append(buffer); + + result.append("\t\t- Output 
configuration:\n"); + result.append("\t\t\tBuffer Frames Smp rate Channels Format\n"); + snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n", + (uint32_t)mConfig.outputCfg.buffer.raw, + mConfig.outputCfg.buffer.frameCount, + mConfig.outputCfg.samplingRate, + mConfig.outputCfg.channels, + mConfig.outputCfg.format); + result.append(buffer); + + snprintf(buffer, SIZE, "\t\t%d Clients:\n", mHandles.size()); + result.append(buffer); + result.append("\t\t\tPid Priority Ctrl Locked client server\n"); + for (size_t i = 0; i < mHandles.size(); ++i) { + EffectHandle *handle = mHandles[i]; + if (handle != NULL && !handle->destroyed_l()) { + handle->dump(buffer, SIZE); + result.append(buffer); + } + } + + result.append("\n"); + + write(fd, result.string(), result.length()); + + if (locked) { + mLock.unlock(); + } +} + +// ---------------------------------------------------------------------------- +// EffectHandle implementation +// ---------------------------------------------------------------------------- + +#undef LOG_TAG +#define LOG_TAG "AudioFlinger::EffectHandle" + +AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect, + const sp<AudioFlinger::Client>& client, + const sp<IEffectClient>& effectClient, + int32_t priority) + : BnEffect(), + mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL), + mPriority(priority), mHasControl(false), mEnabled(false), mDestroyed(false) +{ + ALOGV("constructor %p", this); + + if (client == 0) { + return; + } + int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int); + mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset); + if (mCblkMemory != 0) { + mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer()); + + if (mCblk != NULL) { + new(mCblk) effect_param_cblk_t(); + mBuffer = (uint8_t *)mCblk + bufOffset; + } + } else { + ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + + sizeof(effect_param_cblk_t)); + 
return; + } +} + +AudioFlinger::EffectHandle::~EffectHandle() +{ + ALOGV("Destructor %p", this); + + if (mEffect == 0) { + mDestroyed = true; + return; + } + mEffect->lock(); + mDestroyed = true; + mEffect->unlock(); + disconnect(false); +} + +status_t AudioFlinger::EffectHandle::enable() +{ + ALOGV("enable %p", this); + if (!mHasControl) { + return INVALID_OPERATION; + } + if (mEffect == 0) { + return DEAD_OBJECT; + } + + if (mEnabled) { + return NO_ERROR; + } + + mEnabled = true; + + sp<ThreadBase> thread = mEffect->thread().promote(); + if (thread != 0) { + thread->checkSuspendOnEffectEnabled(mEffect, true, mEffect->sessionId()); + } + + // checkSuspendOnEffectEnabled() can suspend this same effect when enabled + if (mEffect->suspended()) { + return NO_ERROR; + } + + status_t status = mEffect->setEnabled(true); + if (status != NO_ERROR) { + if (thread != 0) { + thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId()); + } + mEnabled = false; + } + return status; +} + +status_t AudioFlinger::EffectHandle::disable() +{ + ALOGV("disable %p", this); + if (!mHasControl) { + return INVALID_OPERATION; + } + if (mEffect == 0) { + return DEAD_OBJECT; + } + + if (!mEnabled) { + return NO_ERROR; + } + mEnabled = false; + + if (mEffect->suspended()) { + return NO_ERROR; + } + + status_t status = mEffect->setEnabled(false); + + sp<ThreadBase> thread = mEffect->thread().promote(); + if (thread != 0) { + thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId()); + } + + return status; +} + +void AudioFlinger::EffectHandle::disconnect() +{ + disconnect(true); +} + +void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast) +{ + ALOGV("disconnect(%s)", unpinIfLast ? "true" : "false"); + if (mEffect == 0) { + return; + } + // restore suspended effects if the disconnected handle was enabled and the last one. 
+ if ((mEffect->disconnect(this, unpinIfLast) == 0) && mEnabled) { + sp<ThreadBase> thread = mEffect->thread().promote(); + if (thread != 0) { + thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId()); + } + } + + // release sp on module => module destructor can be called now + mEffect.clear(); + if (mClient != 0) { + if (mCblk != NULL) { + // unlike ~TrackBase(), mCblk is never a local new, so don't delete + mCblk->~effect_param_cblk_t(); // destroy our shared-structure. + } + mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to + // Client destructor must run with AudioFlinger mutex locked + Mutex::Autolock _l(mClient->audioFlinger()->mLock); + mClient.clear(); + } +} + +status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t *replySize, + void *pReplyData) +{ + ALOGVV("command(), cmdCode: %d, mHasControl: %d, mEffect: %p", + cmdCode, mHasControl, (mEffect == 0) ? 0 : mEffect.get()); + + // only get parameter command is permitted for applications not controlling the effect + if (!mHasControl && cmdCode != EFFECT_CMD_GET_PARAM) { + return INVALID_OPERATION; + } + if (mEffect == 0) { + return DEAD_OBJECT; + } + if (mClient == 0) { + return INVALID_OPERATION; + } + + // handle commands that are not forwarded transparently to effect engine + if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) { + // No need to trylock() here as this function is executed in the binder thread serving a + // particular client process: no risk to block the whole media server process or mixer + // threads if we are stuck here + Mutex::Autolock _l(mCblk->lock); + if (mCblk->clientIndex > EFFECT_PARAM_BUFFER_SIZE || + mCblk->serverIndex > EFFECT_PARAM_BUFFER_SIZE) { + mCblk->serverIndex = 0; + mCblk->clientIndex = 0; + return BAD_VALUE; + } + status_t status = NO_ERROR; + while (mCblk->serverIndex < mCblk->clientIndex) { + int reply; + uint32_t rsize = sizeof(int); + int *p = (int *)(mBuffer 
+ mCblk->serverIndex); + int size = *p++; + if (((uint8_t *)p + size) > mBuffer + mCblk->clientIndex) { + ALOGW("command(): invalid parameter block size"); + break; + } + effect_param_t *param = (effect_param_t *)p; + if (param->psize == 0 || param->vsize == 0) { + ALOGW("command(): null parameter or value size"); + mCblk->serverIndex += size; + continue; + } + uint32_t psize = sizeof(effect_param_t) + + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + + param->vsize; + status_t ret = mEffect->command(EFFECT_CMD_SET_PARAM, + psize, + p, + &rsize, + &reply); + // stop at first error encountered + if (ret != NO_ERROR) { + status = ret; + *(int *)pReplyData = reply; + break; + } else if (reply != NO_ERROR) { + *(int *)pReplyData = reply; + break; + } + mCblk->serverIndex += size; + } + mCblk->serverIndex = 0; + mCblk->clientIndex = 0; + return status; + } else if (cmdCode == EFFECT_CMD_ENABLE) { + *(int *)pReplyData = NO_ERROR; + return enable(); + } else if (cmdCode == EFFECT_CMD_DISABLE) { + *(int *)pReplyData = NO_ERROR; + return disable(); + } + + return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData); +} + +void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled) +{ + ALOGV("setControl %p control %d", this, hasControl); + + mHasControl = hasControl; + mEnabled = enabled; + + if (signal && mEffectClient != 0) { + mEffectClient->controlStatusChanged(hasControl); + } +} + +void AudioFlinger::EffectHandle::commandExecuted(uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t replySize, + void *pReplyData) +{ + if (mEffectClient != 0) { + mEffectClient->commandExecuted(cmdCode, cmdSize, pCmdData, replySize, pReplyData); + } +} + + + +void AudioFlinger::EffectHandle::setEnabled(bool enabled) +{ + if (mEffectClient != 0) { + mEffectClient->enableStatusChanged(enabled); + } +} + +status_t AudioFlinger::EffectHandle::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + 
return BnEffect::onTransact(code, data, reply, flags); +} + + +void AudioFlinger::EffectHandle::dump(char* buffer, size_t size) +{ + bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock); + + snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n", + (mClient == 0) ? getpid_cached : mClient->pid(), + mPriority, + mHasControl, + !locked, + mCblk ? mCblk->clientIndex : 0, + mCblk ? mCblk->serverIndex : 0 + ); + + if (locked) { + mCblk->lock.unlock(); + } +} + +#undef LOG_TAG +#define LOG_TAG "AudioFlinger::EffectChain" + +AudioFlinger::EffectChain::EffectChain(ThreadBase *thread, + int sessionId) + : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0), + mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX), + mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX) +{ + mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); + if (thread == NULL) { + return; + } + mMaxTailBuffers = ((kProcessTailDurationMs * thread->sampleRate()) / 1000) / + thread->frameCount(); +} + +AudioFlinger::EffectChain::~EffectChain() +{ + if (mOwnInBuffer) { + delete mInBuffer; + } + +} + +// getEffectFromDesc_l() must be called with ThreadBase::mLock held +sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l( + effect_descriptor_t *descriptor) +{ + size_t size = mEffects.size(); + + for (size_t i = 0; i < size; i++) { + if (memcmp(&mEffects[i]->desc().uuid, &descriptor->uuid, sizeof(effect_uuid_t)) == 0) { + return mEffects[i]; + } + } + return 0; +} + +// getEffectFromId_l() must be called with ThreadBase::mLock held +sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int id) +{ + size_t size = mEffects.size(); + + for (size_t i = 0; i < size; i++) { + // by convention, return first effect if id provided is 0 (0 is never a valid id) + if (id == 0 || mEffects[i]->id() == id) { + return mEffects[i]; + } + } + return 0; +} + +// 
getEffectFromType_l() must be called with ThreadBase::mLock held +sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l( + const effect_uuid_t *type) +{ + size_t size = mEffects.size(); + + for (size_t i = 0; i < size; i++) { + if (memcmp(&mEffects[i]->desc().type, type, sizeof(effect_uuid_t)) == 0) { + return mEffects[i]; + } + } + return 0; +} + +void AudioFlinger::EffectChain::clearInputBuffer() +{ + Mutex::Autolock _l(mLock); + sp<ThreadBase> thread = mThread.promote(); + if (thread == 0) { + ALOGW("clearInputBuffer(): cannot promote mixer thread"); + return; + } + clearInputBuffer_l(thread); +} + +// Must be called with EffectChain::mLock locked +void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread) +{ + size_t numSamples = thread->frameCount() * thread->channelCount(); + memset(mInBuffer, 0, numSamples * sizeof(int16_t)); + +} + +// Must be called with EffectChain::mLock locked +void AudioFlinger::EffectChain::process_l() +{ + sp<ThreadBase> thread = mThread.promote(); + if (thread == 0) { + ALOGW("process_l(): cannot promote mixer thread"); + return; + } + bool isGlobalSession = (mSessionId == AUDIO_SESSION_OUTPUT_MIX) || + (mSessionId == AUDIO_SESSION_OUTPUT_STAGE); + // always process effects unless no more tracks are on the session and the effect tail + // has been rendered + bool doProcess = true; + if (!isGlobalSession) { + bool tracksOnSession = (trackCnt() != 0); + + if (!tracksOnSession && mTailBufferCount == 0) { + doProcess = false; + } + + if (activeTrackCnt() == 0) { + // if no track is active and the effect tail has not been rendered, + // the input buffer must be cleared here as the mixer process will not do it + if (tracksOnSession || mTailBufferCount > 0) { + clearInputBuffer_l(thread); + if (mTailBufferCount > 0) { + mTailBufferCount--; + } + } + } + } + + size_t size = mEffects.size(); + if (doProcess) { + for (size_t i = 0; i < size; i++) { + mEffects[i]->process(); + } + } + for (size_t i = 0; i 
< size; i++) { + mEffects[i]->updateState(); + } +} + +// addEffect_l() must be called with PlaybackThread::mLock held +status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect) +{ + effect_descriptor_t desc = effect->desc(); + uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK; + + Mutex::Autolock _l(mLock); + effect->setChain(this); + sp<ThreadBase> thread = mThread.promote(); + if (thread == 0) { + return NO_INIT; + } + effect->setThread(thread); + + if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { + // Auxiliary effects are inserted at the beginning of mEffects vector as + // they are processed first and accumulated in chain input buffer + mEffects.insertAt(effect, 0); + + // the input buffer for auxiliary effect contains mono samples in + // 32 bit format. This is to avoid saturation in AudoMixer + // accumulation stage. Saturation is done in EffectModule::process() before + // calling the process in effect engine + size_t numSamples = thread->frameCount(); + int32_t *buffer = new int32_t[numSamples]; + memset(buffer, 0, numSamples * sizeof(int32_t)); + effect->setInBuffer((int16_t *)buffer); + // auxiliary effects output samples to chain input buffer for further processing + // by insert effects + effect->setOutBuffer(mInBuffer); + } else { + // Insert effects are inserted at the end of mEffects vector as they are processed + // after track and auxiliary effects. 
+ // Insert effect order as a function of indicated preference: + // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if + // another effect is present + // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the + // last effect claiming first position + // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the + // first effect claiming last position + // else if EFFECT_FLAG_INSERT_ANY insert after first or before last + // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is + // already present + + size_t size = mEffects.size(); + size_t idx_insert = size; + ssize_t idx_insert_first = -1; + ssize_t idx_insert_last = -1; + + for (size_t i = 0; i < size; i++) { + effect_descriptor_t d = mEffects[i]->desc(); + uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK; + uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK; + if (iMode == EFFECT_FLAG_TYPE_INSERT) { + // check invalid effect chaining combinations + if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE || + iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) { + ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s", + desc.name, d.name); + return INVALID_OPERATION; + } + // remember position of first insert effect and by default + // select this as insert position for new effect + if (idx_insert == size) { + idx_insert = i; + } + // remember position of last insert effect claiming + // first position + if (iPref == EFFECT_FLAG_INSERT_FIRST) { + idx_insert_first = i; + } + // remember position of first insert effect claiming + // last position + if (iPref == EFFECT_FLAG_INSERT_LAST && + idx_insert_last == -1) { + idx_insert_last = i; + } + } + } + + // modify idx_insert from first position if needed + if (insertPref == EFFECT_FLAG_INSERT_LAST) { + if (idx_insert_last != -1) { + idx_insert = idx_insert_last; + } else { + idx_insert = size; + } + } else { + if (idx_insert_first != -1) { + idx_insert = idx_insert_first + 1; + } + } + + // always 
read samples from chain input buffer + effect->setInBuffer(mInBuffer); + + // if last effect in the chain, output samples to chain + // output buffer, otherwise to chain input buffer + if (idx_insert == size) { + if (idx_insert != 0) { + mEffects[idx_insert-1]->setOutBuffer(mInBuffer); + mEffects[idx_insert-1]->configure(); + } + effect->setOutBuffer(mOutBuffer); + } else { + effect->setOutBuffer(mInBuffer); + } + mEffects.insertAt(effect, idx_insert); + + ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this, + idx_insert); + } + effect->configure(); + return NO_ERROR; +} + +// removeEffect_l() must be called with PlaybackThread::mLock held +size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect) +{ + Mutex::Autolock _l(mLock); + size_t size = mEffects.size(); + uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK; + + for (size_t i = 0; i < size; i++) { + if (effect == mEffects[i]) { + // calling stop here will remove pre-processing effect from the audio HAL. 
+ // This is safe as we hold the EffectChain mutex which guarantees that we are not in + // the middle of a read from audio HAL + if (mEffects[i]->state() == EffectModule::ACTIVE || + mEffects[i]->state() == EffectModule::STOPPING) { + mEffects[i]->stop(); + } + if (type == EFFECT_FLAG_TYPE_AUXILIARY) { + delete[] effect->inBuffer(); + } else { + if (i == size - 1 && i != 0) { + mEffects[i - 1]->setOutBuffer(mOutBuffer); + mEffects[i - 1]->configure(); + } + } + mEffects.removeAt(i); + ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(), + this, i); + break; + } + } + + return mEffects.size(); +} + +// setDevice_l() must be called with PlaybackThread::mLock held +void AudioFlinger::EffectChain::setDevice_l(audio_devices_t device) +{ + size_t size = mEffects.size(); + for (size_t i = 0; i < size; i++) { + mEffects[i]->setDevice(device); + } +} + +// setMode_l() must be called with PlaybackThread::mLock held +void AudioFlinger::EffectChain::setMode_l(audio_mode_t mode) +{ + size_t size = mEffects.size(); + for (size_t i = 0; i < size; i++) { + mEffects[i]->setMode(mode); + } +} + +// setAudioSource_l() must be called with PlaybackThread::mLock held +void AudioFlinger::EffectChain::setAudioSource_l(audio_source_t source) +{ + size_t size = mEffects.size(); + for (size_t i = 0; i < size; i++) { + mEffects[i]->setAudioSource(source); + } +} + +// setVolume_l() must be called with PlaybackThread::mLock held +bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right) +{ + uint32_t newLeft = *left; + uint32_t newRight = *right; + bool hasControl = false; + int ctrlIdx = -1; + size_t size = mEffects.size(); + + // first update volume controller + for (size_t i = size; i > 0; i--) { + if (mEffects[i - 1]->isProcessEnabled() && + (mEffects[i - 1]->desc().flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL) { + ctrlIdx = i - 1; + hasControl = true; + break; + } + } + + if (ctrlIdx == mVolumeCtrlIdx && *left == 
mLeftVolume && *right == mRightVolume) { + if (hasControl) { + *left = mNewLeftVolume; + *right = mNewRightVolume; + } + return hasControl; + } + + mVolumeCtrlIdx = ctrlIdx; + mLeftVolume = newLeft; + mRightVolume = newRight; + + // second get volume update from volume controller + if (ctrlIdx >= 0) { + mEffects[ctrlIdx]->setVolume(&newLeft, &newRight, true); + mNewLeftVolume = newLeft; + mNewRightVolume = newRight; + } + // then indicate volume to all other effects in chain. + // Pass altered volume to effects before volume controller + // and requested volume to effects after controller + uint32_t lVol = newLeft; + uint32_t rVol = newRight; + + for (size_t i = 0; i < size; i++) { + if ((int)i == ctrlIdx) { + continue; + } + // this also works for ctrlIdx == -1 when there is no volume controller + if ((int)i > ctrlIdx) { + lVol = *left; + rVol = *right; + } + mEffects[i]->setVolume(&lVol, &rVol, false); + } + *left = newLeft; + *right = newRight; + + return hasControl; +} + +void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId); + result.append(buffer); + + bool locked = AudioFlinger::dumpTryLock(mLock); + // failed to lock - AudioFlinger is probably deadlocked + if (!locked) { + result.append("\tCould not lock mutex:\n"); + } + + result.append("\tNum fx In buffer Out buffer Active tracks:\n"); + snprintf(buffer, SIZE, "\t%02d 0x%08x 0x%08x %d\n", + mEffects.size(), + (uint32_t)mInBuffer, + (uint32_t)mOutBuffer, + mActiveTrackCnt); + result.append(buffer); + write(fd, result.string(), result.size()); + + for (size_t i = 0; i < mEffects.size(); ++i) { + sp<EffectModule> effect = mEffects[i]; + if (effect != 0) { + effect->dump(fd, args); + } + } + + if (locked) { + mLock.unlock(); + } +} + +// must be called with ThreadBase::mLock held +void AudioFlinger::EffectChain::setEffectSuspended_l( + const 
effect_uuid_t *type, bool suspend) +{ + sp<SuspendedEffectDesc> desc; + // use effect type UUID timelow as key as there is no real risk of identical + // timeLow fields among effect type UUIDs. + ssize_t index = mSuspendedEffects.indexOfKey(type->timeLow); + if (suspend) { + if (index >= 0) { + desc = mSuspendedEffects.valueAt(index); + } else { + desc = new SuspendedEffectDesc(); + desc->mType = *type; + mSuspendedEffects.add(type->timeLow, desc); + ALOGV("setEffectSuspended_l() add entry for %08x", type->timeLow); + } + if (desc->mRefCount++ == 0) { + sp<EffectModule> effect = getEffectIfEnabled(type); + if (effect != 0) { + desc->mEffect = effect; + effect->setSuspended(true); + effect->setEnabled(false); + } + } + } else { + if (index < 0) { + return; + } + desc = mSuspendedEffects.valueAt(index); + if (desc->mRefCount <= 0) { + ALOGW("setEffectSuspended_l() restore refcount should not be 0 %d", desc->mRefCount); + desc->mRefCount = 1; + } + if (--desc->mRefCount == 0) { + ALOGV("setEffectSuspended_l() remove entry for %08x", mSuspendedEffects.keyAt(index)); + if (desc->mEffect != 0) { + sp<EffectModule> effect = desc->mEffect.promote(); + if (effect != 0) { + effect->setSuspended(false); + effect->lock(); + EffectHandle *handle = effect->controlHandle_l(); + if (handle != NULL && !handle->destroyed_l()) { + effect->setEnabled_l(handle->enabled()); + } + effect->unlock(); + } + desc->mEffect.clear(); + } + mSuspendedEffects.removeItemsAt(index); + } + } +} + +// must be called with ThreadBase::mLock held +void AudioFlinger::EffectChain::setEffectSuspendedAll_l(bool suspend) +{ + sp<SuspendedEffectDesc> desc; + + ssize_t index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll); + if (suspend) { + if (index >= 0) { + desc = mSuspendedEffects.valueAt(index); + } else { + desc = new SuspendedEffectDesc(); + mSuspendedEffects.add((int)kKeyForSuspendAll, desc); + ALOGV("setEffectSuspendedAll_l() add entry for 0"); + } + if (desc->mRefCount++ == 0) { + Vector< 
sp<EffectModule> > effects; + getSuspendEligibleEffects(effects); + for (size_t i = 0; i < effects.size(); i++) { + setEffectSuspended_l(&effects[i]->desc().type, true); + } + } + } else { + if (index < 0) { + return; + } + desc = mSuspendedEffects.valueAt(index); + if (desc->mRefCount <= 0) { + ALOGW("setEffectSuspendedAll_l() restore refcount should not be 0 %d", desc->mRefCount); + desc->mRefCount = 1; + } + if (--desc->mRefCount == 0) { + Vector<const effect_uuid_t *> types; + for (size_t i = 0; i < mSuspendedEffects.size(); i++) { + if (mSuspendedEffects.keyAt(i) == (int)kKeyForSuspendAll) { + continue; + } + types.add(&mSuspendedEffects.valueAt(i)->mType); + } + for (size_t i = 0; i < types.size(); i++) { + setEffectSuspended_l(types[i], false); + } + ALOGV("setEffectSuspendedAll_l() remove entry for %08x", + mSuspendedEffects.keyAt(index)); + mSuspendedEffects.removeItem((int)kKeyForSuspendAll); + } + } +} + + +// The volume effect is used for automated tests only +#ifndef OPENSL_ES_H_ +static const effect_uuid_t SL_IID_VOLUME_ = { 0x09e8ede0, 0xddde, 0x11db, 0xb4f6, + { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }; +const effect_uuid_t * const SL_IID_VOLUME = &SL_IID_VOLUME_; +#endif //OPENSL_ES_H_ + +bool AudioFlinger::EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc) +{ + // auxiliary effects and visualizer are never suspended on output mix + if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) && + (((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) || + (memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) || + (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) { + return false; + } + return true; +} + +void AudioFlinger::EffectChain::getSuspendEligibleEffects( + Vector< sp<AudioFlinger::EffectModule> > &effects) +{ + effects.clear(); + for (size_t i = 0; i < mEffects.size(); i++) { + if (isEffectEligibleForSuspend(mEffects[i]->desc())) { + effects.add(mEffects[i]); + } + } +} + 
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectIfEnabled( + const effect_uuid_t *type) +{ + sp<EffectModule> effect = getEffectFromType_l(type); + return effect != 0 && effect->isEnabled() ? effect : 0; +} + +void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, + bool enabled) +{ + ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow); + if (enabled) { + if (index < 0) { + // if the effect is not suspend check if all effects are suspended + index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll); + if (index < 0) { + return; + } + if (!isEffectEligibleForSuspend(effect->desc())) { + return; + } + setEffectSuspended_l(&effect->desc().type, enabled); + index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow); + if (index < 0) { + ALOGW("checkSuspendOnEffectEnabled() Fx should be suspended here!"); + return; + } + } + ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x", + effect->desc().type.timeLow); + sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index); + // if effect is requested to suspended but was not yet enabled, supend it now. + if (desc->mEffect == 0) { + desc->mEffect = effect; + effect->setEnabled(false); + effect->setSuspended(true); + } + } else { + if (index < 0) { + return; + } + ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x", + effect->desc().type.timeLow); + sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index); + desc->mEffect.clear(); + effect->setSuspended(false); + } +} + +}; // namespace android diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h new file mode 100644 index 0000000..91303ee --- /dev/null +++ b/services/audioflinger/Effects.h @@ -0,0 +1,359 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef INCLUDING_FROM_AUDIOFLINGER_H + #error This header file should only be included from AudioFlinger.h +#endif + +//--- Audio Effect Management + +// EffectModule and EffectChain classes both have their own mutex to protect +// state changes or resource modifications. Always respect the following order +// if multiple mutexes must be acquired to avoid cross deadlock: +// AudioFlinger -> ThreadBase -> EffectChain -> EffectModule + +// The EffectModule class is a wrapper object controlling the effect engine implementation +// in the effect library. It prevents concurrent calls to process() and command() functions +// from different client threads. It keeps a list of EffectHandle objects corresponding +// to all client applications using this effect and notifies applications of effect state, +// control or parameter changes. It manages the activation state machine to send appropriate +// reset, enable, disable commands to effect engine and provide volume +// ramping when effects are activated/deactivated. +// When controlling an auxiliary effect, the EffectModule also provides an input buffer used by +// the attached track(s) to accumulate their auxiliary channel. 
+class EffectModule : public RefBase { +public: + EffectModule(ThreadBase *thread, + const wp<AudioFlinger::EffectChain>& chain, + effect_descriptor_t *desc, + int id, + int sessionId); + virtual ~EffectModule(); + + enum effect_state { + IDLE, + RESTART, + STARTING, + ACTIVE, + STOPPING, + STOPPED, + DESTROYED + }; + + int id() const { return mId; } + void process(); + void updateState(); + status_t command(uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t *replySize, + void *pReplyData); + + void reset_l(); + status_t configure(); + status_t init(); + effect_state state() const { + return mState; + } + uint32_t status() { + return mStatus; + } + int sessionId() const { + return mSessionId; + } + status_t setEnabled(bool enabled); + status_t setEnabled_l(bool enabled); + bool isEnabled() const; + bool isProcessEnabled() const; + + void setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; } + int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; } + void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; } + int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; } + void setChain(const wp<EffectChain>& chain) { mChain = chain; } + void setThread(const wp<ThreadBase>& thread) { mThread = thread; } + const wp<ThreadBase>& thread() { return mThread; } + + status_t addHandle(EffectHandle *handle); + size_t disconnect(EffectHandle *handle, bool unpinIfLast); + size_t removeHandle(EffectHandle *handle); + + const effect_descriptor_t& desc() const { return mDescriptor; } + wp<EffectChain>& chain() { return mChain; } + + status_t setDevice(audio_devices_t device); + status_t setVolume(uint32_t *left, uint32_t *right, bool controller); + status_t setMode(audio_mode_t mode); + status_t setAudioSource(audio_source_t source); + status_t start(); + status_t stop(); + void setSuspended(bool suspended); + bool suspended() const; + + EffectHandle* controlHandle_l(); + + bool isPinned() const { return mPinned; } + void 
unPin() { mPinned = false; } + bool purgeHandles(); + void lock() { mLock.lock(); } + void unlock() { mLock.unlock(); } + + void dump(int fd, const Vector<String16>& args); + +protected: + friend class AudioFlinger; // for mHandles + bool mPinned; + + // Maximum time allocated to effect engines to complete the turn off sequence + static const uint32_t MAX_DISABLE_TIME_MS = 10000; + + EffectModule(const EffectModule&); + EffectModule& operator = (const EffectModule&); + + status_t start_l(); + status_t stop_l(); + +mutable Mutex mLock; // mutex for process, commands and handles list protection + wp<ThreadBase> mThread; // parent thread + wp<EffectChain> mChain; // parent effect chain + const int mId; // this instance unique ID + const int mSessionId; // audio session ID + const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine + effect_config_t mConfig; // input and output audio configuration + effect_handle_t mEffectInterface; // Effect module C API + status_t mStatus; // initialization status + effect_state mState; // current activation state + Vector<EffectHandle *> mHandles; // list of client handles + // First handle in mHandles has highest priority and controls the effect module + uint32_t mMaxDisableWaitCnt; // maximum grace period before forcing an effect off after + // sending disable command. + uint32_t mDisableWaitCnt; // current process() calls count during disable period. + bool mSuspended; // effect is suspended: temporarily disabled by framework +}; + +// The EffectHandle class implements the IEffect interface. It provides resources +// to receive parameter updates, keeps track of effect control +// ownership and state and has a pointer to the EffectModule object it is controlling. +// There is one EffectHandle object for each application controlling (or using) +// an effect module. +// The EffectHandle is obtained by calling AudioFlinger::createEffect(). 
+class EffectHandle: public android::BnEffect { +public: + + EffectHandle(const sp<EffectModule>& effect, + const sp<AudioFlinger::Client>& client, + const sp<IEffectClient>& effectClient, + int32_t priority); + virtual ~EffectHandle(); + + // IEffect + virtual status_t enable(); + virtual status_t disable(); + virtual status_t command(uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t *replySize, + void *pReplyData); + virtual void disconnect(); +private: + void disconnect(bool unpinIfLast); +public: + virtual sp<IMemory> getCblk() const { return mCblkMemory; } + virtual status_t onTransact(uint32_t code, const Parcel& data, + Parcel* reply, uint32_t flags); + + + // Give or take control of effect module + // - hasControl: true if control is given, false if removed + // - signal: true client app should be signaled of change, false otherwise + // - enabled: state of the effect when control is passed + void setControl(bool hasControl, bool signal, bool enabled); + void commandExecuted(uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t replySize, + void *pReplyData); + void setEnabled(bool enabled); + bool enabled() const { return mEnabled; } + + // Getters + int id() const { return mEffect->id(); } + int priority() const { return mPriority; } + bool hasControl() const { return mHasControl; } + sp<EffectModule> effect() const { return mEffect; } + // destroyed_l() must be called with the associated EffectModule mLock held + bool destroyed_l() const { return mDestroyed; } + + void dump(char* buffer, size_t size); + +protected: + friend class AudioFlinger; // for mEffect, mHasControl, mEnabled + EffectHandle(const EffectHandle&); + EffectHandle& operator =(const EffectHandle&); + + sp<EffectModule> mEffect; // pointer to controlled EffectModule + sp<IEffectClient> mEffectClient; // callback interface for client notifications + /*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect() + sp<IMemory> 
mCblkMemory; // shared memory for control block + effect_param_cblk_t* mCblk; // control block for deferred parameter setting via + // shared memory + uint8_t* mBuffer; // pointer to parameter area in shared memory + int mPriority; // client application priority to control the effect + bool mHasControl; // true if this handle is controlling the effect + bool mEnabled; // cached enable state: needed when the effect is + // restored after being suspended + bool mDestroyed; // Set to true by destructor. Access with EffectModule + // mLock held +}; + +// the EffectChain class represents a group of effects associated to one audio session. +// There can be any number of EffectChain objects per output mixer thread (PlaybackThread). +// The EffecChain with session ID 0 contains global effects applied to the output mix. +// Effects in this chain can be insert or auxiliary. Effects in other chains (attached to +// tracks) are insert only. The EffectChain maintains an ordered list of effect module, the +// order corresponding in the effect process order. When attached to a track (session ID != 0), +// it also provide it's own input buffer used by the track as accumulation buffer. +class EffectChain : public RefBase { +public: + EffectChain(const wp<ThreadBase>& wThread, int sessionId); + EffectChain(ThreadBase *thread, int sessionId); + virtual ~EffectChain(); + + // special key used for an entry in mSuspendedEffects keyed vector + // corresponding to a suspend all request. 
+ static const int kKeyForSuspendAll = 0; + + // minimum duration during which we force calling effect process when last track on + // a session is stopped or removed to allow effect tail to be rendered + static const int kProcessTailDurationMs = 1000; + + void process_l(); + + void lock() { + mLock.lock(); + } + void unlock() { + mLock.unlock(); + } + + status_t addEffect_l(const sp<EffectModule>& handle); + size_t removeEffect_l(const sp<EffectModule>& handle); + + int sessionId() const { return mSessionId; } + void setSessionId(int sessionId) { mSessionId = sessionId; } + + sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor); + sp<EffectModule> getEffectFromId_l(int id); + sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type); + bool setVolume_l(uint32_t *left, uint32_t *right); + void setDevice_l(audio_devices_t device); + void setMode_l(audio_mode_t mode); + void setAudioSource_l(audio_source_t source); + + void setInBuffer(int16_t *buffer, bool ownsBuffer = false) { + mInBuffer = buffer; + mOwnInBuffer = ownsBuffer; + } + int16_t *inBuffer() const { + return mInBuffer; + } + void setOutBuffer(int16_t *buffer) { + mOutBuffer = buffer; + } + int16_t *outBuffer() const { + return mOutBuffer; + } + + void incTrackCnt() { android_atomic_inc(&mTrackCnt); } + void decTrackCnt() { android_atomic_dec(&mTrackCnt); } + int32_t trackCnt() const { return android_atomic_acquire_load(&mTrackCnt); } + + void incActiveTrackCnt() { android_atomic_inc(&mActiveTrackCnt); + mTailBufferCount = mMaxTailBuffers; } + void decActiveTrackCnt() { android_atomic_dec(&mActiveTrackCnt); } + int32_t activeTrackCnt() const { return android_atomic_acquire_load(&mActiveTrackCnt); } + + uint32_t strategy() const { return mStrategy; } + void setStrategy(uint32_t strategy) + { mStrategy = strategy; } + + // suspend effect of the given type + void setEffectSuspended_l(const effect_uuid_t *type, + bool suspend); + // suspend all eligible effects + void 
setEffectSuspendedAll_l(bool suspend); + // check if effects should be suspend or restored when a given effect is enable or disabled + void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, + bool enabled); + + void clearInputBuffer(); + + void dump(int fd, const Vector<String16>& args); + +protected: + friend class AudioFlinger; // for mThread, mEffects + EffectChain(const EffectChain&); + EffectChain& operator =(const EffectChain&); + + class SuspendedEffectDesc : public RefBase { + public: + SuspendedEffectDesc() : mRefCount(0) {} + + int mRefCount; + effect_uuid_t mType; + wp<EffectModule> mEffect; + }; + + // get a list of effect modules to suspend when an effect of the type + // passed is enabled. + void getSuspendEligibleEffects(Vector< sp<EffectModule> > &effects); + + // get an effect module if it is currently enable + sp<EffectModule> getEffectIfEnabled(const effect_uuid_t *type); + // true if the effect whose descriptor is passed can be suspended + // OEMs can modify the rules implemented in this method to exclude specific effect + // types or implementations from the suspend/restore mechanism. 
+ bool isEffectEligibleForSuspend(const effect_descriptor_t& desc); + + void clearInputBuffer_l(sp<ThreadBase> thread); + + wp<ThreadBase> mThread; // parent mixer thread + Mutex mLock; // mutex protecting effect list + Vector< sp<EffectModule> > mEffects; // list of effect modules + int mSessionId; // audio session ID + int16_t *mInBuffer; // chain input buffer + int16_t *mOutBuffer; // chain output buffer + + // 'volatile' here means these are accessed with atomic operations instead of mutex + volatile int32_t mActiveTrackCnt; // number of active tracks connected + volatile int32_t mTrackCnt; // number of tracks connected + + int32_t mTailBufferCount; // current effect tail buffer count + int32_t mMaxTailBuffers; // maximum effect tail buffers + bool mOwnInBuffer; // true if the chain owns its input buffer + int mVolumeCtrlIdx; // index of insert effect having control over volume + uint32_t mLeftVolume; // previous volume on left channel + uint32_t mRightVolume; // previous volume on right channel + uint32_t mNewLeftVolume; // new volume on left channel + uint32_t mNewRightVolume; // new volume on right channel + uint32_t mStrategy; // strategy for this effect chain + // mSuspendedEffects lists all effects currently suspended in the chain. + // Use effect type UUID timelow field as key. There is no real risk of identical + // timeLow fields among effect type UUIDs. + // Updated by updateSuspendedSessions_l() only. + KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects; +}; diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp index 3c8a256..21df1d7 100644 --- a/services/audioflinger/FastMixer.cpp +++ b/services/audioflinger/FastMixer.cpp @@ -14,9 +14,17 @@ * limitations under the License. */ +// <IMPORTANT_WARNING> +// Design rules for threadLoop() are given in the comments at section "Fast mixer thread" of +// StateQueue.h. In particular, avoid library and system calls except at well-known points. 
+// The design rules are only for threadLoop(), and don't apply to FastMixerDumpState methods. +// </IMPORTANT_WARNING> + #define LOG_TAG "FastMixer" //#define LOG_NDEBUG 0 +#define ATRACE_TAG ATRACE_TAG_AUDIO + #include <sys/atomics.h> #include <time.h> #include <utils/Log.h> @@ -84,6 +92,7 @@ bool FastMixer::threadLoop() struct timespec measuredWarmupTs = {0, 0}; // how long did it take for warmup to complete uint32_t warmupCycles = 0; // counter of number of loop cycles required to warmup NBAIO_Sink* teeSink = NULL; // if non-NULL, then duplicate write() to this non-blocking sink + NBLog::Writer dummyLogWriter, *logWriter = &dummyLogWriter; for (;;) { @@ -114,6 +123,10 @@ bool FastMixer::threadLoop() // As soon as possible of learning of a new dump area, start using it dumpState = next->mDumpState != NULL ? next->mDumpState : &dummyDumpState; teeSink = next->mTeeSink; + logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter; + if (mixer != NULL) { + mixer->setLog(logWriter); + } // We want to always have a valid reference to the previous (non-idle) state. // However, the state queue only guarantees access to current and previous states. @@ -157,6 +170,10 @@ bool FastMixer::threadLoop() if (old <= 0) { __futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL); } + int policy = sched_getscheduler(0); + if (!(policy == SCHED_FIFO || policy == SCHED_RR)) { + ALOGE("did not receive expected priority boost"); + } // This may be overly conservative; there could be times that the normal mixer // requests such a brief cold idle that it doesn't require resetting this flag. 
isWarm = false; @@ -301,7 +318,7 @@ bool FastMixer::threadLoop() generations[i] = fastTrack->mGeneration; } - // finally process modified tracks; these use the same slot + // finally process (potentially) modified tracks; these use the same slot // but may have a different buffer provider or volume provider unsigned modifiedTracks = currentTrackMask & previousTrackMask; while (modifiedTracks != 0) { @@ -309,6 +326,7 @@ bool FastMixer::threadLoop() modifiedTracks &= ~(1 << i); const FastTrack* fastTrack = ¤t->mFastTracks[i]; if (fastTrack->mGeneration != generations[i]) { + // this track was actually modified AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider; ALOG_ASSERT(bufferProvider != NULL); if (mixer != NULL) { @@ -371,14 +389,14 @@ bool FastMixer::threadLoop() // up to 1 ms. If enough active tracks all blocked in sequence, this would result // in the overall fast mix cycle being delayed. Should use a non-blocking FIFO. size_t framesReady = fastTrack->mBufferProvider->framesReady(); -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - // I wish we had formatted trace names - char traceName[16]; - strcpy(traceName, "framesReady"); - traceName[11] = i + (i < 10 ? '0' : 'A' - 10); - traceName[12] = '\0'; - ATRACE_INT(traceName, framesReady); -#endif + if (ATRACE_ENABLED()) { + // I wish we had formatted trace names + char traceName[16]; + strcpy(traceName, "fRdy"); + traceName[4] = i + (i < 10 ? 
'0' : 'A' - 10); + traceName[5] = '\0'; + ATRACE_INT(traceName, framesReady); + } FastTrackDump *ftDump = &dumpState->mTracks[i]; FastTrackUnderruns underruns = ftDump->mUnderruns; if (framesReady < frameCount) { @@ -424,16 +442,12 @@ bool FastMixer::threadLoop() // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink, // but this code should be modified to handle both non-blocking and blocking sinks dumpState->mWriteSequence++; -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - Tracer::traceBegin(ATRACE_TAG, "write"); -#endif + ATRACE_BEGIN("write"); ssize_t framesWritten = outputSink->write(mixBuffer, frameCount); -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - Tracer::traceEnd(ATRACE_TAG); -#endif + ATRACE_END(); dumpState->mWriteSequence++; if (framesWritten >= 0) { - ALOG_ASSERT(framesWritten <= frameCount); + ALOG_ASSERT((size_t) framesWritten <= frameCount); dumpState->mFramesWritten += framesWritten; //if ((size_t) framesWritten == frameCount) { // didFullWrite = true; @@ -451,6 +465,7 @@ bool FastMixer::threadLoop() struct timespec newTs; int rc = clock_gettime(CLOCK_MONOTONIC, &newTs); if (rc == 0) { + //logWriter->logTimestamp(newTs); if (oldTsValid) { time_t sec = newTs.tv_sec - oldTs.tv_sec; long nsec = newTs.tv_nsec - oldTs.tv_nsec; @@ -485,9 +500,7 @@ bool FastMixer::threadLoop() sleepNs = -1; if (isWarm) { if (sec > 0 || nsec > underrunNs) { -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) - ScopedTrace st(ATRACE_TAG, "underrun"); -#endif + ATRACE_NAME("underrun"); // FIXME only log occasionally ALOGV("underrun: time since last cycle %d.%03ld sec", (int) sec, nsec / 1000000L); @@ -567,10 +580,8 @@ bool FastMixer::threadLoop() // this store #4 is not atomic with respect to stores #1, #2, #3 above, but // the newest open and oldest closed halves are atomic with respect to each other dumpState->mBounds = bounds; -#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER) 
ATRACE_INT("cycle_ms", monotonicNs / 1000000); ATRACE_INT("load_us", loadNs / 1000); -#endif } #endif } else { diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h index 462739b..2ab1d04 100644 --- a/services/audioflinger/FastMixer.h +++ b/services/audioflinger/FastMixer.h @@ -107,7 +107,7 @@ struct FastMixerDumpState { #ifdef FAST_MIXER_STATISTICS // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency. // kSamplingN is the size of the sampling frame, and must be a power of 2 <= 0x8000. - static const uint32_t kSamplingN = 0x1000; + static const uint32_t kSamplingN = 0x8000; // The bounds define the interval of valid samples, and are represented as follows: // newest open (excluded) endpoint = lower 16 bits of bounds, modulo N // oldest closed (included) endpoint = upper 16 bits of bounds, modulo N diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp index 6305a83..c45c81b 100644 --- a/services/audioflinger/FastMixerState.cpp +++ b/services/audioflinger/FastMixerState.cpp @@ -31,7 +31,7 @@ FastTrack::~FastTrack() FastMixerState::FastMixerState() : mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0), mFrameCount(0), mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0), - mDumpState(NULL), mTeeSink(NULL) + mDumpState(NULL), mTeeSink(NULL), mNBLogWriter(NULL) { } diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h index 6e53f21..f6e7903 100644 --- a/services/audioflinger/FastMixerState.h +++ b/services/audioflinger/FastMixerState.h @@ -20,6 +20,7 @@ #include <system/audio.h> #include <media/ExtendedAudioBufferProvider.h> #include <media/nbaio/NBAIO.h> +#include <media/nbaio/NBLog.h> namespace android { @@ -77,6 +78,7 @@ struct FastMixerState { // This might be a one-time configuration rather than per-state FastMixerDumpState* mDumpState; // if non-NULL, then update dump state periodically 
NBAIO_Sink* mTeeSink; // if non-NULL, then duplicate write()s to this non-blocking sink + NBLog::Writer* mNBLogWriter; // non-blocking logger }; // struct FastMixerState } // namespace android diff --git a/services/audioflinger/ISchedulingPolicyService.cpp b/services/audioflinger/ISchedulingPolicyService.cpp index 909b77e..0079968 100644 --- a/services/audioflinger/ISchedulingPolicyService.cpp +++ b/services/audioflinger/ISchedulingPolicyService.cpp @@ -37,14 +37,15 @@ public: { } - virtual int requestPriority(int32_t pid, int32_t tid, int32_t prio) + virtual int requestPriority(int32_t pid, int32_t tid, int32_t prio, bool asynchronous) { Parcel data, reply; data.writeInterfaceToken(ISchedulingPolicyService::getInterfaceDescriptor()); data.writeInt32(pid); data.writeInt32(tid); data.writeInt32(prio); - remote()->transact(REQUEST_PRIORITY_TRANSACTION, data, &reply); + uint32_t flags = asynchronous ? IBinder::FLAG_ONEWAY : 0; + remote()->transact(REQUEST_PRIORITY_TRANSACTION, data, &reply, flags); // fail on exception if (reply.readExceptionCode() != 0) return -1; return reply.readInt32(); diff --git a/services/audioflinger/ISchedulingPolicyService.h b/services/audioflinger/ISchedulingPolicyService.h index a38e67e..b94b191 100644 --- a/services/audioflinger/ISchedulingPolicyService.h +++ b/services/audioflinger/ISchedulingPolicyService.h @@ -27,7 +27,7 @@ public: DECLARE_META_INTERFACE(SchedulingPolicyService); virtual int requestPriority(/*pid_t*/int32_t pid, /*pid_t*/int32_t tid, - int32_t prio) = 0; + int32_t prio, bool asynchronous) = 0; }; diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h new file mode 100644 index 0000000..a749d7a --- /dev/null +++ b/services/audioflinger/PlaybackTracks.h @@ -0,0 +1,274 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef INCLUDING_FROM_AUDIOFLINGER_H + #error This header file should only be included from AudioFlinger.h +#endif + +// playback track +class Track : public TrackBase, public VolumeProvider { +public: + Track( PlaybackThread *thread, + const sp<Client>& client, + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId, + IAudioFlinger::track_flags_t flags); + virtual ~Track(); + + static void appendDumpHeader(String8& result); + void dump(char* buffer, size_t size); + virtual status_t start(AudioSystem::sync_event_t event = + AudioSystem::SYNC_EVENT_NONE, + int triggerSession = 0); + virtual void stop(); + void pause(); + + void flush(); + void destroy(); + int name() const { return mName; } + + audio_stream_type_t streamType() const { + return mStreamType; + } + status_t attachAuxEffect(int EffectId); + void setAuxBuffer(int EffectId, int32_t *buffer); + int32_t *auxBuffer() const { return mAuxBuffer; } + void setMainBuffer(int16_t *buffer) { mMainBuffer = buffer; } + int16_t *mainBuffer() const { return mMainBuffer; } + int auxEffectId() const { return mAuxEffectId; } + +// implement FastMixerState::VolumeProvider interface + virtual uint32_t getVolumeLR(); + + virtual status_t setSyncEvent(const sp<SyncEvent>& event); + +protected: + // for numerous + friend class PlaybackThread; + friend class MixerThread; + friend class DirectOutputThread; + + Track(const Track&); + Track& operator = (const Track&); + + 
// AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, + int64_t pts = kInvalidPTS); + // releaseBuffer() not overridden + + virtual size_t framesReady() const; + + bool isPausing() const { return mState == PAUSING; } + bool isPaused() const { return mState == PAUSED; } + bool isResuming() const { return mState == RESUMING; } + bool isReady() const; + void setPaused() { mState = PAUSED; } + void reset(); + + bool isOutputTrack() const { + return (mStreamType == AUDIO_STREAM_CNT); + } + + sp<IMemory> sharedBuffer() const { return mSharedBuffer; } + + // framesWritten is cumulative, never reset, and is shared all tracks + // audioHalFrames is derived from output latency + // FIXME parameters not needed, could get them from the thread + bool presentationComplete(size_t framesWritten, size_t audioHalFrames); + +public: + void triggerEvents(AudioSystem::sync_event_t type); + void invalidate(); + bool isInvalid() const { return mIsInvalid; } + virtual bool isTimedTrack() const { return false; } + bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; } + +protected: + + // FILLED state is used for suppressing volume ramp at begin of playing + enum {FS_INVALID, FS_FILLING, FS_FILLED, FS_ACTIVE}; + mutable uint8_t mFillingUpStatus; + int8_t mRetryCount; + const sp<IMemory> mSharedBuffer; + bool mResetDone; + const audio_stream_type_t mStreamType; + int mName; // track name on the normal mixer, + // allocated statically at track creation time, + // and is even allocated (though unused) for fast tracks + // FIXME don't allocate track name for fast tracks + int16_t *mMainBuffer; + int32_t *mAuxBuffer; + int mAuxEffectId; + bool mHasVolumeController; + size_t mPresentationCompleteFrames; // number of frames written to the + // audio HAL when this track will be fully rendered + // zero means not monitoring +private: + IAudioFlinger::track_flags_t mFlags; + + // The following fields are only for fast tracks, 
and should be in a subclass + int mFastIndex; // index within FastMixerState::mFastTracks[]; + // either mFastIndex == -1 if not isFastTrack() + // or 0 < mFastIndex < FastMixerState::kMaxFast because + // index 0 is reserved for normal mixer's submix; + // index is allocated statically at track creation time + // but the slot is only used if track is active + FastTrackUnderruns mObservedUnderruns; // Most recently observed value of + // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns + uint32_t mUnderrunCount; // Counter of total number of underruns, never reset + volatile float mCachedVolume; // combined master volume and stream type volume; + // 'volatile' means accessed without lock or + // barrier, but is read/written atomically + bool mIsInvalid; // non-resettable latch, set by invalidate() +}; // end of Track + +class TimedTrack : public Track { + public: + static sp<TimedTrack> create(PlaybackThread *thread, + const sp<Client>& client, + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId); + virtual ~TimedTrack(); + + class TimedBuffer { + public: + TimedBuffer(); + TimedBuffer(const sp<IMemory>& buffer, int64_t pts); + const sp<IMemory>& buffer() const { return mBuffer; } + int64_t pts() const { return mPTS; } + uint32_t position() const { return mPosition; } + void setPosition(uint32_t pos) { mPosition = pos; } + private: + sp<IMemory> mBuffer; + int64_t mPTS; + uint32_t mPosition; + }; + + // Mixer facing methods. + virtual bool isTimedTrack() const { return true; } + virtual size_t framesReady() const; + + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, + int64_t pts); + virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); + + // Client/App facing methods. 
+ status_t allocateTimedBuffer(size_t size, + sp<IMemory>* buffer); + status_t queueTimedBuffer(const sp<IMemory>& buffer, + int64_t pts); + status_t setMediaTimeTransform(const LinearTransform& xform, + TimedAudioTrack::TargetTimeline target); + + private: + TimedTrack(PlaybackThread *thread, + const sp<Client>& client, + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId); + + void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer); + void timedYieldSilence_l(uint32_t numFrames, + AudioBufferProvider::Buffer* buffer); + void trimTimedBufferQueue_l(); + void trimTimedBufferQueueHead_l(const char* logTag); + void updateFramesPendingAfterTrim_l(const TimedBuffer& buf, + const char* logTag); + + uint64_t mLocalTimeFreq; + LinearTransform mLocalTimeToSampleTransform; + LinearTransform mMediaTimeToSampleTransform; + sp<MemoryDealer> mTimedMemoryDealer; + + Vector<TimedBuffer> mTimedBufferQueue; + bool mQueueHeadInFlight; + bool mTrimQueueHeadOnRelease; + uint32_t mFramesPendingInQueue; + + uint8_t* mTimedSilenceBuffer; + uint32_t mTimedSilenceBufferSize; + mutable Mutex mTimedBufferQueueLock; + bool mTimedAudioOutputOnTime; + CCHelper mCCHelper; + + Mutex mMediaTimeTransformLock; + LinearTransform mMediaTimeTransform; + bool mMediaTimeTransformValid; + TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget; +}; + + +// playback track, used by DuplicatingThread +class OutputTrack : public Track { +public: + + class Buffer : public AudioBufferProvider::Buffer { + public: + int16_t *mBuffer; + }; + + OutputTrack(PlaybackThread *thread, + DuplicatingThread *sourceThread, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount); + virtual ~OutputTrack(); + + virtual status_t start(AudioSystem::sync_event_t event = + AudioSystem::SYNC_EVENT_NONE, + int triggerSession = 0); + virtual 
void stop(); + bool write(int16_t* data, uint32_t frames); + bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; } + bool isActive() const { return mActive; } + const wp<ThreadBase>& thread() const { return mThread; } + +private: + + enum { + NO_MORE_BUFFERS = 0x80000001, // same in AudioTrack.h, ok to be different value + }; + + status_t obtainBuffer(AudioBufferProvider::Buffer* buffer, + uint32_t waitTimeMs); + void clearBufferQueue(); + + // Maximum number of pending buffers allocated by OutputTrack::write() + static const uint8_t kMaxOverFlowBuffers = 10; + + Vector < Buffer* > mBufferQueue; + AudioBufferProvider::Buffer mOutBuffer; + bool mActive; + DuplicatingThread* const mSourceThread; // for waitTimeMs() in write() + AudioTrackClientProxy* mClientProxy; +}; // end of OutputTrack diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h new file mode 100644 index 0000000..6c0d1d3 --- /dev/null +++ b/services/audioflinger/RecordTracks.h @@ -0,0 +1,60 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +#ifndef INCLUDING_FROM_AUDIOFLINGER_H + #error This header file should only be included from AudioFlinger.h +#endif + +// record track +class RecordTrack : public TrackBase { +public: + RecordTrack(RecordThread *thread, + const sp<Client>& client, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + int sessionId); + virtual ~RecordTrack(); + + virtual status_t start(AudioSystem::sync_event_t event, int triggerSession); + virtual void stop(); + + void destroy(); + + // clear the buffer overflow flag + void clearOverflow() { mOverflow = false; } + // set the buffer overflow flag and return previous value + bool setOverflow() { bool tmp = mOverflow; mOverflow = true; + return tmp; } + + static void appendDumpHeader(String8& result); + void dump(char* buffer, size_t size); + +private: + friend class AudioFlinger; // for mState + + RecordTrack(const RecordTrack&); + RecordTrack& operator = (const RecordTrack&); + + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, + int64_t pts = kInvalidPTS); + // releaseBuffer() not overridden + + bool mOverflow; // overflow on most recent attempt to fill client buffer +}; diff --git a/services/audioflinger/SchedulingPolicyService.cpp b/services/audioflinger/SchedulingPolicyService.cpp index 59cc99a..36e62e9 100644 --- a/services/audioflinger/SchedulingPolicyService.cpp +++ b/services/audioflinger/SchedulingPolicyService.cpp @@ -25,7 +25,7 @@ static sp<ISchedulingPolicyService> sSchedulingPolicyService; static const String16 _scheduling_policy("scheduling_policy"); static Mutex sMutex; -int requestPriority(pid_t pid, pid_t tid, int32_t prio) +int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous) { // FIXME merge duplicated code related to service lookup, caching, and error recovery sp<ISchedulingPolicyService> sps; @@ -46,7 +46,7 @@ int requestPriority(pid_t pid, pid_t tid, int32_t prio) } sleep(1); } 
- return sps->requestPriority(pid, tid, prio); + return sps->requestPriority(pid, tid, prio, asynchronous); } } // namespace android diff --git a/services/audioflinger/SchedulingPolicyService.h b/services/audioflinger/SchedulingPolicyService.h index 7ac8454..a9870d4 100644 --- a/services/audioflinger/SchedulingPolicyService.h +++ b/services/audioflinger/SchedulingPolicyService.h @@ -21,7 +21,10 @@ namespace android { // Request elevated priority for thread tid, whose thread group leader must be pid. // The priority parameter is currently restricted to either 1 or 2. -int requestPriority(pid_t pid, pid_t tid, int32_t prio); +// The asynchronous parameter should be 'true' to return immediately, +// after the request is enqueued but not necessarily executed. +// The default value 'false' means to return after request has been enqueued and executed. +int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous = false); } // namespace android diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp index 6a58852..d15bd04 100644 --- a/services/audioflinger/ServiceUtilities.cpp +++ b/services/audioflinger/ServiceUtilities.cpp @@ -21,8 +21,9 @@ namespace android { -// This optimization assumes mediaserver process doesn't fork, which it doesn't -const pid_t getpid_cached = getpid(); +// Not valid until initialized by AudioFlinger constructor. It would have to be +// re-initialized if the process containing AudioFlinger service forks (which it doesn't). 
+pid_t getpid_cached; bool recordingAllowed() { if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true; diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h index f77ec5b..80cecba 100644 --- a/services/audioflinger/ServiceUtilities.h +++ b/services/audioflinger/ServiceUtilities.h @@ -18,7 +18,7 @@ namespace android { -extern const pid_t getpid_cached; +extern pid_t getpid_cached; bool recordingAllowed(); bool settingsAllowed(); diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h index eba190c..e33b3c6 100644 --- a/services/audioflinger/StateQueue.h +++ b/services/audioflinger/StateQueue.h @@ -17,6 +17,72 @@ #ifndef ANDROID_AUDIO_STATE_QUEUE_H #define ANDROID_AUDIO_STATE_QUEUE_H +// The state queue template class was originally driven by this use case / requirements: +// There are two threads: a fast mixer, and a normal mixer, and they share state. +// The interesting part of the shared state is a set of active fast tracks, +// and the output HAL configuration (buffer size in frames, sample rate, etc.). +// Fast mixer thread: +// periodic with typical period < 10 ms +// FIFO/RR scheduling policy and a low fixed priority +// ok to block for bounded time using nanosleep() to achieve desired period +// must not block on condition wait, mutex lock, atomic operation spin, I/O, etc. 
+// under typical operations of mixing, writing, or adding/removing tracks +// ok to block for unbounded time when the output HAL configuration changes, +// and this may result in an audible artifact +// needs read-only access to a recent stable state, +// but not necessarily the most current one +// Normal mixer thread: +// periodic with typical period ~40 ms +// SCHED_OTHER scheduling policy and nice priority == urgent audio +// ok to block, but prefer to avoid as much as possible +// needs read/write access to state +// The normal mixer may need to temporarily suspend the fast mixer thread during mode changes. +// It will do this using the state -- one of the fields tells the fast mixer to idle. + +// Additional requirements: +// - observer must always be able to poll for and view the latest pushed state; it must never be +// blocked from seeing that state +// - observer does not need to see every state in sequence; it is OK for it to skip states +// [see below for more on this] +// - mutator must always be able to read/modify a state, it must never be blocked from reading or +// modifying state +// - reduce memcpy where possible +// - work well if the observer runs more frequently than the mutator, +// as is the case with fast mixer/normal mixer. +// It is not a requirement to work well if the roles were reversed, +// and the mutator were to run more frequently than the observer. +// In this case, the mutator could get blocked waiting for a slot to fill up for +// it to work with. This could be solved somewhat by increasing the depth of the queue, but it would +// still limit the mutator to a finite number of changes before it would block. A future +// possibility, not implemented here, would be to allow the mutator to safely overwrite an already +// pushed state. This could be done by the mutator overwriting mNext, but then being prepared to +// read an mAck which is actually for the earlier mNext (since there is a race). 
+ +// Solution: +// Let's call the fast mixer thread the "observer" and normal mixer thread the "mutator". +// We assume there is only a single observer and a single mutator; this is critical. +// Each state is of type <T>, and should contain only POD (Plain Old Data) and raw pointers, as +// memcpy() may be used to copy state, and the destructors are run in unpredictable order. +// The states in chronological order are: previous, current, next, and mutating: +// previous read-only, observer can compare vs. current to see the subset that changed +// current read-only, this is the primary state for observer +// next read-only, when observer is ready to accept a new state it will shift it in: +// previous = current +// current = next +// and the slot formerly used by previous is now available to the mutator. +// mutating invisible to observer, read/write to mutator +// Initialization is tricky, especially for the observer. If the observer starts execution +// before the mutator, there are no previous, current, or next states. And even if the observer +// starts execution after the mutator, there is a next state but no previous or current states. +// To solve this, we'll have the observer idle until there is a next state, +// and it will have to deal with the case where there is no previous state. +// The states are stored in a shared FIFO queue represented using a circular array. +// The observer polls for mutations, and receives a new state pointer after a +// a mutation is pushed onto the queue. To the observer, the state pointers are +// effectively in random order, that is the observer should not do address +// arithmetic on the state pointers. However to the mutator, the state pointers +// are in a definite circular order. 
+ namespace android { #ifdef STATE_QUEUE_DUMP @@ -108,7 +174,7 @@ public: #endif private: - static const unsigned kN = 4; // values != 4 are not supported by this code + static const unsigned kN = 4; // values < 4 are not supported by this code T mStates[kN]; // written by mutator, read by observer // "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp new file mode 100644 index 0000000..539bb4f --- /dev/null +++ b/services/audioflinger/Threads.cpp @@ -0,0 +1,4461 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + + +#define LOG_TAG "AudioFlinger" +//#define LOG_NDEBUG 0 +#define ATRACE_TAG ATRACE_TAG_AUDIO + +#include <math.h> +#include <fcntl.h> +#include <sys/stat.h> +#include <cutils/properties.h> +#include <cutils/compiler.h> +#include <utils/Log.h> +#include <utils/Trace.h> + +#include <private/media/AudioTrackShared.h> +#include <hardware/audio.h> +#include <audio_effects/effect_ns.h> +#include <audio_effects/effect_aec.h> +#include <audio_utils/primitives.h> + +// NBAIO implementations +#include <media/nbaio/AudioStreamOutSink.h> +#include <media/nbaio/MonoPipe.h> +#include <media/nbaio/MonoPipeReader.h> +#include <media/nbaio/Pipe.h> +#include <media/nbaio/PipeReader.h> +#include <media/nbaio/SourceAudioBufferProvider.h> + +#include <powermanager/PowerManager.h> + +#include <common_time/cc_helper.h> +#include <common_time/local_clock.h> + +#include "AudioFlinger.h" +#include "AudioMixer.h" +#include "FastMixer.h" +#include "ServiceUtilities.h" +#include "SchedulingPolicyService.h" + +#undef ADD_BATTERY_DATA + +#ifdef ADD_BATTERY_DATA +#include <media/IMediaPlayerService.h> +#include <media/IMediaDeathNotifier.h> +#endif + +// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds +#ifdef DEBUG_CPU_USAGE +#include <cpustats/CentralTendencyStatistics.h> +#include <cpustats/ThreadCpuUsage.h> +#endif + +// ---------------------------------------------------------------------------- + +// Note: the following macro is used for extremely verbose logging message. In +// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to +// 0; but one side effect of this is to turn all LOGV's as well. Some messages +// are so verbose that we want to suppress them even when we have ALOG_ASSERT +// turned on. Do not uncomment the #def below unless you really know what you +// are doing and want to see all of the extremely verbose messages. 
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+// compiled out in normal builds; keeps call sites cheap
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+namespace android {
+
+// retry counts for buffer fill timeout
+// 50 * ~20msecs = 1 second
+static const int8_t kMaxTrackRetries = 50;
+static const int8_t kMaxTrackStartupRetries = 50;
+// allow less retry attempts on direct output thread.
+// direct outputs can be a scarce resource in audio hardware and should
+// be released as quickly as possible.
+static const int8_t kMaxTrackRetriesDirect = 2;
+
+// don't warn about blocked writes or record buffer overflows more often than this
+static const nsecs_t kWarningThrottleNs = seconds(5);
+
+// RecordThread loop sleep time upon application overrun or audio HAL read error
+static const int kRecordThreadSleepUs = 5000;
+
+// maximum time to wait for setParameters to complete
+static const nsecs_t kSetParametersTimeoutNs = seconds(2);
+
+// minimum sleep time for the mixer thread loop when tracks are active but in underrun
+static const uint32_t kMinThreadSleepTimeUs = 5000;
+// maximum divider applied to the active sleep time in the mixer thread loop
+static const uint32_t kMaxThreadSleepTimeShift = 2;
+
+// minimum normal mix buffer size, expressed in milliseconds rather than frames
+static const uint32_t kMinNormalMixBufferSizeMs = 20;
+// maximum normal mix buffer size
+static const uint32_t kMaxNormalMixBufferSizeMs = 24;
+
+// Whether to use fast mixer
+// NOTE(review): only FastMixer_Static is the supported production value; the
+// others are debug-only per the comments below.
+static const enum {
+    FastMixer_Never,    // never initialize or use: for debugging only
+    FastMixer_Always,   // always initialize and use, even if not needed: for debugging only
+                        // normal mixer multiplier is 1
+    FastMixer_Static,   // initialize if needed, then use all the time if initialized,
+                        // multiplier is calculated based on min & max normal mixer buffer size
+    FastMixer_Dynamic,  // initialize if needed, then use dynamically depending on track load,
+                        // multiplier is calculated based on min & max normal mixer buffer size
+    // FIXME for FastMixer_Dynamic:
+    // Supporting this option will require fixing HALs that can't handle large writes.
+    // For example, one HAL implementation returns an error from a large write,
+    // and another HAL implementation corrupts memory, possibly in the sample rate converter.
+    // We could either fix the HAL implementations, or provide a wrapper that breaks
+    // up large writes into smaller ones, and the wrapper would need to deal with scheduler.
+} kUseFastMixer = FastMixer_Static;
+
+// Priorities for requestPriority
+static const int kPriorityAudioApp = 2;
+static const int kPriorityFastMixer = 3;
+
+// IAudioFlinger::createTrack() reports back to client the total size of shared memory area
+// for the track. The client then sub-divides this into smaller buffers for its use.
+// Currently the client uses double-buffering by default, but doesn't tell us about that.
+// So for now we just assume that client is double-buffered.
+// FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
+// N-buffering, so AudioFlinger could allocate the right amount of memory.
+// See the client's minBufCount and mNotificationFramesAct calculations for details.
+static const int kFastTrackMultiplier = 2;
+
+// ----------------------------------------------------------------------------
+
+#ifdef ADD_BATTERY_DATA
+// To collect the amplifier usage
+// Forwards battery-usage params to the media player service; silently drops
+// them if the service is unavailable (getMediaPlayerService already logged).
+static void addBatteryData(uint32_t params) {
+    sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
+    if (service == NULL) {
+        // it already logged
+        return;
+    }
+
+    service->addBatteryData(params);
+}
+#endif
+
+
+// ----------------------------------------------------------------------------
+//      CPU Stats
+// ----------------------------------------------------------------------------
+
+// Per-thread CPU usage sampler; all state and work are compiled out unless
+// DEBUG_CPU_USAGE is defined, so sample() is a no-op in normal builds.
+class CpuStats {
+public:
+    CpuStats();
+    void sample(const String8 &title);
+#ifdef DEBUG_CPU_USAGE
+private:
+    ThreadCpuUsage mCpuUsage;           // instantaneous thread CPU usage in wall clock ns
+    CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
+
+    CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
+
+    int mCpuNum;                        // thread's current CPU number
+    int mCpukHz;                        // frequency of thread's current CPU in kHz
+#endif
+};
+
+CpuStats::CpuStats()
+#ifdef DEBUG_CPU_USAGE
+    : mCpuNum(-1), mCpukHz(-1)
+#endif
+{
+}
+
+// Record one mixer-loop sample; every 128th sample, if at least
+// DEBUG_CPU_USAGE seconds have elapsed, log aggregate statistics and reset.
+void CpuStats::sample(const String8 &title) {
+#ifdef DEBUG_CPU_USAGE
+    // get current thread's delta CPU time in wall clock ns
+    double wcNs;
+    bool valid = mCpuUsage.sampleAndEnable(wcNs);
+
+    // record sample for wall clock statistics
+    if (valid) {
+        mWcStats.sample(wcNs);
+    }
+
+    // get the current CPU number
+    int cpuNum = sched_getcpu();
+
+    // get the current CPU frequency in kHz
+    int cpukHz = mCpuUsage.getCpukHz(cpuNum);
+
+    // check if either CPU number or frequency changed
+    if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
+        mCpuNum = cpuNum;
+        mCpukHz = cpukHz;
+        // ignore sample for purposes of cycles
+        valid = false;
+    }
+
+    // if no change in CPU number or frequency, then record sample for cycle statistics
+    if (valid && mCpukHz > 0) {
+        // ns * kHz * 1e-6 == cycles
+        double cycles = wcNs * cpukHz * 0.000001;
+        mHzStats.sample(cycles);
+    }
+
+    unsigned n = mWcStats.n();
+    // mCpuUsage.elapsed() is expensive, so don't call it every loop
+    if ((n & 127) == 1) {
+        long long elapsed = mCpuUsage.elapsed();
+        if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
+            double perLoop = elapsed / (double) n;
+            double perLoop100 = perLoop * 0.01;
+            double perLoop1k = perLoop * 0.001;
+            double mean = mWcStats.mean();
+            double stddev = mWcStats.stddev();
+            double minimum = mWcStats.minimum();
+            double maximum = mWcStats.maximum();
+            double meanCycles = mHzStats.mean();
+            double stddevCycles = mHzStats.stddev();
+            double minCycles = mHzStats.minimum();
+            double maxCycles = mHzStats.maximum();
+            mCpuUsage.resetElapsed();
+            mWcStats.reset();
+            mHzStats.reset();
+            ALOGD("CPU usage for %s over past %.1f secs\n"
+                "  (%u mixer loops at %.1f mean ms per loop):\n"
+                "  us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
+                "  %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
+                "  MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
+                    title.string(),
+                    elapsed * .000000001, n, perLoop * .000001,
+                    mean * .001,
+                    stddev * .001,
+                    minimum * .001,
+                    maximum * .001,
+                    mean / perLoop100,
+                    stddev / perLoop100,
+                    minimum / perLoop100,
+                    maximum / perLoop100,
+                    meanCycles / perLoop1k,
+                    stddevCycles / perLoop1k,
+                    minCycles / perLoop1k,
+                    maxCycles / perLoop1k);
+
+        }
+    }
+#endif
+};
+// NOTE(review): stray ';' after the closing brace of CpuStats::sample above —
+// harmless empty declaration, but should be removed.
+
+// ----------------------------------------------------------------------------
+//      ThreadBase
+// ----------------------------------------------------------------------------
+
+AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+        audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
+    :   Thread(false /*canCallJava*/),
+        mType(type),
+        mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0),
+        // mChannelMask
+        mChannelCount(0),
+        mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
+        mParamStatus(NO_ERROR),
+        mStandby(false),
+        mOutDevice(outDevice), mInDevice(inDevice),
+        mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
+        // mName will be set by concrete (non-virtual) subclass
+        mDeathRecipient(new PMDeathRecipient(this))
+{
+}
+
+AudioFlinger::ThreadBase::~ThreadBase()
+{
+    // wake any waiter stuck in setParameters() so it can time out / return
+    mParamCond.broadcast();
+    // do not lock the mutex in destructor
+    releaseWakeLock_l();
+    if (mPowerManager != 0) {
+        sp<IBinder> binder = mPowerManager->asBinder();
+        binder->unlinkToDeath(mDeathRecipient);
+    }
+}
+
+void AudioFlinger::ThreadBase::exit()
+{
+    ALOGV("ThreadBase::exit");
+    // do any cleanup required for exit to succeed
+    preExit();
+    {
+        // This lock prevents the following race in thread (uniprocessor for illustration):
+        //  if (!exitPending()) {
+        //      // context switch from here to exit()
+        //      // exit() calls requestExit(), what exitPending() observes
+        //      // exit() calls signal(), which is dropped since no waiters
+        //      // context switch back from exit() to here
+        //      mWaitWorkCV.wait(...);
+        //      // now thread is hung
+        //  }
+        AutoMutex lock(mLock);
+        requestExit();
+        mWaitWorkCV.broadcast();
+    }
+    // When Thread::requestExitAndWait is made virtual and this method is renamed to
+    // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
+    requestExitAndWait();
+}
+
+// Queue a key;value parameter string for the thread loop to apply, and block
+// (up to kSetParametersTimeoutNs) for the result; returns TIMED_OUT if the
+// thread loop never picked the request up (e.g. it already exited).
+status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
+{
+    status_t status;
+
+    ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
+    Mutex::Autolock _l(mLock);
+
+    mNewParameters.add(keyValuePairs);
+    mWaitWorkCV.signal();
+    // wait condition with timeout in case the thread loop has exited
+    // before the request could be processed
+    if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) {
+        status = mParamStatus;
+        // ack the thread loop so it can proceed to the next queued request
+        mWaitWorkCV.signal();
+    } else {
+        status = TIMED_OUT;
+    }
+    return status;
+}
+
+void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
+{
+    Mutex::Autolock _l(mLock);
+    sendIoConfigEvent_l(event, param);
+}
+
+// 
sendIoConfigEvent_l() must be called with ThreadBase::mLock held
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param)
+{
+    IoConfigEvent *ioEvent = new IoConfigEvent(event, param);
+    mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent));
+    ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event,
+            param);
+    mWaitWorkCV.signal();
+}
+
+// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
+void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
+{
+    PrioConfigEvent *prioEvent = new PrioConfigEvent(pid, tid, prio);
+    mConfigEvents.add(static_cast<ConfigEvent *>(prioEvent));
+    ALOGV("sendPrioConfigEvent_l() num events %d pid %d, tid %d prio %d",
+            mConfigEvents.size(), pid, tid, prio);
+    mWaitWorkCV.signal();
+}
+
+// Drain the queued config events; called from the thread loop. Each event is
+// removed and deleted here; mLock is dropped around the handlers to respect
+// the AudioFlinger-then-ThreadBase lock order.
+void AudioFlinger::ThreadBase::processConfigEvents()
+{
+    mLock.lock();
+    while (!mConfigEvents.isEmpty()) {
+        ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
+        ConfigEvent *event = mConfigEvents[0];
+        mConfigEvents.removeAt(0);
+        // release mLock before locking AudioFlinger mLock: lock order is always
+        // AudioFlinger then ThreadBase to avoid cross deadlock
+        mLock.unlock();
+        switch(event->type()) {
+        case CFG_EVENT_PRIO: {
+            PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
+            // FIXME Need to understand why this has be done asynchronously
+            int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
+                    true /*asynchronous*/);
+            if (err != 0) {
+                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
+                      "error %d",
+                      prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
+            }
+        } break;
+        case CFG_EVENT_IO: {
+            IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
+            mAudioFlinger->mLock.lock();
+            audioConfigChanged_l(ioEvent->event(), ioEvent->param());
+            mAudioFlinger->mLock.unlock();
+        } break;
+        default:
+            ALOGE("processConfigEvents() unknown event type %d", event->type());
+            break;
+        }
+        delete event;
+        mLock.lock();
+    }
+    mLock.unlock();
+}
+
+// Dump thread state to fd for dumpsys; uses tryLock so a wedged thread still
+// produces (possibly inconsistent) output rather than hanging the dump.
+void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    bool locked = AudioFlinger::dumpTryLock(mLock);
+    if (!locked) {
+        snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this);
+        write(fd, buffer, strlen(buffer));
+    }
+
+    snprintf(buffer, SIZE, "io handle: %d\n", mId);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "TID: %d\n", getTid());
+    result.append(buffer);
+    snprintf(buffer, SIZE, "standby: %d\n", mStandby);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "Sample rate: %u\n", mSampleRate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "Channel Count: %d\n", mChannelCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "Format: %d\n", mFormat);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "Frame size: %u\n", mFrameSize);
+    result.append(buffer);
+
+    snprintf(buffer, SIZE, "\nPending setParameters commands: \n");
+    result.append(buffer);
+    result.append(" Index Command");
+    for (size_t i = 0; i < mNewParameters.size(); ++i) {
+        snprintf(buffer, SIZE, "\n %02d    ", i);
+        result.append(buffer);
+        result.append(mNewParameters[i]);
+    }
+
+    snprintf(buffer, SIZE, "\n\nPending config events: \n");
+    result.append(buffer);
+    for (size_t i = 0; i < mConfigEvents.size(); i++) {
+        mConfigEvents[i]->dump(buffer, SIZE);
+        result.append(buffer);
+    }
+    result.append("\n");
+
+    write(fd, result.string(), result.size());
+
+    if (locked) {
+        mLock.unlock();
+    }
+}
+
+void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size());
+    write(fd, buffer, strlen(buffer));
+
+    for (size_t i = 0; i < mEffectChains.size(); ++i) {
+        sp<EffectChain> chain = mEffectChains[i];
+        if (chain != 0) {
+            chain->dump(fd, args);
+        }
+    }
+}
+
+void AudioFlinger::ThreadBase::acquireWakeLock()
+{
+    Mutex::Autolock _l(mLock);
+    acquireWakeLock_l();
+}
+
+// Lazily binds to the "power" service, then acquires a partial wake lock
+// identified by a fresh BBinder token; must be called with mLock held.
+void AudioFlinger::ThreadBase::acquireWakeLock_l()
+{
+    if (mPowerManager == 0) {
+        // use checkService() to avoid blocking if power service is not up yet
+        sp<IBinder> binder =
+            defaultServiceManager()->checkService(String16("power"));
+        if (binder == 0) {
+            ALOGW("Thread %s cannot connect to the power manager service", mName);
+        } else {
+            mPowerManager = interface_cast<IPowerManager>(binder);
+            binder->linkToDeath(mDeathRecipient);
+        }
+    }
+    if (mPowerManager != 0) {
+        sp<IBinder> binder = new BBinder();
+        status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+                                                         binder,
+                                                         String16(mName));
+        if (status == NO_ERROR) {
+            mWakeLockToken = binder;
+        }
+        ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+    }
+}
+
+void AudioFlinger::ThreadBase::releaseWakeLock()
+{
+    Mutex::Autolock _l(mLock);
+    releaseWakeLock_l();
+}
+
+void AudioFlinger::ThreadBase::releaseWakeLock_l()
+{
+    if (mWakeLockToken != 0) {
+        ALOGV("releaseWakeLock_l() %s", mName);
+        if (mPowerManager != 0) {
+            mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+        }
+        mWakeLockToken.clear();
+    }
+}
+
+// Drops the wake lock and the cached power manager proxy; a fresh proxy is
+// re-fetched on the next acquireWakeLock_l().
+void AudioFlinger::ThreadBase::clearPowerManager()
+{
+    Mutex::Autolock _l(mLock);
+    releaseWakeLock_l();
+    mPowerManager.clear();
+}
+
+void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who)
+{
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread != 0) {
+        thread->clearPowerManager();
+    }
+    ALOGW("power manager service died !!!");
+}
+
+void AudioFlinger::ThreadBase::setEffectSuspended(
+        const effect_uuid_t *type, bool suspend, int 
sessionId)
+{
+    Mutex::Autolock _l(mLock);
+    setEffectSuspended_l(type, suspend, sessionId);
+}
+
+// Suspend/restore one effect type (or all effects if type == NULL) on the
+// chain for sessionId, and record the request so it can be re-applied if the
+// chain is destroyed and later re-created. Must be called with mLock held.
+void AudioFlinger::ThreadBase::setEffectSuspended_l(
+        const effect_uuid_t *type, bool suspend, int sessionId)
+{
+    sp<EffectChain> chain = getEffectChain_l(sessionId);
+    if (chain != 0) {
+        if (type != NULL) {
+            chain->setEffectSuspended_l(type, suspend);
+        } else {
+            chain->setEffectSuspendedAll_l(suspend);
+        }
+    }
+
+    updateSuspendedSessions_l(type, suspend, sessionId);
+}
+
+// Re-apply any recorded suspend requests to a newly added chain, once per
+// outstanding refcount so the chain's own counters line up.
+void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
+{
+    ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
+    if (index < 0) {
+        return;
+    }
+
+    const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
+            mSuspendedSessions.valueAt(index);
+
+    for (size_t i = 0; i < sessionEffects.size(); i++) {
+        sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
+        for (int j = 0; j < desc->mRefCount; j++) {
+            if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
+                chain->setEffectSuspendedAll_l(true);
+            } else {
+                ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
+                    desc->mType.timeLow);
+                chain->setEffectSuspended_l(&desc->mType, true);
+            }
+        }
+    }
+}
+
+// Maintain the per-session refcounted suspend bookkeeping in
+// mSuspendedSessions. Keyed first by sessionId, then by effect type
+// (type->timeLow, or kKeyForSuspendAll when type == NULL).
+// NOTE(review): sessionEffects is a by-value copy of the stored KeyedVector;
+// all mutations are made on the copy and written back via replaceValueFor()
+// at the end — keep that invariant when editing this function.
+void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
+                                                         bool suspend,
+                                                         int sessionId)
+{
+    ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
+
+    KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
+
+    if (suspend) {
+        if (index >= 0) {
+            sessionEffects = mSuspendedSessions.valueAt(index);
+        } else {
+            mSuspendedSessions.add(sessionId, sessionEffects);
+        }
+    } else {
+        if (index < 0) {
+            return;
+        }
+        sessionEffects = mSuspendedSessions.valueAt(index);
+    }
+
+
+    int key = EffectChain::kKeyForSuspendAll;
+    if (type != NULL) {
+        key = type->timeLow;
+    }
+    index = sessionEffects.indexOfKey(key);
+
+    sp<SuspendedSessionDesc> desc;
+    if (suspend) {
+        if (index >= 0) {
+            desc = sessionEffects.valueAt(index);
+        } else {
+            desc = new SuspendedSessionDesc();
+            if (type != NULL) {
+                desc->mType = *type;
+            }
+            sessionEffects.add(key, desc);
+            ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
+        }
+        desc->mRefCount++;
+    } else {
+        if (index < 0) {
+            return;
+        }
+        desc = sessionEffects.valueAt(index);
+        if (--desc->mRefCount == 0) {
+            ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
+            sessionEffects.removeItemsAt(index);
+            if (sessionEffects.isEmpty()) {
+                ALOGV("updateSuspendedSessions_l() restore removing session %d",
+                                 sessionId);
+                mSuspendedSessions.removeItem(sessionId);
+            }
+        }
+    }
+    if (!sessionEffects.isEmpty()) {
+        mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
+    }
+}
+
+void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+                                                            bool enabled,
+                                                            int sessionId)
+{
+    Mutex::Autolock _l(mLock);
+    checkSuspendOnEffectEnabled_l(effect, enabled, sessionId);
+}
+
+void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
+                                                            bool enabled,
+                                                            int sessionId)
+{
+    if (mType != RECORD) {
+        // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
+        // another session. This gives the priority to well behaved effect control panels
+        // and applications not using global effects.
+        // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect
+        // global effects
+        if ((sessionId != AUDIO_SESSION_OUTPUT_MIX) && (sessionId != AUDIO_SESSION_OUTPUT_STAGE)) {
+            setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
+        }
+    }
+
+    sp<EffectChain> chain = getEffectChain_l(sessionId);
+    if (chain != 0) {
+        chain->checkSuspendOnEffectEnabled(effect, enabled);
+    }
+}
+
+// ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
+// Creates (or reuses) an EffectModule on this thread's chain for sessionId and
+// returns a client handle to it. On failure, any chain/module/registration
+// created along the way is rolled back under the Exit label, and *status
+// (if non-NULL) receives the error.
+sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
+        const sp<AudioFlinger::Client>& client,
+        const sp<IEffectClient>& effectClient,
+        int32_t priority,
+        int sessionId,
+        effect_descriptor_t *desc,
+        int *enabled,
+        status_t *status
+        )
+{
+    sp<EffectModule> effect;
+    sp<EffectHandle> handle;
+    status_t lStatus;
+    sp<EffectChain> chain;
+    bool chainCreated = false;
+    bool effectCreated = false;
+    bool effectRegistered = false;
+
+    lStatus = initCheck();
+    if (lStatus != NO_ERROR) {
+        ALOGW("createEffect_l() Audio driver not initialized.");
+        goto Exit;
+    }
+
+    // Do not allow effects with session ID 0 on direct output or duplicating threads
+    // TODO: add rule for hw accelerated effects on direct outputs with non PCM format
+    if (sessionId == AUDIO_SESSION_OUTPUT_MIX && mType != MIXER) {
+        ALOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
+                desc->name, sessionId);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+    // Only Pre processor effects are allowed on input threads and only on input threads
+    if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+        ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
+                desc->name, desc->flags, mType);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
+    ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
+
+    { // scope for mLock
+        Mutex::Autolock _l(mLock);
+
+        // check for existing effect chain with the requested audio session
+        chain = getEffectChain_l(sessionId);
+        if (chain == 0) {
+            // create a new chain for this session
+            ALOGV("createEffect_l() new effect chain for session %d", sessionId);
+            chain = new EffectChain(this, sessionId);
+            addEffectChain_l(chain);
+            chain->setStrategy(getStrategyForSession_l(sessionId));
+            chainCreated = true;
+        } else {
+            effect = chain->getEffectFromDesc_l(desc);
+        }
+
+        ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
+
+        if (effect == 0) {
+            int id = mAudioFlinger->nextUniqueId();
+            // Check CPU and memory usage
+            lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
+            if (lStatus != NO_ERROR) {
+                goto Exit;
+            }
+            effectRegistered = true;
+            // create a new effect module if none present in the chain
+            effect = new EffectModule(this, chain, desc, id, sessionId);
+            lStatus = effect->status();
+            if (lStatus != NO_ERROR) {
+                goto Exit;
+            }
+            lStatus = chain->addEffect_l(effect);
+            if (lStatus != NO_ERROR) {
+                goto Exit;
+            }
+            effectCreated = true;
+
+            effect->setDevice(mOutDevice);
+            effect->setDevice(mInDevice);
+            effect->setMode(mAudioFlinger->getMode());
+            effect->setAudioSource(mAudioSource);
+        }
+        // create effect handle and connect it to effect module
+        handle = new EffectHandle(effect, client, effectClient, priority);
+        lStatus = effect->addHandle(handle.get());
+        if (enabled != NULL) {
+            *enabled = (int)effect->isEnabled();
+        }
+    }
+
+Exit:
+    // roll back partial construction; ALREADY_EXISTS is a success-like outcome
+    if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
+        Mutex::Autolock _l(mLock);
+        if (effectCreated) {
+            chain->removeEffect_l(effect);
+        }
+        if (effectRegistered) {
+            AudioSystem::unregisterEffect(effect->id());
+        }
+        if (chainCreated) {
+            removeEffectChain_l(chain);
+        }
+        handle.clear();
+    }
+
+    if (status != NULL) {
+        *status = lStatus;
+    }
+    return handle;
+}
+
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId)
+{
+    Mutex::Autolock _l(mLock);
+    return 
getEffect_l(sessionId, effectId);
+}
+
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
+{
+    sp<EffectChain> chain = getEffectChain_l(sessionId);
+    return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
+}
+
+// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
+// PlaybackThread::mLock held
+// Attaches an already-constructed EffectModule to this thread (used when an
+// effect moves between threads); creates the session chain on demand and
+// removes it again if the add fails.
+status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
+{
+    // check for existing effect chain with the requested audio session
+    int sessionId = effect->sessionId();
+    sp<EffectChain> chain = getEffectChain_l(sessionId);
+    bool chainCreated = false;
+
+    if (chain == 0) {
+        // create a new chain for this session
+        ALOGV("addEffect_l() new effect chain for session %d", sessionId);
+        chain = new EffectChain(this, sessionId);
+        addEffectChain_l(chain);
+        chain->setStrategy(getStrategyForSession_l(sessionId));
+        chainCreated = true;
+    }
+    ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());
+
+    if (chain->getEffectFromId_l(effect->id()) != 0) {
+        ALOGW("addEffect_l() %p effect %s already present in chain %p",
+                this, effect->desc().name, chain.get());
+        return BAD_VALUE;
+    }
+
+    status_t status = chain->addEffect_l(effect);
+    if (status != NO_ERROR) {
+        if (chainCreated) {
+            removeEffectChain_l(chain);
+        }
+        return status;
+    }
+
+    effect->setDevice(mOutDevice);
+    effect->setDevice(mInDevice);
+    effect->setMode(mAudioFlinger->getMode());
+    effect->setAudioSource(mAudioSource);
+    return NO_ERROR;
+}
+
+void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
+
+    ALOGV("removeEffect_l() %p effect %p", this, effect.get());
+    effect_descriptor_t desc = effect->desc();
+    if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+        detachAuxEffect_l(effect->id());
+    }
+
+    sp<EffectChain> chain = effect->chain().promote();
+    if (chain != 0) {
+        // remove effect chain if removing last effect
+        if (chain->removeEffect_l(effect) == 0) {
+            removeEffectChain_l(chain);
+        }
+    } else {
+        ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
+    }
+}
+
+// Snapshot mEffectChains into the caller's vector and lock every chain; the
+// matching unlockEffectChains() must be called with the same vector.
+void AudioFlinger::ThreadBase::lockEffectChains_l(
+        Vector< sp<AudioFlinger::EffectChain> >& effectChains)
+{
+    effectChains = mEffectChains;
+    for (size_t i = 0; i < mEffectChains.size(); i++) {
+        mEffectChains[i]->lock();
+    }
+}
+
+void AudioFlinger::ThreadBase::unlockEffectChains(
+        const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
+{
+    for (size_t i = 0; i < effectChains.size(); i++) {
+        effectChains[i]->unlock();
+    }
+}
+
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
+{
+    Mutex::Autolock _l(mLock);
+    return getEffectChain_l(sessionId);
+}
+
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const
+{
+    size_t size = mEffectChains.size();
+    for (size_t i = 0; i < size; i++) {
+        if (mEffectChains[i]->sessionId() == sessionId) {
+            return mEffectChains[i];
+        }
+    }
+    return 0;
+}
+
+void AudioFlinger::ThreadBase::setMode(audio_mode_t mode)
+{
+    Mutex::Autolock _l(mLock);
+    size_t size = mEffectChains.size();
+    for (size_t i = 0; i < size; i++) {
+        mEffectChains[i]->setMode_l(mode);
+    }
+}
+
+void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect,
+                                                    EffectHandle *handle,
+                                                    bool unpinIfLast) {
+
+    Mutex::Autolock _l(mLock);
+    ALOGV("disconnectEffect() %p effect %p", this, effect.get());
+    // delete the effect module if removing last handle on it
+    if (effect->removeHandle(handle) == 0) {
+        if (!effect->isPinned() || unpinIfLast) {
+            removeEffect_l(effect);
+            AudioSystem::unregisterEffect(effect->id());
+        }
+    }
+}
+
+// ----------------------------------------------------------------------------
+//      Playback
+// ----------------------------------------------------------------------------
+
+AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
+        
AudioStreamOut* output,
+                                             audio_io_handle_t id,
+                                             audio_devices_t device,
+                                             type_t type)
+    :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
+        mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        // mStreamTypes[] initialized in constructor body
+        mOutput(output),
+        mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+        mMixerStatus(MIXER_IDLE),
+        mMixerStatusIgnoringFastTracks(MIXER_IDLE),
+        standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+        mScreenState(AudioFlinger::mScreenState),
+        // index 0 is reserved for normal mixer's submix
+        mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
+{
+    snprintf(mName, kNameLength, "AudioOut_%X", id);
+    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
+
+    // Assumes constructor is called by AudioFlinger with it's mLock held, but
+    // it would be safer to explicitly pass initial masterVolume/masterMute as
+    // parameter.
+    //
+    // If the HAL we are using has support for master volume or master mute,
+    // then do not attenuate or mute during mixing (just leave the volume at 1.0
+    // and the mute set to false).
+    mMasterVolume = audioFlinger->masterVolume_l();
+    mMasterMute = audioFlinger->masterMute_l();
+    if (mOutput && mOutput->audioHwDev) {
+        if (mOutput->audioHwDev->canSetMasterVolume()) {
+            mMasterVolume = 1.0;
+        }
+
+        if (mOutput->audioHwDev->canSetMasterMute()) {
+            mMasterMute = false;
+        }
+    }
+
+    readOutputParameters();
+
+    // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
+    // There is no AUDIO_STREAM_MIN, and ++ operator does not compile
+    for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
+            stream = (audio_stream_type_t) (stream + 1)) {
+        mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
+        mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
+    }
+    // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
+    // because mAudioFlinger doesn't have one to copy from
+}
+
+AudioFlinger::PlaybackThread::~PlaybackThread()
+{
+    mAudioFlinger->unregisterWriter(mNBLogWriter);
+    // mMixBuffer is allocated with new[] (presumably in readOutputParameters)
+    delete [] mMixBuffer;
+}
+
+void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
+{
+    dumpInternals(fd, args);
+    dumpTracks(fd, args);
+    dumpEffectChains(fd, args);
+}
+
+// Dump per-stream volumes/mutes plus all tracks and active tracks.
+// NOTE(review): runs without holding mLock — acceptable for a dumpsys path,
+// but the containers may change underneath it.
+void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    result.appendFormat("Output thread %p stream volumes in dB:\n    ", this);
+    for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
+        const stream_type_t *st = &mStreamTypes[i];
+        if (i > 0) {
+            result.appendFormat(", ");
+        }
+        result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
+        if (st->mute) {
+            result.append("M");
+        }
+    }
+    result.append("\n");
+    write(fd, result.string(), result.length());
+    result.clear();
+
+    snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
+    result.append(buffer);
+    Track::appendDumpHeader(result);
+    for (size_t i = 0; i < mTracks.size(); ++i) {
+        sp<Track> track = mTracks[i];
+        if (track != 0) {
+            track->dump(buffer, SIZE);
+            result.append(buffer);
+        }
+    }
+
+    snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
+    result.append(buffer);
+    Track::appendDumpHeader(result);
+    for (size_t i = 0; i < mActiveTracks.size(); ++i) {
+        sp<Track> track = mActiveTracks[i].promote();
+        if (track != 0) {
+            track->dump(buffer, SIZE);
+            result.append(buffer);
+        }
+    }
+    write(fd, result.string(), result.size());
+
+    // These values are "raw"; they will wrap around.  See prepareTracks_l() for a better way.
+    FastTrackUnderruns underruns = getFastTrackUnderruns(0);
+    fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n",
+            underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
+}
+
+void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
+            ns2ms(systemTime() - mLastWriteTime));
+    result.append(buffer);
+    snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer);
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask);
+
+    dumpBase(fd, args);
+}
+
+// Thread virtuals
+status_t AudioFlinger::PlaybackThread::readyToRun()
+{
+    status_t status = initCheck();
+    if (status == NO_ERROR) {
+        ALOGI("AudioFlinger's thread %p ready to run", this);
+    } else {
+        ALOGE("No working audio driver found.");
+    }
+    return status;
+}
+
+void AudioFlinger::PlaybackThread::onFirstRef()
+{
+    
run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+// ThreadBase virtuals
+void AudioFlinger::PlaybackThread::preExit()
+{
+    ALOGV("  preExit()");
+    // FIXME this is using hard-coded strings but in the future, this functionality will be
+    //       converted to use audio HAL extensions required to support tunneling
+    mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
+}
+
+// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
+// Creates a Track (or TimedTrack) for a client. May downgrade a requested
+// FAST track (clearing the flag in *flags) if the fast-mixer criteria below
+// are not met; on success the new track is added to mTracks and, for accepted
+// fast tracks with a callback tid, a SCHED_FIFO priority request is queued.
+sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
+        const sp<AudioFlinger::Client>& client,
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        const sp<IMemory>& sharedBuffer,
+        int sessionId,
+        IAudioFlinger::track_flags_t *flags,
+        pid_t tid,
+        status_t *status)
+{
+    sp<Track> track;
+    status_t lStatus;
+
+    bool isTimed = (*flags & IAudioFlinger::TRACK_TIMED) != 0;
+
+    // client expresses a preference for FAST, but we get the final say
+    if (*flags & IAudioFlinger::TRACK_FAST) {
+      if (
+            // not timed
+            (!isTimed) &&
+            // either of these use cases:
+            (
+              // use case 1: shared buffer with any frame count
+              (
+                (sharedBuffer != 0)
+              ) ||
+              // use case 2: callback handler and frame count is default or at least as large as HAL
+              (
+                (tid != -1) &&
+                ((frameCount == 0) ||
+                (frameCount >= (mFrameCount * kFastTrackMultiplier)))
+              )
+            ) &&
+            // PCM data
+            audio_is_linear_pcm(format) &&
+            // mono or stereo
+            ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
+              (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
+#ifndef FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
+            // hardware sample rate
+            (sampleRate == mSampleRate) &&
+#endif
+            // normal mixer has an associated fast mixer
+            hasFastMixer() &&
+            // there are sufficient fast track slots available
+            (mFastTrackAvailMask != 0)
+            // FIXME test that MixerThread for this fast track has a capable output HAL
+            // FIXME add a permission test also?
+        ) {
+        // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
+        if (frameCount == 0) {
+            frameCount = mFrameCount * kFastTrackMultiplier;
+        }
+        ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+                frameCount, mFrameCount);
+      } else {
+        ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
+                "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
+                "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
+                isTimed, sharedBuffer.get(), frameCount, mFrameCount, format,
+                audio_is_linear_pcm(format),
+                channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
+        *flags &= ~IAudioFlinger::TRACK_FAST;
+        // For compatibility with AudioTrack calculation, buffer depth is forced
+        // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+        // This is probably too conservative, but legacy application code may depend on it.
+        // If you change this calculation, also review the start threshold which is related.
+        uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
+        // NOTE(review): if (1000 * mNormalFrameCount) < mSampleRate the divisor
+        // is 0 — presumably readOutputParameters() guarantees a buffer of at
+        // least 1 ms, but worth confirming.
+        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
+        if (minBufCount < 2) {
+            minBufCount = 2;
+        }
+        size_t minFrameCount = mNormalFrameCount * minBufCount;
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
+        }
+      }
+    }
+
+    if (mType == DIRECT) {
+        if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
+            if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
+                ALOGE("createTrack_l() Bad parameter: sampleRate %u format %d, channelMask 0x%08x "
+                        "for output %p with format %d",
+                        sampleRate, format, channelMask, mOutput, mFormat);
+                lStatus = BAD_VALUE;
+                goto Exit;
+            }
+        }
+    } else {
+        // Resampler implementation limits input sampling rate to 2 x output sampling rate.
+        if (sampleRate > mSampleRate*2) {
+            ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
+            lStatus = BAD_VALUE;
+            goto Exit;
+        }
+    }
+
+    lStatus = initCheck();
+    if (lStatus != NO_ERROR) {
+        ALOGE("Audio driver not initialized.");
+        goto Exit;
+    }
+
+    { // scope for mLock
+        Mutex::Autolock _l(mLock);
+
+        // all tracks in same audio session must share the same routing strategy otherwise
+        // conflicts will happen when tracks are moved from one output to another by audio policy
+        // manager
+        uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
+        for (size_t i = 0; i < mTracks.size(); ++i) {
+            sp<Track> t = mTracks[i];
+            if (t != 0 && !t->isOutputTrack()) {
+                uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
+                if (sessionId == t->sessionId() && strategy != actual) {
+                    ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
+                            strategy, actual);
+                    lStatus = BAD_VALUE;
+                    goto Exit;
+                }
+            }
+        }
+
+        if (!isTimed) {
+            track = new Track(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId, *flags);
+        } else {
+            track = TimedTrack::create(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        }
+        if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
+            lStatus = NO_MEMORY;
+            goto Exit;
+        }
+        mTracks.add(track);
+
+        sp<EffectChain> chain = getEffectChain_l(sessionId);
+        if (chain != 0) {
+            ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
+            track->setMainBuffer(chain->inBuffer());
+            chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
+            chain->incTrackCnt();
+        }
+
+        if ((*flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
+            pid_t callingPid = IPCThreadState::self()->getCallingPid();
+            // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
+            // so ask activity manager to do this on our behalf
+            sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+        }
+    }
+
+    lStatus = NO_ERROR;
+
+Exit:
+    if (status) {
+        *status = lStatus;
+    }
+    return track;
+}
+
+uint32_t AudioFlinger::PlaybackThread::correctLatency_l(uint32_t latency) const
+{
+    return latency;
+}
+
+uint32_t AudioFlinger::PlaybackThread::latency() const
+{
+    Mutex::Autolock _l(mLock);
+    return latency_l();
+}
+uint32_t AudioFlinger::PlaybackThread::latency_l() const
+{
+    if (initCheck() == NO_ERROR) {
+        return correctLatency_l(mOutput->stream->get_latency(mOutput->stream));
+    } else {
+        return 0;
+    }
+}
+
+void AudioFlinger::PlaybackThread::setMasterVolume(float value)
+{
+    Mutex::Autolock _l(mLock);
+    // Don't apply master volume in SW if our HAL can do it for us.
+    if (mOutput && mOutput->audioHwDev &&
+        mOutput->audioHwDev->canSetMasterVolume()) {
+        mMasterVolume = 1.0;
+    } else {
+        mMasterVolume = value;
+    }
+}
+
+void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
+{
+    Mutex::Autolock _l(mLock);
+    // Don't apply master mute in SW if our HAL can do it for us.
+ if (mOutput && mOutput->audioHwDev && + mOutput->audioHwDev->canSetMasterMute()) { + mMasterMute = false; + } else { + mMasterMute = muted; + } +} + +void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value) +{ + Mutex::Autolock _l(mLock); + mStreamTypes[stream].volume = value; +} + +void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted) +{ + Mutex::Autolock _l(mLock); + mStreamTypes[stream].mute = muted; +} + +float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const +{ + Mutex::Autolock _l(mLock); + return mStreamTypes[stream].volume; +} + +// addTrack_l() must be called with ThreadBase::mLock held +status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track) +{ + status_t status = ALREADY_EXISTS; + + // set retry count for buffer fill + track->mRetryCount = kMaxTrackStartupRetries; + if (mActiveTracks.indexOf(track) < 0) { + // the track is newly added, make sure it fills up all its + // buffers before playing. This is to ensure the client will + // effectively get the latency it requested. 
+ track->mFillingUpStatus = Track::FS_FILLING; + track->mResetDone = false; + track->mPresentationCompleteFrames = 0; + mActiveTracks.add(track); + if (track->mainBuffer() != mMixBuffer) { + sp<EffectChain> chain = getEffectChain_l(track->sessionId()); + if (chain != 0) { + ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(), + track->sessionId()); + chain->incActiveTrackCnt(); + } + } + + status = NO_ERROR; + } + + ALOGV("mWaitWorkCV.broadcast"); + mWaitWorkCV.broadcast(); + + return status; +} + +// destroyTrack_l() must be called with ThreadBase::mLock held +void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track) +{ + track->mState = TrackBase::TERMINATED; + // active tracks are removed by threadLoop() + if (mActiveTracks.indexOf(track) < 0) { + removeTrack_l(track); + } +} + +void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track) +{ + track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); + mTracks.remove(track); + deleteTrackName_l(track->name()); + // redundant as track is about to be destroyed, for dumpsys only + track->mName = -1; + if (track->isFastTrack()) { + int index = track->mFastIndex; + ALOG_ASSERT(0 < index && index < (int)FastMixerState::kMaxFastTracks); + ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index))); + mFastTrackAvailMask |= 1 << index; + // redundant as track is about to be destroyed, for dumpsys only + track->mFastIndex = -1; + } + sp<EffectChain> chain = getEffectChain_l(track->sessionId()); + if (chain != 0) { + chain->decTrackCnt(); + } +} + +String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys) +{ + String8 out_s8 = String8(""); + char *s; + + Mutex::Autolock _l(mLock); + if (initCheck() != NO_ERROR) { + return out_s8; + } + + s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string()); + out_s8 = String8(s); + free(s); + return out_s8; +} + +// audioConfigChanged_l() must be called with AudioFlinger::mLock held +void 
AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) { + AudioSystem::OutputDescriptor desc; + void *param2 = NULL; + + ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, + param); + + switch (event) { + case AudioSystem::OUTPUT_OPENED: + case AudioSystem::OUTPUT_CONFIG_CHANGED: + desc.channels = mChannelMask; + desc.samplingRate = mSampleRate; + desc.format = mFormat; + desc.frameCount = mNormalFrameCount; // FIXME see + // AudioFlinger::frameCount(audio_io_handle_t) + desc.latency = latency(); + param2 = &desc; + break; + + case AudioSystem::STREAM_CONFIG_CHANGED: + param2 = ¶m; + case AudioSystem::OUTPUT_CLOSED: + default: + break; + } + mAudioFlinger->audioConfigChanged_l(event, mId, param2); +} + +void AudioFlinger::PlaybackThread::readOutputParameters() +{ + mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common); + mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common); + mChannelCount = (uint16_t)popcount(mChannelMask); + mFormat = mOutput->stream->common.get_format(&mOutput->stream->common); + mFrameSize = audio_stream_frame_size(&mOutput->stream->common); + mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize; + if (mFrameCount & 15) { + ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames", + mFrameCount); + } + + // Calculate size of normal mix buffer relative to the HAL output buffer size + double multiplier = 1.0; + if (mType == MIXER && (kUseFastMixer == FastMixer_Static || + kUseFastMixer == FastMixer_Dynamic)) { + size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000; + size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000; + // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer + minNormalFrameCount = (minNormalFrameCount + 15) & ~15; + maxNormalFrameCount = maxNormalFrameCount & ~15; + if 
(maxNormalFrameCount < minNormalFrameCount) { + maxNormalFrameCount = minNormalFrameCount; + } + multiplier = (double) minNormalFrameCount / (double) mFrameCount; + if (multiplier <= 1.0) { + multiplier = 1.0; + } else if (multiplier <= 2.0) { + if (2 * mFrameCount <= maxNormalFrameCount) { + multiplier = 2.0; + } else { + multiplier = (double) maxNormalFrameCount / (double) mFrameCount; + } + } else { + // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL + // SRC (it would be unusual for the normal mix buffer size to not be a multiple of fast + // track, but we sometimes have to do this to satisfy the maximum frame count + // constraint) + // FIXME this rounding up should not be done if no HAL SRC + uint32_t truncMult = (uint32_t) multiplier; + if ((truncMult & 1)) { + if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) { + ++truncMult; + } + } + multiplier = (double) truncMult; + } + } + mNormalFrameCount = multiplier * mFrameCount; + // round up to nearest 16 frames to satisfy AudioMixer + mNormalFrameCount = (mNormalFrameCount + 15) & ~15; + ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, + mNormalFrameCount); + + delete[] mMixBuffer; + mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount]; + memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); + + // force reconfiguration of effect chains and engines to take new buffer size and audio + // parameters into account + // Note that mLock is not held when readOutputParameters() is called from the constructor + // but in this case nothing is done below as no audio sessions have effect yet so it doesn't + // matter. 
+ // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains + Vector< sp<EffectChain> > effectChains = mEffectChains; + for (size_t i = 0; i < effectChains.size(); i ++) { + mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this, false); + } +} + + +status_t AudioFlinger::PlaybackThread::getRenderPosition(size_t *halFrames, size_t *dspFrames) +{ + if (halFrames == NULL || dspFrames == NULL) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + if (initCheck() != NO_ERROR) { + return INVALID_OPERATION; + } + size_t framesWritten = mBytesWritten / mFrameSize; + *halFrames = framesWritten; + + if (isSuspended()) { + // return an estimation of rendered frames when the output is suspended + size_t latencyFrames = (latency_l() * mSampleRate) / 1000; + *dspFrames = framesWritten >= latencyFrames ? framesWritten - latencyFrames : 0; + return NO_ERROR; + } else { + return mOutput->stream->get_render_position(mOutput->stream, dspFrames); + } +} + +uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const +{ + Mutex::Autolock _l(mLock); + uint32_t result = 0; + if (getEffectChain_l(sessionId) != 0) { + result = EFFECT_SESSION; + } + + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<Track> track = mTracks[i]; + if (sessionId == track->sessionId() && !track->isInvalid()) { + result |= TRACK_SESSION; + break; + } + } + + return result; +} + +uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId) +{ + // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that + // it is moved to correct output by audio policy manager when A2DP is connected or disconnected + if (sessionId == AUDIO_SESSION_OUTPUT_MIX) { + return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); + } + for (size_t i = 0; i < mTracks.size(); i++) { + sp<Track> track = mTracks[i]; + if (sessionId == track->sessionId() && !track->isInvalid()) { + return 
AudioSystem::getStrategyForStream(track->streamType()); + } + } + return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); +} + + +AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const +{ + Mutex::Autolock _l(mLock); + return mOutput; +} + +AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput() +{ + Mutex::Autolock _l(mLock); + AudioStreamOut *output = mOutput; + mOutput = NULL; + // FIXME FastMixer might also have a raw ptr to mOutputSink; + // must push a NULL and wait for ack + mOutputSink.clear(); + mPipeSink.clear(); + mNormalSink.clear(); + return output; +} + +// this method must always be called either with ThreadBase mLock held or inside the thread loop +audio_stream_t* AudioFlinger::PlaybackThread::stream() const +{ + if (mOutput == NULL) { + return NULL; + } + return &mOutput->stream->common; +} + +uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const +{ + return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000); +} + +status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event) +{ + if (!isValidSyncEvent(event)) { + return BAD_VALUE; + } + + Mutex::Autolock _l(mLock); + + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<Track> track = mTracks[i]; + if (event->triggerSession() == track->sessionId()) { + (void) track->setSyncEvent(event); + return NO_ERROR; + } + } + + return NAME_NOT_FOUND; +} + +bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const +{ + return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE; +} + +void AudioFlinger::PlaybackThread::threadLoop_removeTracks( + const Vector< sp<Track> >& tracksToRemove) +{ + size_t count = tracksToRemove.size(); + if (CC_UNLIKELY(count)) { + for (size_t i = 0 ; i < count ; i++) { + const sp<Track>& track = tracksToRemove.itemAt(i); + if ((track->sharedBuffer() != 0) && + (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) { + 
AudioSystem::stopOutput(mId, track->streamType(), track->sessionId()); + } + } + } + +} + +void AudioFlinger::PlaybackThread::checkSilentMode_l() +{ + if (!mMasterMute) { + char value[PROPERTY_VALUE_MAX]; + if (property_get("ro.audio.silent", value, "0") > 0) { + char *endptr; + unsigned long ul = strtoul(value, &endptr, 0); + if (*endptr == '\0' && ul != 0) { + ALOGD("Silence is golden"); + // The setprop command will not allow a property to be changed after + // the first time it is set, so we don't have to worry about un-muting. + setMasterMute_l(true); + } + } + } +} + +// shared by MIXER and DIRECT, overridden by DUPLICATING +void AudioFlinger::PlaybackThread::threadLoop_write() +{ + // FIXME rewrite to reduce number of system calls + mLastWriteTime = systemTime(); + mInWrite = true; + int bytesWritten; + + // If an NBAIO sink is present, use it to write the normal mixer's submix + if (mNormalSink != 0) { +#define mBitShift 2 // FIXME + size_t count = mixBufferSize >> mBitShift; + ATRACE_BEGIN("write"); + // update the setpoint when AudioFlinger::mScreenState changes + uint32_t screenState = AudioFlinger::mScreenState; + if (screenState != mScreenState) { + mScreenState = screenState; + MonoPipe *pipe = (MonoPipe *)mPipeSink.get(); + if (pipe != NULL) { + pipe->setAvgFrames((mScreenState & 1) ? + (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2); + } + } + ssize_t framesWritten = mNormalSink->write(mMixBuffer, count); + ATRACE_END(); + if (framesWritten > 0) { + bytesWritten = framesWritten << mBitShift; + } else { + bytesWritten = framesWritten; + } + // otherwise use the HAL / AudioStreamOut directly + } else { + // Direct output thread. 
+ bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize); + } + + if (bytesWritten > 0) { + mBytesWritten += mixBufferSize; + } + mNumWrites++; + mInWrite = false; +} + +/* +The derived values that are cached: + - mixBufferSize from frame count * frame size + - activeSleepTime from activeSleepTimeUs() + - idleSleepTime from idleSleepTimeUs() + - standbyDelay from mActiveSleepTimeUs (DIRECT only) + - maxPeriod from frame count and sample rate (MIXER only) + +The parameters that affect these derived values are: + - frame count + - frame size + - sample rate + - device type: A2DP or not + - device latency + - format: PCM or not + - active sleep time + - idle sleep time +*/ + +void AudioFlinger::PlaybackThread::cacheParameters_l() +{ + mixBufferSize = mNormalFrameCount * mFrameSize; + activeSleepTime = activeSleepTimeUs(); + idleSleepTime = idleSleepTimeUs(); +} + +void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType) +{ + ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d", + this, streamType, mTracks.size()); + Mutex::Autolock _l(mLock); + + size_t size = mTracks.size(); + for (size_t i = 0; i < size; i++) { + sp<Track> t = mTracks[i]; + if (t->streamType() == streamType) { + t->invalidate(); + } + } +} + +status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain) +{ + int session = chain->sessionId(); + int16_t *buffer = mMixBuffer; + bool ownsBuffer = false; + + ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session); + if (session > 0) { + // Only one effect chain can be present in direct output thread and it uses + // the mix buffer as input + if (mType != DIRECT) { + size_t numSamples = mNormalFrameCount * mChannelCount; + buffer = new int16_t[numSamples]; + memset(buffer, 0, numSamples * sizeof(int16_t)); + ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session); + ownsBuffer = true; + 
} + + // Attach all tracks with same session ID to this chain. + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<Track> track = mTracks[i]; + if (session == track->sessionId()) { + ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(), + buffer); + track->setMainBuffer(buffer); + chain->incTrackCnt(); + } + } + + // indicate all active tracks in the chain + for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) { + sp<Track> track = mActiveTracks[i].promote(); + if (track == 0) { + continue; + } + if (session == track->sessionId()) { + ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session); + chain->incActiveTrackCnt(); + } + } + } + + chain->setInBuffer(buffer, ownsBuffer); + chain->setOutBuffer(mMixBuffer); + // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect + // chains list in order to be processed last as it contains output stage effects + // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before + // session AUDIO_SESSION_OUTPUT_STAGE to be processed + // after track specific effects and before output stage + // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and + // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX + // Effect chain for other sessions are inserted at beginning of effect + // chains list to be processed before output mix effects. 
Relative order between other + // sessions is not important + size_t size = mEffectChains.size(); + size_t i = 0; + for (i = 0; i < size; i++) { + if (mEffectChains[i]->sessionId() < session) { + break; + } + } + mEffectChains.insertAt(chain, i); + checkSuspendOnAddEffectChain_l(chain); + + return NO_ERROR; +} + +size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain) +{ + int session = chain->sessionId(); + + ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session); + + for (size_t i = 0; i < mEffectChains.size(); i++) { + if (chain == mEffectChains[i]) { + mEffectChains.removeAt(i); + // detach all active tracks from the chain + for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) { + sp<Track> track = mActiveTracks[i].promote(); + if (track == 0) { + continue; + } + if (session == track->sessionId()) { + ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d", + chain.get(), session); + chain->decActiveTrackCnt(); + } + } + + // detach all tracks with same session ID from this chain + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<Track> track = mTracks[i]; + if (session == track->sessionId()) { + track->setMainBuffer(mMixBuffer); + chain->decTrackCnt(); + } + } + break; + } + } + return mEffectChains.size(); +} + +status_t AudioFlinger::PlaybackThread::attachAuxEffect( + const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) +{ + Mutex::Autolock _l(mLock); + return attachAuxEffect_l(track, EffectId); +} + +status_t AudioFlinger::PlaybackThread::attachAuxEffect_l( + const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) +{ + status_t status = NO_ERROR; + + if (EffectId == 0) { + track->setAuxBuffer(0, NULL); + } else { + // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX + sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId); + if (effect != 0) { + if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == 
EFFECT_FLAG_TYPE_AUXILIARY) { + track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer()); + } else { + status = INVALID_OPERATION; + } + } else { + status = BAD_VALUE; + } + } + return status; +} + +void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId) +{ + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<Track> track = mTracks[i]; + if (track->auxEffectId() == effectId) { + attachAuxEffect_l(track, 0); + } + } +} + +bool AudioFlinger::PlaybackThread::threadLoop() +{ + Vector< sp<Track> > tracksToRemove; + + standbyTime = systemTime(); + + // MIXER + nsecs_t lastWarning = 0; + + // DUPLICATING + // FIXME could this be made local to while loop? + writeFrames = 0; + + cacheParameters_l(); + sleepTime = idleSleepTime; + + if (mType == MIXER) { + sleepTimeShift = 0; + } + + CpuStats cpuStats; + const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid())); + + acquireWakeLock(); + + // mNBLogWriter->log can only be called while thread mutex mLock is held. + // So if you need to log when mutex is unlocked, set logString to a non-NULL string, + // and then that string will be logged at the next convenient opportunity. 
+ const char *logString = NULL; + + while (!exitPending()) + { + cpuStats.sample(myName); + + Vector< sp<EffectChain> > effectChains; + + processConfigEvents(); + + { // scope for mLock + + Mutex::Autolock _l(mLock); + + if (logString != NULL) { + mNBLogWriter->logTimestamp(); + mNBLogWriter->log(logString); + logString = NULL; + } + + if (checkForNewParameters_l()) { + cacheParameters_l(); + } + + saveOutputTracks(); + + // put audio hardware into standby after short delay + if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) || + isSuspended())) { + if (!mStandby) { + + threadLoop_standby(); + + mStandby = true; + } + + if (!mActiveTracks.size() && mConfigEvents.isEmpty()) { + // we're about to wait, flush the binder command buffer + IPCThreadState::self()->flushCommands(); + + clearOutputTracks(); + + if (exitPending()) { + break; + } + + releaseWakeLock_l(); + // wait until we have something to do... + ALOGV("%s going to sleep", myName.string()); + mWaitWorkCV.wait(mLock); + ALOGV("%s waking up", myName.string()); + acquireWakeLock_l(); + + mMixerStatus = MIXER_IDLE; + mMixerStatusIgnoringFastTracks = MIXER_IDLE; + mBytesWritten = 0; + + checkSilentMode_l(); + + standbyTime = systemTime() + standbyDelay; + sleepTime = idleSleepTime; + if (mType == MIXER) { + sleepTimeShift = 0; + } + + continue; + } + } + + // mMixerStatusIgnoringFastTracks is also updated internally + mMixerStatus = prepareTracks_l(&tracksToRemove); + + // prevent any changes in effect chain list and in each effect chain + // during mixing and effect process as the audio buffers could be deleted + // or modified if an effect is created or deleted + lockEffectChains_l(effectChains); + } + + if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) { + threadLoop_mix(); + } else { + threadLoop_sleepTime(); + } + + if (isSuspended()) { + sleepTime = suspendSleepTimeUs(); + mBytesWritten += mixBufferSize; + } + + // only process effects if we're going to write + if (sleepTime == 0) { + 
for (size_t i = 0; i < effectChains.size(); i ++) { + effectChains[i]->process_l(); + } + } + + // enable changes in effect chain + unlockEffectChains(effectChains); + + // sleepTime == 0 means we must write to audio hardware + if (sleepTime == 0) { + + threadLoop_write(); + +if (mType == MIXER) { + // write blocked detection + nsecs_t now = systemTime(); + nsecs_t delta = now - mLastWriteTime; + if (!mStandby && delta > maxPeriod) { + mNumDelayedWrites++; + if ((now - lastWarning) > kWarningThrottleNs) { + ATRACE_NAME("underrun"); + ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p", + ns2ms(delta), mNumDelayedWrites, this); + lastWarning = now; + } + } +} + + mStandby = false; + } else { + usleep(sleepTime); + } + + // Finally let go of removed track(s), without the lock held + // since we can't guarantee the destructors won't acquire that + // same lock. This will also mutate and push a new fast mixer state. + threadLoop_removeTracks(tracksToRemove); + tracksToRemove.clear(); + + // FIXME I don't understand the need for this here; + // it was in the original code but maybe the + // assignment in saveOutputTracks() makes this unnecessary? + clearOutputTracks(); + + // Effect chains will be actually deleted here if they were removed from + // mEffectChains list during mixing or effects processing + effectChains.clear(); + + // FIXME Note that the above .clear() is no longer necessary since effectChains + // is now local to this block, but will keep it for now (at least until merge done). + } + + // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ... 
+ if (mType == MIXER || mType == DIRECT) { + // put output stream into standby mode + if (!mStandby) { + mOutput->stream->common.standby(&mOutput->stream->common); + } + } + + releaseWakeLock(); + + ALOGV("Thread %p type %d exiting", this, mType); + return false; +} + + +// ---------------------------------------------------------------------------- + +AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, + audio_io_handle_t id, audio_devices_t device, type_t type) + : PlaybackThread(audioFlinger, output, id, device, type), + // mAudioMixer below + // mFastMixer below + mFastMixerFutex(0) + // mOutputSink below + // mPipeSink below + // mNormalSink below +{ + ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type); + ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%d, mFormat=%d, mFrameSize=%u, " + "mFrameCount=%d, mNormalFrameCount=%d", + mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount, + mNormalFrameCount); + mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); + + // FIXME - Current mixer implementation only supports stereo output + if (mChannelCount != FCC_2) { + ALOGE("Invalid audio hardware channel count %d", mChannelCount); + } + + // create an NBAIO sink for the HAL output stream, and negotiate + mOutputSink = new AudioStreamOutSink(output->stream); + size_t numCounterOffers = 0; + const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)}; + ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + + // initialize fast mixer depending on configuration + bool initFastMixer; + switch (kUseFastMixer) { + case FastMixer_Never: + initFastMixer = false; + break; + case FastMixer_Always: + initFastMixer = true; + break; + case FastMixer_Static: + case FastMixer_Dynamic: + initFastMixer = mFrameCount < mNormalFrameCount; + break; + } + if (initFastMixer) { + + // create a MonoPipe to connect our submix 
to FastMixer + NBAIO_Format format = mOutputSink->format(); + // This pipe depth compensates for scheduling latency of the normal mixer thread. + // When it wakes up after a maximum latency, it runs a few cycles quickly before + // finally blocking. Note the pipe implementation rounds up the request to a power of 2. + MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/); + const NBAIO_Format offers[1] = {format}; + size_t numCounterOffers = 0; + ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + monoPipe->setAvgFrames((mScreenState & 1) ? + (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2); + mPipeSink = monoPipe; + +#ifdef TEE_SINK + if (mTeeSinkOutputEnabled) { + // create a Pipe to archive a copy of FastMixer's output for dumpsys + Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, format); + numCounterOffers = 0; + index = teeSink->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mTeeSink = teeSink; + PipeReader *teeSource = new PipeReader(*teeSink); + numCounterOffers = 0; + index = teeSource->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mTeeSource = teeSource; + } +#endif + + // create fast mixer and configure it initially with just one fast track for our submix + mFastMixer = new FastMixer(); + FastMixerStateQueue *sq = mFastMixer->sq(); +#ifdef STATE_QUEUE_DUMP + sq->setObserverDump(&mStateQueueObserverDump); + sq->setMutatorDump(&mStateQueueMutatorDump); +#endif + FastMixerState *state = sq->begin(); + FastTrack *fastTrack = &state->mFastTracks[0]; + // wrap the source side of the MonoPipe to make it an AudioBufferProvider + fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe)); + fastTrack->mVolumeProvider = NULL; + fastTrack->mGeneration++; + state->mFastTracksGen++; + state->mTrackMask = 1; + // fast mixer will use the HAL output sink + state->mOutputSink = mOutputSink.get(); 
+ state->mOutputSinkGen++; + state->mFrameCount = mFrameCount; + state->mCommand = FastMixerState::COLD_IDLE; + // already done in constructor initialization list + //mFastMixerFutex = 0; + state->mColdFutexAddr = &mFastMixerFutex; + state->mColdGen++; + state->mDumpState = &mFastMixerDumpState; +#ifdef TEE_SINK + state->mTeeSink = mTeeSink.get(); +#endif + mFastMixerNBLogWriter = audioFlinger->newWriter_l(kFastMixerLogSize, "FastMixer"); + state->mNBLogWriter = mFastMixerNBLogWriter.get(); + sq->end(); + sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); + + // start the fast mixer + mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO); + pid_t tid = mFastMixer->getTid(); + int err = requestPriority(getpid_cached, tid, kPriorityFastMixer); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + kPriorityFastMixer, getpid_cached, tid, err); + } + +#ifdef AUDIO_WATCHDOG + // create and start the watchdog + mAudioWatchdog = new AudioWatchdog(); + mAudioWatchdog->setDump(&mAudioWatchdogDump); + mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO); + tid = mAudioWatchdog->getTid(); + err = requestPriority(getpid_cached, tid, kPriorityFastMixer); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + kPriorityFastMixer, getpid_cached, tid, err); + } +#endif + + } else { + mFastMixer = NULL; + } + + switch (kUseFastMixer) { + case FastMixer_Never: + case FastMixer_Dynamic: + mNormalSink = mOutputSink; + break; + case FastMixer_Always: + mNormalSink = mPipeSink; + break; + case FastMixer_Static: + mNormalSink = initFastMixer ? 
mPipeSink : mOutputSink; + break; + } +} + +AudioFlinger::MixerThread::~MixerThread() +{ + if (mFastMixer != NULL) { + FastMixerStateQueue *sq = mFastMixer->sq(); + FastMixerState *state = sq->begin(); + if (state->mCommand == FastMixerState::COLD_IDLE) { + int32_t old = android_atomic_inc(&mFastMixerFutex); + if (old == -1) { + __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1); + } + } + state->mCommand = FastMixerState::EXIT; + sq->end(); + sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); + mFastMixer->join(); + // Though the fast mixer thread has exited, it's state queue is still valid. + // We'll use that extract the final state which contains one remaining fast track + // corresponding to our sub-mix. + state = sq->begin(); + ALOG_ASSERT(state->mTrackMask == 1); + FastTrack *fastTrack = &state->mFastTracks[0]; + ALOG_ASSERT(fastTrack->mBufferProvider != NULL); + delete fastTrack->mBufferProvider; + sq->end(false /*didModify*/); + delete mFastMixer; +#ifdef AUDIO_WATCHDOG + if (mAudioWatchdog != 0) { + mAudioWatchdog->requestExit(); + mAudioWatchdog->requestExitAndWait(); + mAudioWatchdog.clear(); + } +#endif + } + mAudioFlinger->unregisterWriter(mFastMixerNBLogWriter); + delete mAudioMixer; +} + + +uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t latency) const +{ + if (mFastMixer != NULL) { + MonoPipe *pipe = (MonoPipe *)mPipeSink.get(); + latency += (pipe->getAvgFrames() * 1000) / mSampleRate; + } + return latency; +} + + +void AudioFlinger::MixerThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove) +{ + PlaybackThread::threadLoop_removeTracks(tracksToRemove); +} + +void AudioFlinger::MixerThread::threadLoop_write() +{ + // FIXME we should only do one push per cycle; confirm this is true + // Start the fast mixer if it's not already running + if (mFastMixer != NULL) { + FastMixerStateQueue *sq = mFastMixer->sq(); + FastMixerState *state = sq->begin(); + if (state->mCommand != FastMixerState::MIX_WRITE && + 
(kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) { + if (state->mCommand == FastMixerState::COLD_IDLE) { + int32_t old = android_atomic_inc(&mFastMixerFutex); + if (old == -1) { + __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1); + } +#ifdef AUDIO_WATCHDOG + if (mAudioWatchdog != 0) { + mAudioWatchdog->resume(); + } +#endif + } + state->mCommand = FastMixerState::MIX_WRITE; + sq->end(); + sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); + if (kUseFastMixer == FastMixer_Dynamic) { + mNormalSink = mPipeSink; + } + } else { + sq->end(false /*didModify*/); + } + } + PlaybackThread::threadLoop_write(); +} + +void AudioFlinger::MixerThread::threadLoop_standby() +{ + // Idle the fast mixer if it's currently running + if (mFastMixer != NULL) { + FastMixerStateQueue *sq = mFastMixer->sq(); + FastMixerState *state = sq->begin(); + if (!(state->mCommand & FastMixerState::IDLE)) { + state->mCommand = FastMixerState::COLD_IDLE; + state->mColdFutexAddr = &mFastMixerFutex; + state->mColdGen++; + mFastMixerFutex = 0; + sq->end(); + // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now + sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED); + if (kUseFastMixer == FastMixer_Dynamic) { + mNormalSink = mOutputSink; + } +#ifdef AUDIO_WATCHDOG + if (mAudioWatchdog != 0) { + mAudioWatchdog->pause(); + } +#endif + } else { + sq->end(false /*didModify*/); + } + } + PlaybackThread::threadLoop_standby(); +} + +// shared by MIXER and DIRECT, overridden by DUPLICATING +void AudioFlinger::PlaybackThread::threadLoop_standby() +{ + ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended); + mOutput->stream->common.standby(&mOutput->stream->common); +} + +void AudioFlinger::MixerThread::threadLoop_mix() +{ + // obtain the presentation timestamp of the next output buffer + int64_t pts; + status_t status = INVALID_OPERATION; + + if (mNormalSink != 0) { + status = mNormalSink->getNextWriteTimestamp(&pts); + } else { + 
status = mOutputSink->getNextWriteTimestamp(&pts); + } + + if (status != NO_ERROR) { + pts = AudioBufferProvider::kInvalidPTS; + } + + // mix buffers... + mAudioMixer->process(pts); + // increase sleep time progressively when application underrun condition clears. + // Only increase sleep time if the mixer is ready for two consecutive times to avoid + // that a steady state of alternating ready/not ready conditions keeps the sleep time + // such that we would underrun the audio HAL. + if ((sleepTime == 0) && (sleepTimeShift > 0)) { + sleepTimeShift--; + } + sleepTime = 0; + standbyTime = systemTime() + standbyDelay; + //TODO: delay standby when effects have a tail +} + +void AudioFlinger::MixerThread::threadLoop_sleepTime() +{ + // If no tracks are ready, sleep once for the duration of an output + // buffer size, then write 0s to the output + if (sleepTime == 0) { + if (mMixerStatus == MIXER_TRACKS_ENABLED) { + sleepTime = activeSleepTime >> sleepTimeShift; + if (sleepTime < kMinThreadSleepTimeUs) { + sleepTime = kMinThreadSleepTimeUs; + } + // reduce sleep time in case of consecutive application underruns to avoid + // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer + // duration we would end up writing less data than needed by the audio HAL if + // the condition persists. 
+ if (sleepTimeShift < kMaxThreadSleepTimeShift) { + sleepTimeShift++; + } + } else { + sleepTime = idleSleepTime; + } + } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) { + memset (mMixBuffer, 0, mixBufferSize); + sleepTime = 0; + ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED), + "anticipated start"); + } + // TODO add standby time extension fct of effect tail +} + +// prepareTracks_l() must be called with ThreadBase::mLock held +AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l( + Vector< sp<Track> > *tracksToRemove) +{ + + mixer_state mixerStatus = MIXER_IDLE; + // find out which tracks need to be processed + size_t count = mActiveTracks.size(); + size_t mixedTracks = 0; + size_t tracksWithEffect = 0; + // counts only _active_ fast tracks + size_t fastTracks = 0; + uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset + + float masterVolume = mMasterVolume; + bool masterMute = mMasterMute; + + if (masterMute) { + masterVolume = 0; + } + // Delegate master volume control to effect in output mix effect chain if needed + sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); + if (chain != 0) { + uint32_t v = (uint32_t)(masterVolume * (1 << 24)); + chain->setVolume_l(&v, &v); + masterVolume = (float)((v + (1 << 23)) >> 24); + chain.clear(); + } + + // prepare a new state to push + FastMixerStateQueue *sq = NULL; + FastMixerState *state = NULL; + bool didModify = false; + FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED; + if (mFastMixer != NULL) { + sq = mFastMixer->sq(); + state = sq->begin(); + } + + for (size_t i=0 ; i<count ; i++) { + sp<Track> t = mActiveTracks[i].promote(); + if (t == 0) { + continue; + } + + // this const just means the local variable doesn't change + Track* const track = t.get(); + + // process fast tracks + if (track->isFastTrack()) { + + // It's theoretically possible (though unlikely) for a fast 
track to be created + // and then removed within the same normal mix cycle. This is not a problem, as + // the track never becomes active so it's fast mixer slot is never touched. + // The converse, of removing an (active) track and then creating a new track + // at the identical fast mixer slot within the same normal mix cycle, + // is impossible because the slot isn't marked available until the end of each cycle. + int j = track->mFastIndex; + ALOG_ASSERT(0 < j && j < (int)FastMixerState::kMaxFastTracks); + ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j))); + FastTrack *fastTrack = &state->mFastTracks[j]; + + // Determine whether the track is currently in underrun condition, + // and whether it had a recent underrun. + FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j]; + FastTrackUnderruns underruns = ftDump->mUnderruns; + uint32_t recentFull = (underruns.mBitFields.mFull - + track->mObservedUnderruns.mBitFields.mFull) & UNDERRUN_MASK; + uint32_t recentPartial = (underruns.mBitFields.mPartial - + track->mObservedUnderruns.mBitFields.mPartial) & UNDERRUN_MASK; + uint32_t recentEmpty = (underruns.mBitFields.mEmpty - + track->mObservedUnderruns.mBitFields.mEmpty) & UNDERRUN_MASK; + uint32_t recentUnderruns = recentPartial + recentEmpty; + track->mObservedUnderruns = underruns; + // don't count underruns that occur while stopping or pausing + // or stopped which can occur when flush() is called while active + if (!(track->isStopping() || track->isPausing() || track->isStopped())) { + track->mUnderrunCount += recentUnderruns; + } + + // This is similar to the state machine for normal tracks, + // with a few modifications for fast tracks. 
+ bool isActive = true; + switch (track->mState) { + case TrackBase::STOPPING_1: + // track stays active in STOPPING_1 state until first underrun + if (recentUnderruns > 0) { + track->mState = TrackBase::STOPPING_2; + } + break; + case TrackBase::PAUSING: + // ramp down is not yet implemented + track->setPaused(); + break; + case TrackBase::RESUMING: + // ramp up is not yet implemented + track->mState = TrackBase::ACTIVE; + break; + case TrackBase::ACTIVE: + if (recentFull > 0 || recentPartial > 0) { + // track has provided at least some frames recently: reset retry count + track->mRetryCount = kMaxTrackRetries; + } + if (recentUnderruns == 0) { + // no recent underruns: stay active + break; + } + // there has recently been an underrun of some kind + if (track->sharedBuffer() == 0) { + // were any of the recent underruns "empty" (no frames available)? + if (recentEmpty == 0) { + // no, then ignore the partial underruns as they are allowed indefinitely + break; + } + // there has recently been an "empty" underrun: decrement the retry counter + if (--(track->mRetryCount) > 0) { + break; + } + // indicate to client process that the track was disabled because of underrun; + // it will then automatically call start() when data is available + android_atomic_or(CBLK_DISABLED, &track->mCblk->flags); + // remove from active list, but state remains ACTIVE [confusing but true] + isActive = false; + break; + } + // fall through + case TrackBase::STOPPING_2: + case TrackBase::PAUSED: + case TrackBase::TERMINATED: + case TrackBase::STOPPED: + case TrackBase::FLUSHED: // flush() while active + // Check for presentation complete if track is inactive + // We have consumed all the buffers of this track. 
+ // This would be incomplete if we auto-paused on underrun + { + size_t audioHALFrames = + (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000; + size_t framesWritten = mBytesWritten / mFrameSize; + if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) { + // track stays in active list until presentation is complete + break; + } + } + if (track->isStopping_2()) { + track->mState = TrackBase::STOPPED; + } + if (track->isStopped()) { + // Can't reset directly, as fast mixer is still polling this track + // track->reset(); + // So instead mark this track as needing to be reset after push with ack + resetMask |= 1 << i; + } + isActive = false; + break; + case TrackBase::IDLE: + default: + LOG_FATAL("unexpected track state %d", track->mState); + } + + if (isActive) { + // was it previously inactive? + if (!(state->mTrackMask & (1 << j))) { + ExtendedAudioBufferProvider *eabp = track; + VolumeProvider *vp = track; + fastTrack->mBufferProvider = eabp; + fastTrack->mVolumeProvider = vp; + fastTrack->mSampleRate = track->mSampleRate; + fastTrack->mChannelMask = track->mChannelMask; + fastTrack->mGeneration++; + state->mTrackMask |= 1 << j; + didModify = true; + // no acknowledgement required for newly active tracks + } + // cache the combined master volume and stream type volume for fast mixer; this + // lacks any synchronization or barrier so VolumeProvider may read a stale value + track->mCachedVolume = masterVolume * mStreamTypes[track->streamType()].volume; + ++fastTracks; + } else { + // was it previously active? + if (state->mTrackMask & (1 << j)) { + fastTrack->mBufferProvider = NULL; + fastTrack->mGeneration++; + state->mTrackMask &= ~(1 << j); + didModify = true; + // If any fast tracks were removed, we must wait for acknowledgement + // because we're about to decrement the last sp<> on those tracks. 
+ block = FastMixerStateQueue::BLOCK_UNTIL_ACKED; + } else { + LOG_FATAL("fast track %d should have been active", j); + } + tracksToRemove->add(track); + // Avoids a misleading display in dumpsys + track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL; + } + continue; + } + + { // local variable scope to avoid goto warning + + audio_track_cblk_t* cblk = track->cblk(); + + // The first time a track is added we wait + // for all its buffers to be filled before processing it + int name = track->name(); + // make sure that we have enough frames to mix one full buffer. + // enforce this condition only once to enable draining the buffer in case the client + // app does not call stop() and relies on underrun to stop: + // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed + // during last round + uint32_t minFrames = 1; + if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() && + (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) { + if (t->sampleRate() == mSampleRate) { + minFrames = mNormalFrameCount; + } else { + // +1 for rounding and +1 for additional sample needed for interpolation + minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1; + // add frames already consumed but not yet released by the resampler + // because cblk->framesReady() will include these frames + minFrames += mAudioMixer->getUnreleasedFrames(track->name()); + // the minimum track buffer size is normally twice the number of frames necessary + // to fill one buffer and the resampler should not leave more than one buffer worth + // of unreleased frames after each pass, but just in case... 
+ ALOG_ASSERT(minFrames <= cblk->frameCount_); + } + } + if ((track->framesReady() >= minFrames) && track->isReady() && + !track->isPaused() && !track->isTerminated()) + { + ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, + this); + + mixedTracks++; + + // track->mainBuffer() != mMixBuffer means there is an effect chain + // connected to the track + chain.clear(); + if (track->mainBuffer() != mMixBuffer) { + chain = getEffectChain_l(track->sessionId()); + // Delegate volume control to effect in track effect chain if needed + if (chain != 0) { + tracksWithEffect++; + } else { + ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on " + "session %d", + name, track->sessionId()); + } + } + + + int param = AudioMixer::VOLUME; + if (track->mFillingUpStatus == Track::FS_FILLED) { + // no ramp for the first volume setting + track->mFillingUpStatus = Track::FS_ACTIVE; + if (track->mState == TrackBase::RESUMING) { + track->mState = TrackBase::ACTIVE; + param = AudioMixer::RAMP_VOLUME; + } + mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL); + } else if (cblk->server != 0) { + // If the track is stopped before the first frame was mixed, + // do not apply ramp + param = AudioMixer::RAMP_VOLUME; + } + + // compute volume for this track + uint32_t vl, vr, va; + if (track->isPausing() || mStreamTypes[track->streamType()].mute) { + vl = vr = va = 0; + if (track->isPausing()) { + track->setPaused(); + } + } else { + + // read original volumes with volume control + float typeVolume = mStreamTypes[track->streamType()].volume; + float v = masterVolume * typeVolume; + ServerProxy *proxy = track->mServerProxy; + uint32_t vlr = proxy->getVolumeLR(); + vl = vlr & 0xFFFF; + vr = vlr >> 16; + // track volumes come from shared memory, so can't be trusted and must be clamped + if (vl > MAX_GAIN_INT) { + ALOGV("Track left volume out of range: %04X", vl); + vl = MAX_GAIN_INT; + } + if (vr > MAX_GAIN_INT) { + 
ALOGV("Track right volume out of range: %04X", vr); + vr = MAX_GAIN_INT; + } + // now apply the master volume and stream type volume + vl = (uint32_t)(v * vl) << 12; + vr = (uint32_t)(v * vr) << 12; + // assuming master volume and stream type volume each go up to 1.0, + // vl and vr are now in 8.24 format + + uint16_t sendLevel = proxy->getSendLevel_U4_12(); + // send level comes from shared memory and so may be corrupt + if (sendLevel > MAX_GAIN_INT) { + ALOGV("Track send level out of range: %04X", sendLevel); + sendLevel = MAX_GAIN_INT; + } + va = (uint32_t)(v * sendLevel); + } + // Delegate volume control to effect in track effect chain if needed + if (chain != 0 && chain->setVolume_l(&vl, &vr)) { + // Do not ramp volume if volume is controlled by effect + param = AudioMixer::VOLUME; + track->mHasVolumeController = true; + } else { + // force no volume ramp when volume controller was just disabled or removed + // from effect chain to avoid volume spike + if (track->mHasVolumeController) { + param = AudioMixer::VOLUME; + } + track->mHasVolumeController = false; + } + + // Convert volumes from 8.24 to 4.12 format + // This additional clamping is needed in case chain->setVolume_l() overshot + vl = (vl + (1 << 11)) >> 12; + if (vl > MAX_GAIN_INT) { + vl = MAX_GAIN_INT; + } + vr = (vr + (1 << 11)) >> 12; + if (vr > MAX_GAIN_INT) { + vr = MAX_GAIN_INT; + } + + if (va > MAX_GAIN_INT) { + va = MAX_GAIN_INT; // va is uint32_t, so no need to check for - + } + + // XXX: these things DON'T need to be done each time + mAudioMixer->setBufferProvider(name, track); + mAudioMixer->enable(name); + + mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)vl); + mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)vr); + mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)va); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::FORMAT, (void *)track->format()); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, 
+ AudioMixer::CHANNEL_MASK, (void *)track->channelMask()); + // limit track sample rate to 2 x output sample rate, which changes at re-configuration + uint32_t maxSampleRate = mSampleRate * 2; + uint32_t reqSampleRate = track->mServerProxy->getSampleRate(); + if (reqSampleRate == 0) { + reqSampleRate = mSampleRate; + } else if (reqSampleRate > maxSampleRate) { + reqSampleRate = maxSampleRate; + } + mAudioMixer->setParameter( + name, + AudioMixer::RESAMPLE, + AudioMixer::SAMPLE_RATE, + (void *)reqSampleRate); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer()); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::AUX_BUFFER, (void *)track->auxBuffer()); + + // reset retry count + track->mRetryCount = kMaxTrackRetries; + + // If one track is ready, set the mixer ready if: + // - the mixer was not ready during previous round OR + // - no other track is not ready + if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY || + mixerStatus != MIXER_TRACKS_ENABLED) { + mixerStatus = MIXER_TRACKS_READY; + } + } else { + // clear effect chain input buffer if an active track underruns to avoid sending + // previous audio buffer again to effects + chain = getEffectChain_l(track->sessionId()); + if (chain != 0) { + chain->clearInputBuffer(); + } + + ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user, + cblk->server, this); + if ((track->sharedBuffer() != 0) || track->isTerminated() || + track->isStopped() || track->isPaused()) { + // We have consumed all the buffers of this track. + // Remove it from the list of active tracks. 
+ // TODO: use actual buffer filling status instead of latency when available from + // audio HAL + size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; + size_t framesWritten = mBytesWritten / mFrameSize; + if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) { + if (track->isStopped()) { + track->reset(); + } + tracksToRemove->add(track); + } + } else { + track->mUnderrunCount++; + // No buffers for this track. Give it a few chances to + // fill a buffer, then remove it from active list. + if (--(track->mRetryCount) <= 0) { + ALOGI("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this); + tracksToRemove->add(track); + // indicate to client process that the track was disabled because of underrun; + // it will then automatically call start() when data is available + android_atomic_or(CBLK_DISABLED, &cblk->flags); + // If one track is not ready, mark the mixer also not ready if: + // - the mixer was ready during previous round OR + // - no other track is ready + } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY || + mixerStatus != MIXER_TRACKS_READY) { + mixerStatus = MIXER_TRACKS_ENABLED; + } + } + mAudioMixer->disable(name); + } + + } // local variable scope to avoid goto warning +track_is_ready: ; + + } + + // Push the new FastMixer state if necessary + bool pauseAudioWatchdog = false; + if (didModify) { + state->mFastTracksGen++; + // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle + if (kUseFastMixer == FastMixer_Dynamic && + state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) { + state->mCommand = FastMixerState::COLD_IDLE; + state->mColdFutexAddr = &mFastMixerFutex; + state->mColdGen++; + mFastMixerFutex = 0; + if (kUseFastMixer == FastMixer_Dynamic) { + mNormalSink = mOutputSink; + } + // If we go into cold idle, need to wait for acknowledgement + // so that fast mixer stops doing I/O. 
+ block = FastMixerStateQueue::BLOCK_UNTIL_ACKED; + pauseAudioWatchdog = true; + } + } + if (sq != NULL) { + sq->end(didModify); + sq->push(block); + } +#ifdef AUDIO_WATCHDOG + if (pauseAudioWatchdog && mAudioWatchdog != 0) { + mAudioWatchdog->pause(); + } +#endif + + // Now perform the deferred reset on fast tracks that have stopped + while (resetMask != 0) { + size_t i = __builtin_ctz(resetMask); + ALOG_ASSERT(i < count); + resetMask &= ~(1 << i); + sp<Track> t = mActiveTracks[i].promote(); + if (t == 0) { + continue; + } + Track* track = t.get(); + ALOG_ASSERT(track->isFastTrack() && track->isStopped()); + track->reset(); + } + + // remove all the tracks that need to be... + count = tracksToRemove->size(); + if (CC_UNLIKELY(count)) { + for (size_t i=0 ; i<count ; i++) { + const sp<Track>& track = tracksToRemove->itemAt(i); + mActiveTracks.remove(track); + if (track->mainBuffer() != mMixBuffer) { + chain = getEffectChain_l(track->sessionId()); + if (chain != 0) { + ALOGV("stopping track on chain %p for session Id: %d", chain.get(), + track->sessionId()); + chain->decActiveTrackCnt(); + } + } + if (track->isTerminated()) { + removeTrack_l(track); + } + } + } + + // mix buffer must be cleared if all tracks are connected to an + // effect chain as in this case the mixer will not write to + // mix buffer and track effects will accumulate into it + if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || + (mixedTracks == 0 && fastTracks > 0)) { + // FIXME as a performance optimization, should remember previous zero status + memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); + } + + // if any fast tracks, then status is ready + mMixerStatusIgnoringFastTracks = mixerStatus; + if (fastTracks > 0) { + mixerStatus = MIXER_TRACKS_READY; + } + return mixerStatus; +} + +// getTrackName_l() must be called with ThreadBase::mLock held +int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask, int sessionId) +{ + return 
mAudioMixer->getTrackName(channelMask, sessionId); +} + +// deleteTrackName_l() must be called with ThreadBase::mLock held +void AudioFlinger::MixerThread::deleteTrackName_l(int name) +{ + ALOGV("remove track (%d) and delete from mixer", name); + mAudioMixer->deleteTrackName(name); +} + +// checkForNewParameters_l() must be called with ThreadBase::mLock held +bool AudioFlinger::MixerThread::checkForNewParameters_l() +{ + // if !&IDLE, holds the FastMixer state to restore after new parameters processed + FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE; + bool reconfig = false; + + while (!mNewParameters.isEmpty()) { + + if (mFastMixer != NULL) { + FastMixerStateQueue *sq = mFastMixer->sq(); + FastMixerState *state = sq->begin(); + if (!(state->mCommand & FastMixerState::IDLE)) { + previousCommand = state->mCommand; + state->mCommand = FastMixerState::HOT_IDLE; + sq->end(); + sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED); + } else { + sq->end(false /*didModify*/); + } + } + + status_t status = NO_ERROR; + String8 keyValuePair = mNewParameters[0]; + AudioParameter param = AudioParameter(keyValuePair); + int value; + + if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { + reconfig = true; + } + if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { + if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { + status = BAD_VALUE; + } else { + reconfig = true; + } + } + if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { + if (value != AUDIO_CHANNEL_OUT_STEREO) { + status = BAD_VALUE; + } else { + reconfig = true; + } + } + if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { + // do not accept frame count changes if tracks are open as the track buffer + // size depends on frame count and correct behavior would not be guaranteed + // if frame count is changed after track creation + if (!mTracks.isEmpty()) { + status = INVALID_OPERATION; + } else { + 
reconfig = true; + } + } + if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { +#ifdef ADD_BATTERY_DATA + // when changing the audio output device, call addBatteryData to notify + // the change + if (mOutDevice != value) { + uint32_t params = 0; + // check whether speaker is on + if (value & AUDIO_DEVICE_OUT_SPEAKER) { + params |= IMediaPlayerService::kBatteryDataSpeakerOn; + } + + audio_devices_t deviceWithoutSpeaker + = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER; + // check if any other device (except speaker) is on + if (value & deviceWithoutSpeaker ) { + params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn; + } + + if (params != 0) { + addBatteryData(params); + } + } +#endif + + // forward device change to effects that have requested to be + // aware of attached audio device. + mOutDevice = value; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(mOutDevice); + } + } + + if (status == NO_ERROR) { + status = mOutput->stream->common.set_parameters(&mOutput->stream->common, + keyValuePair.string()); + if (!mStandby && status == INVALID_OPERATION) { + mOutput->stream->common.standby(&mOutput->stream->common); + mStandby = true; + mBytesWritten = 0; + status = mOutput->stream->common.set_parameters(&mOutput->stream->common, + keyValuePair.string()); + } + if (status == NO_ERROR && reconfig) { + delete mAudioMixer; + // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't) + mAudioMixer = NULL; + readOutputParameters(); + mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); + for (size_t i = 0; i < mTracks.size() ; i++) { + int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId); + if (name < 0) { + break; + } + mTracks[i]->mName = name; + } + sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); + } + } + + mNewParameters.removeAt(0); + + mParamStatus = status; + mParamCond.signal(); + // wait for condition with time out in case the thread 
calling ThreadBase::setParameters() + // already timed out waiting for the status and will never signal the condition. + mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); + } + + if (!(previousCommand & FastMixerState::IDLE)) { + ALOG_ASSERT(mFastMixer != NULL); + FastMixerStateQueue *sq = mFastMixer->sq(); + FastMixerState *state = sq->begin(); + ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE); + state->mCommand = previousCommand; + sq->end(); + sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED); + } + + return reconfig; +} + + +void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + PlaybackThread::dumpInternals(fd, args); + + snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames()); + result.append(buffer); + write(fd, result.string(), result.size()); + + // Make a non-atomic copy of fast mixer dump state so it won't change underneath us + FastMixerDumpState copy = mFastMixerDumpState; + copy.dump(fd); + +#ifdef STATE_QUEUE_DUMP + // Similar for state queue + StateQueueObserverDump observerCopy = mStateQueueObserverDump; + observerCopy.dump(fd); + StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump; + mutatorCopy.dump(fd); +#endif + +#ifdef TEE_SINK + // Write the tee output to a .wav file + dumpTee(fd, mTeeSource, mId); +#endif + +#ifdef AUDIO_WATCHDOG + if (mAudioWatchdog != 0) { + // Make a non-atomic copy of audio watchdog dump so it won't change underneath us + AudioWatchdogDump wdCopy = mAudioWatchdogDump; + wdCopy.dump(fd); + } +#endif +} + +uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const +{ + return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2; +} + +uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs() const +{ + return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000); +} + +void AudioFlinger::MixerThread::cacheParameters_l() +{ + 
PlaybackThread::cacheParameters_l(); + + // FIXME: Relaxed timing because of a certain device that can't meet latency + // Should be reduced to 2x after the vendor fixes the driver issue + // increase threshold again due to low power audio mode. The way this warning + // threshold is calculated and its usefulness should be reconsidered anyway. + maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15; +} + +// ---------------------------------------------------------------------------- + +AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger, + AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device) + : PlaybackThread(audioFlinger, output, id, device, DIRECT) + // mLeftVolFloat, mRightVolFloat +{ +} + +AudioFlinger::DirectOutputThread::~DirectOutputThread() +{ +} + +AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l( + Vector< sp<Track> > *tracksToRemove +) +{ + size_t count = mActiveTracks.size(); + mixer_state mixerStatus = MIXER_IDLE; + + // find out which tracks need to be processed + for (size_t i = 0; i < count; i++) { + sp<Track> t = mActiveTracks[i].promote(); + // The track died recently + if (t == 0) { + continue; + } + + Track* const track = t.get(); + audio_track_cblk_t* cblk = track->cblk(); + + // The first time a track is added we wait + // for all its buffers to be filled before processing it + uint32_t minFrames; + if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) { + minFrames = mNormalFrameCount; + } else { + minFrames = 1; + } + if ((track->framesReady() >= minFrames) && track->isReady() && + !track->isPaused() && !track->isTerminated()) + { + ALOGVV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server); + + if (track->mFillingUpStatus == Track::FS_FILLED) { + track->mFillingUpStatus = Track::FS_ACTIVE; + mLeftVolFloat = mRightVolFloat = 0; + if (track->mState == TrackBase::RESUMING) { + track->mState = 
TrackBase::ACTIVE; + } + } + + // compute volume for this track + float left, right; + if (mMasterMute || track->isPausing() || mStreamTypes[track->streamType()].mute) { + left = right = 0; + if (track->isPausing()) { + track->setPaused(); + } + } else { + float typeVolume = mStreamTypes[track->streamType()].volume; + float v = mMasterVolume * typeVolume; + uint32_t vlr = track->mServerProxy->getVolumeLR(); + float v_clamped = v * (vlr & 0xFFFF); + if (v_clamped > MAX_GAIN) { + v_clamped = MAX_GAIN; + } + left = v_clamped/MAX_GAIN; + v_clamped = v * (vlr >> 16); + if (v_clamped > MAX_GAIN) { + v_clamped = MAX_GAIN; + } + right = v_clamped/MAX_GAIN; + } + // Only consider last track started for volume and mixer state control. + // This is the last entry in mActiveTracks unless a track underruns. + // As we only care about the transition phase between two tracks on a + // direct output, it is not a problem to ignore the underrun case. + if (i == (count - 1)) { + if (left != mLeftVolFloat || right != mRightVolFloat) { + mLeftVolFloat = left; + mRightVolFloat = right; + + // Convert volumes from float to 8.24 + uint32_t vl = (uint32_t)(left * (1 << 24)); + uint32_t vr = (uint32_t)(right * (1 << 24)); + + // Delegate volume control to effect in track effect chain if needed + // only one effect chain can be present on DirectOutputThread, so if + // there is one, the track is connected to it + if (!mEffectChains.isEmpty()) { + // Do not ramp volume if volume is controlled by effect + mEffectChains[0]->setVolume_l(&vl, &vr); + left = (float)vl / (1 << 24); + right = (float)vr / (1 << 24); + } + mOutput->stream->set_volume(mOutput->stream, left, right); + } + + // reset retry count + track->mRetryCount = kMaxTrackRetriesDirect; + mActiveTrack = t; + mixerStatus = MIXER_TRACKS_READY; + } + } else { + // clear effect chain input buffer if the last active track started underruns + // to avoid sending previous audio buffer again to effects + if (!mEffectChains.isEmpty() && (i 
== (count -1))) { + mEffectChains[0]->clearInputBuffer(); + } + + ALOGVV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server); + if ((track->sharedBuffer() != 0) || track->isTerminated() || + track->isStopped() || track->isPaused()) { + // We have consumed all the buffers of this track. + // Remove it from the list of active tracks. + // TODO: implement behavior for compressed audio + size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; + size_t framesWritten = mBytesWritten / mFrameSize; + if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) { + if (track->isStopped()) { + track->reset(); + } + tracksToRemove->add(track); + } + } else { + // No buffers for this track. Give it a few chances to + // fill a buffer, then remove it from active list. + // Only consider last track started for mixer state control + if (--(track->mRetryCount) <= 0) { + ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name()); + tracksToRemove->add(track); + } else if (i == (count -1)){ + mixerStatus = MIXER_TRACKS_ENABLED; + } + } + } + } + + // remove all the tracks that need to be... 
+ count = tracksToRemove->size(); + if (CC_UNLIKELY(count)) { + for (size_t i = 0 ; i < count ; i++) { + const sp<Track>& track = tracksToRemove->itemAt(i); + mActiveTracks.remove(track); + if (!mEffectChains.isEmpty()) { + ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(), + track->sessionId()); + mEffectChains[0]->decActiveTrackCnt(); + } + if (track->isTerminated()) { + removeTrack_l(track); + } + } + } + + return mixerStatus; +} + +void AudioFlinger::DirectOutputThread::threadLoop_mix() +{ + AudioBufferProvider::Buffer buffer; + size_t frameCount = mFrameCount; + int8_t *curBuf = (int8_t *)mMixBuffer; + // output audio to hardware + while (frameCount) { + buffer.frameCount = frameCount; + mActiveTrack->getNextBuffer(&buffer); + if (CC_UNLIKELY(buffer.raw == NULL)) { + memset(curBuf, 0, frameCount * mFrameSize); + break; + } + memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize); + frameCount -= buffer.frameCount; + curBuf += buffer.frameCount * mFrameSize; + mActiveTrack->releaseBuffer(&buffer); + } + sleepTime = 0; + standbyTime = systemTime() + standbyDelay; + mActiveTrack.clear(); + +} + +void AudioFlinger::DirectOutputThread::threadLoop_sleepTime() +{ + if (sleepTime == 0) { + if (mMixerStatus == MIXER_TRACKS_ENABLED) { + sleepTime = activeSleepTime; + } else { + sleepTime = idleSleepTime; + } + } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) { + memset(mMixBuffer, 0, mFrameCount * mFrameSize); + sleepTime = 0; + } +} + +// getTrackName_l() must be called with ThreadBase::mLock held +int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask, + int sessionId) +{ + return 0; +} + +// deleteTrackName_l() must be called with ThreadBase::mLock held +void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name) +{ +} + +// checkForNewParameters_l() must be called with ThreadBase::mLock held +bool AudioFlinger::DirectOutputThread::checkForNewParameters_l() +{ + bool reconfig = false; 
+ + while (!mNewParameters.isEmpty()) { + status_t status = NO_ERROR; + String8 keyValuePair = mNewParameters[0]; + AudioParameter param = AudioParameter(keyValuePair); + int value; + + if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { + // do not accept frame count changes if tracks are open as the track buffer + // size depends on frame count and correct behavior would not be garantied + // if frame count is changed after track creation + if (!mTracks.isEmpty()) { + status = INVALID_OPERATION; + } else { + reconfig = true; + } + } + if (status == NO_ERROR) { + status = mOutput->stream->common.set_parameters(&mOutput->stream->common, + keyValuePair.string()); + if (!mStandby && status == INVALID_OPERATION) { + mOutput->stream->common.standby(&mOutput->stream->common); + mStandby = true; + mBytesWritten = 0; + status = mOutput->stream->common.set_parameters(&mOutput->stream->common, + keyValuePair.string()); + } + if (status == NO_ERROR && reconfig) { + readOutputParameters(); + sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); + } + } + + mNewParameters.removeAt(0); + + mParamStatus = status; + mParamCond.signal(); + // wait for condition with time out in case the thread calling ThreadBase::setParameters() + // already timed out waiting for the status and will never signal the condition. 
+ mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); + } + return reconfig; +} + +uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const +{ + uint32_t time; + if (audio_is_linear_pcm(mFormat)) { + time = PlaybackThread::activeSleepTimeUs(); + } else { + time = 10000; + } + return time; +} + +uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const +{ + uint32_t time; + if (audio_is_linear_pcm(mFormat)) { + time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2; + } else { + time = 10000; + } + return time; +} + +uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const +{ + uint32_t time; + if (audio_is_linear_pcm(mFormat)) { + time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000); + } else { + time = 10000; + } + return time; +} + +void AudioFlinger::DirectOutputThread::cacheParameters_l() +{ + PlaybackThread::cacheParameters_l(); + + // use shorter standby delay as on normal output to release + // hardware resources as soon as possible + standbyDelay = microseconds(activeSleepTime*2); +} + +// ---------------------------------------------------------------------------- + +AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, + AudioFlinger::MixerThread* mainThread, audio_io_handle_t id) + : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(), + DUPLICATING), + mWaitTimeMs(UINT_MAX) +{ + addOutputTrack(mainThread); +} + +AudioFlinger::DuplicatingThread::~DuplicatingThread() +{ + for (size_t i = 0; i < mOutputTracks.size(); i++) { + mOutputTracks[i]->destroy(); + } +} + +void AudioFlinger::DuplicatingThread::threadLoop_mix() +{ + // mix buffers... 
+ if (outputsReady(outputTracks)) { + mAudioMixer->process(AudioBufferProvider::kInvalidPTS); + } else { + memset(mMixBuffer, 0, mixBufferSize); + } + sleepTime = 0; + writeFrames = mNormalFrameCount; + standbyTime = systemTime() + standbyDelay; +} + +void AudioFlinger::DuplicatingThread::threadLoop_sleepTime() +{ + if (sleepTime == 0) { + if (mMixerStatus == MIXER_TRACKS_ENABLED) { + sleepTime = activeSleepTime; + } else { + sleepTime = idleSleepTime; + } + } else if (mBytesWritten != 0) { + if (mMixerStatus == MIXER_TRACKS_ENABLED) { + writeFrames = mNormalFrameCount; + memset(mMixBuffer, 0, mixBufferSize); + } else { + // flush remaining overflow buffers in output tracks + writeFrames = 0; + } + sleepTime = 0; + } +} + +void AudioFlinger::DuplicatingThread::threadLoop_write() +{ + for (size_t i = 0; i < outputTracks.size(); i++) { + outputTracks[i]->write(mMixBuffer, writeFrames); + } + mBytesWritten += mixBufferSize; +} + +void AudioFlinger::DuplicatingThread::threadLoop_standby() +{ + // DuplicatingThread implements standby by stopping all tracks + for (size_t i = 0; i < outputTracks.size(); i++) { + outputTracks[i]->stop(); + } +} + +void AudioFlinger::DuplicatingThread::saveOutputTracks() +{ + outputTracks = mOutputTracks; +} + +void AudioFlinger::DuplicatingThread::clearOutputTracks() +{ + outputTracks.clear(); +} + +void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread) +{ + Mutex::Autolock _l(mLock); + // FIXME explain this formula + size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate(); + OutputTrack *outputTrack = new OutputTrack(thread, + this, + mSampleRate, + mFormat, + mChannelMask, + frameCount); + if (outputTrack->cblk() != NULL) { + thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f); + mOutputTracks.add(outputTrack); + ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread); + updateWaitTime_l(); + } +} + +void AudioFlinger::DuplicatingThread::removeOutputTrack(MixerThread *thread) +{ + 
Mutex::Autolock _l(mLock); + for (size_t i = 0; i < mOutputTracks.size(); i++) { + if (mOutputTracks[i]->thread() == thread) { + mOutputTracks[i]->destroy(); + mOutputTracks.removeAt(i); + updateWaitTime_l(); + return; + } + } + ALOGV("removeOutputTrack(): unkonwn thread: %p", thread); +} + +// caller must hold mLock +void AudioFlinger::DuplicatingThread::updateWaitTime_l() +{ + mWaitTimeMs = UINT_MAX; + for (size_t i = 0; i < mOutputTracks.size(); i++) { + sp<ThreadBase> strong = mOutputTracks[i]->thread().promote(); + if (strong != 0) { + uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate(); + if (waitTimeMs < mWaitTimeMs) { + mWaitTimeMs = waitTimeMs; + } + } + } +} + + +bool AudioFlinger::DuplicatingThread::outputsReady( + const SortedVector< sp<OutputTrack> > &outputTracks) +{ + for (size_t i = 0; i < outputTracks.size(); i++) { + sp<ThreadBase> thread = outputTracks[i]->thread().promote(); + if (thread == 0) { + ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p", + outputTracks[i].get()); + return false; + } + PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); + // see note at standby() declaration + if (playbackThread->standby() && !playbackThread->isSuspended()) { + ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(), + thread.get()); + return false; + } + } + return true; +} + +uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const +{ + return (mWaitTimeMs * 1000) / 2; +} + +void AudioFlinger::DuplicatingThread::cacheParameters_l() +{ + // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first + updateWaitTime_l(); + + MixerThread::cacheParameters_l(); +} + +// ---------------------------------------------------------------------------- +// Record +// ---------------------------------------------------------------------------- + +AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& 
audioFlinger, + AudioStreamIn *input, + uint32_t sampleRate, + audio_channel_mask_t channelMask, + audio_io_handle_t id, + audio_devices_t outDevice, + audio_devices_t inDevice +#ifdef TEE_SINK + , const sp<NBAIO_Sink>& teeSink +#endif + ) : + ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD), + mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL), + // mRsmpInIndex and mInputBytes set by readInputParameters() + mReqChannelCount(popcount(channelMask)), + mReqSampleRate(sampleRate) + // mBytesRead is only meaningful while active, and so is cleared in start() + // (but might be better to also clear here for dump?) +#ifdef TEE_SINK + , mTeeSink(teeSink) +#endif +{ + snprintf(mName, kNameLength, "AudioIn_%X", id); + + readInputParameters(); + +} + + +AudioFlinger::RecordThread::~RecordThread() +{ + delete[] mRsmpInBuffer; + delete mResampler; + delete[] mRsmpOutBuffer; +} + +void AudioFlinger::RecordThread::onFirstRef() +{ + run(mName, PRIORITY_URGENT_AUDIO); +} + +status_t AudioFlinger::RecordThread::readyToRun() +{ + status_t status = initCheck(); + ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this); + return status; +} + +bool AudioFlinger::RecordThread::threadLoop() +{ + AudioBufferProvider::Buffer buffer; + sp<RecordTrack> activeTrack; + Vector< sp<EffectChain> > effectChains; + + nsecs_t lastWarning = 0; + + inputStandBy(); + acquireWakeLock(); + + // used to verify we've read at least once before evaluating how many bytes were read + bool readOnce = false; + + // start recording + while (!exitPending()) { + + processConfigEvents(); + + { // scope for mLock + Mutex::Autolock _l(mLock); + checkForNewParameters_l(); + if (mActiveTrack == 0 && mConfigEvents.isEmpty()) { + standby(); + + if (exitPending()) { + break; + } + + releaseWakeLock_l(); + ALOGV("RecordThread: loop stopping"); + // go to sleep + mWaitWorkCV.wait(mLock); + ALOGV("RecordThread: loop starting"); + acquireWakeLock_l(); + continue; + } + if 
(mActiveTrack != 0) { + if (mActiveTrack->mState == TrackBase::PAUSING) { + standby(); + mActiveTrack.clear(); + mStartStopCond.broadcast(); + } else if (mActiveTrack->mState == TrackBase::RESUMING) { + if (mReqChannelCount != mActiveTrack->channelCount()) { + mActiveTrack.clear(); + mStartStopCond.broadcast(); + } else if (readOnce) { + // record start succeeds only if first read from audio input + // succeeds + if (mBytesRead >= 0) { + mActiveTrack->mState = TrackBase::ACTIVE; + } else { + mActiveTrack.clear(); + } + mStartStopCond.broadcast(); + } + mStandby = false; + } else if (mActiveTrack->mState == TrackBase::TERMINATED) { + removeTrack_l(mActiveTrack); + mActiveTrack.clear(); + } + } + lockEffectChains_l(effectChains); + } + + if (mActiveTrack != 0) { + if (mActiveTrack->mState != TrackBase::ACTIVE && + mActiveTrack->mState != TrackBase::RESUMING) { + unlockEffectChains(effectChains); + usleep(kRecordThreadSleepUs); + continue; + } + for (size_t i = 0; i < effectChains.size(); i ++) { + effectChains[i]->process_l(); + } + + buffer.frameCount = mFrameCount; + if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) { + readOnce = true; + size_t framesOut = buffer.frameCount; + if (mResampler == NULL) { + // no resampling + while (framesOut) { + size_t framesIn = mFrameCount - mRsmpInIndex; + if (framesIn) { + int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize; + int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * + mActiveTrack->mFrameSize; + if (framesIn > framesOut) + framesIn = framesOut; + mRsmpInIndex += framesIn; + framesOut -= framesIn; + if (mChannelCount == mReqChannelCount || + mFormat != AUDIO_FORMAT_PCM_16_BIT) { + memcpy(dst, src, framesIn * mFrameSize); + } else { + if (mChannelCount == 1) { + upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, + (int16_t *)src, framesIn); + } else { + downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, + (int16_t *)src, framesIn); + } + } + } + if (framesOut && mFrameCount == 
mRsmpInIndex) { + void *readInto; + if (framesOut == mFrameCount && + (mChannelCount == mReqChannelCount || + mFormat != AUDIO_FORMAT_PCM_16_BIT)) { + readInto = buffer.raw; + framesOut = 0; + } else { + readInto = mRsmpInBuffer; + mRsmpInIndex = 0; + } + mBytesRead = mInput->stream->read(mInput->stream, readInto, + mInputBytes); + if (mBytesRead <= 0) { + if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) + { + ALOGE("Error reading audio input"); + // Force input into standby so that it tries to + // recover at next read attempt + inputStandBy(); + usleep(kRecordThreadSleepUs); + } + mRsmpInIndex = mFrameCount; + framesOut = 0; + buffer.frameCount = 0; + } +#ifdef TEE_SINK + else if (mTeeSink != 0) { + (void) mTeeSink->write(readInto, + mBytesRead >> Format_frameBitShift(mTeeSink->format())); + } +#endif + } + } + } else { + // resampling + + memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t)); + // alter output frame count as if we were expecting stereo samples + if (mChannelCount == 1 && mReqChannelCount == 1) { + framesOut >>= 1; + } + mResampler->resample(mRsmpOutBuffer, framesOut, + this /* AudioBufferProvider* */); + // ditherAndClamp() works as long as all buffers returned by + // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true. 
+ if (mChannelCount == 2 && mReqChannelCount == 1) { + ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut); + // the resampler always outputs stereo samples: + // do post stereo to mono conversion + downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer, + framesOut); + } else { + ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut); + } + + } + if (mFramestoDrop == 0) { + mActiveTrack->releaseBuffer(&buffer); + } else { + if (mFramestoDrop > 0) { + mFramestoDrop -= buffer.frameCount; + if (mFramestoDrop <= 0) { + clearSyncStartEvent(); + } + } else { + mFramestoDrop += buffer.frameCount; + if (mFramestoDrop >= 0 || mSyncStartEvent == 0 || + mSyncStartEvent->isCancelled()) { + ALOGW("Synced record %s, session %d, trigger session %d", + (mFramestoDrop >= 0) ? "timed out" : "cancelled", + mActiveTrack->sessionId(), + (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0); + clearSyncStartEvent(); + } + } + } + mActiveTrack->clearOverflow(); + } + // client isn't retrieving buffers fast enough + else { + if (!mActiveTrack->setOverflow()) { + nsecs_t now = systemTime(); + if ((now - lastWarning) > kWarningThrottleNs) { + ALOGW("RecordThread: buffer overflow"); + lastWarning = now; + } + } + // Release the processor for a while before asking for a new buffer. + // This will give the application more chance to read from the buffer and + // clear the overflow. 
+ usleep(kRecordThreadSleepUs); + } + } + // enable changes in effect chain + unlockEffectChains(effectChains); + effectChains.clear(); + } + + standby(); + + { + Mutex::Autolock _l(mLock); + mActiveTrack.clear(); + mStartStopCond.broadcast(); + } + + releaseWakeLock(); + + ALOGV("RecordThread %p exiting", this); + return false; +} + +void AudioFlinger::RecordThread::standby() +{ + if (!mStandby) { + inputStandBy(); + mStandby = true; + } +} + +void AudioFlinger::RecordThread::inputStandBy() +{ + mInput->stream->common.standby(&mInput->stream->common); +} + +sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( + const sp<AudioFlinger::Client>& client, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + int sessionId, + IAudioFlinger::track_flags_t flags, + pid_t tid, + status_t *status) +{ + sp<RecordTrack> track; + status_t lStatus; + + lStatus = initCheck(); + if (lStatus != NO_ERROR) { + ALOGE("Audio driver not initialized."); + goto Exit; + } + + // FIXME use flags and tid similar to createTrack_l() + + { // scope for mLock + Mutex::Autolock _l(mLock); + + track = new RecordTrack(this, client, sampleRate, + format, channelMask, frameCount, sessionId); + + if (track->getCblk() == 0) { + lStatus = NO_MEMORY; + goto Exit; + } + mTracks.add(track); + + // disable AEC and NS if the device is a BT SCO headset supporting those pre processings + bool suspend = audio_is_bluetooth_sco_device(mInDevice) && + mAudioFlinger->btNrecIsOff(); + setEffectSuspended_l(FX_IID_AEC, suspend, sessionId); + setEffectSuspended_l(FX_IID_NS, suspend, sessionId); + } + lStatus = NO_ERROR; + +Exit: + if (status) { + *status = lStatus; + } + return track; +} + +status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack, + AudioSystem::sync_event_t event, + int triggerSession) +{ + ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession); + 
sp<ThreadBase> strongMe = this; + status_t status = NO_ERROR; + + if (event == AudioSystem::SYNC_EVENT_NONE) { + clearSyncStartEvent(); + } else if (event != AudioSystem::SYNC_EVENT_SAME) { + mSyncStartEvent = mAudioFlinger->createSyncEvent(event, + triggerSession, + recordTrack->sessionId(), + syncStartEventCallback, + this); + // Sync event can be cancelled by the trigger session if the track is not in a + // compatible state in which case we start record immediately + if (mSyncStartEvent->isCancelled()) { + clearSyncStartEvent(); + } else { + // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs + mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000); + } + } + + { + AutoMutex lock(mLock); + if (mActiveTrack != 0) { + if (recordTrack != mActiveTrack.get()) { + status = -EBUSY; + } else if (mActiveTrack->mState == TrackBase::PAUSING) { + mActiveTrack->mState = TrackBase::ACTIVE; + } + return status; + } + + recordTrack->mState = TrackBase::IDLE; + mActiveTrack = recordTrack; + mLock.unlock(); + status_t status = AudioSystem::startInput(mId); + mLock.lock(); + if (status != NO_ERROR) { + mActiveTrack.clear(); + clearSyncStartEvent(); + return status; + } + mRsmpInIndex = mFrameCount; + mBytesRead = 0; + if (mResampler != NULL) { + mResampler->reset(); + } + mActiveTrack->mState = TrackBase::RESUMING; + // signal thread to start + ALOGV("Signal record thread"); + mWaitWorkCV.broadcast(); + // do not wait for mStartStopCond if exiting + if (exitPending()) { + mActiveTrack.clear(); + status = INVALID_OPERATION; + goto startError; + } + mStartStopCond.wait(mLock); + if (mActiveTrack == 0) { + ALOGV("Record failed to start"); + status = BAD_VALUE; + goto startError; + } + ALOGV("Record started OK"); + return status; + } +startError: + AudioSystem::stopInput(mId); + clearSyncStartEvent(); + return status; +} + +void AudioFlinger::RecordThread::clearSyncStartEvent() +{ + if (mSyncStartEvent != 0) { + 
mSyncStartEvent->cancel(); + } + mSyncStartEvent.clear(); + mFramestoDrop = 0; +} + +void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event) +{ + sp<SyncEvent> strongEvent = event.promote(); + + if (strongEvent != 0) { + RecordThread *me = (RecordThread *)strongEvent->cookie(); + me->handleSyncStartEvent(strongEvent); + } +} + +void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event) +{ + if (event == mSyncStartEvent) { + // TODO: use actual buffer filling status instead of 2 buffers when info is available + // from audio HAL + mFramestoDrop = mFrameCount * 2; + } +} + +bool AudioFlinger::RecordThread::stop_l(RecordThread::RecordTrack* recordTrack) { + ALOGV("RecordThread::stop"); + if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) { + return false; + } + recordTrack->mState = TrackBase::PAUSING; + // do not wait for mStartStopCond if exiting + if (exitPending()) { + return true; + } + mStartStopCond.wait(mLock); + // if we have been restarted, recordTrack == mActiveTrack.get() here + if (exitPending() || recordTrack != mActiveTrack.get()) { + ALOGV("Record stopped OK"); + return true; + } + return false; +} + +bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const +{ + return false; +} + +status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event) +{ +#if 0 // This branch is currently dead code, but is preserved in case it will be needed in future + if (!isValidSyncEvent(event)) { + return BAD_VALUE; + } + + int eventSession = event->triggerSession(); + status_t ret = NAME_NOT_FOUND; + + Mutex::Autolock _l(mLock); + + for (size_t i = 0; i < mTracks.size(); i++) { + sp<RecordTrack> track = mTracks[i]; + if (eventSession == track->sessionId()) { + (void) track->setSyncEvent(event); + ret = NO_ERROR; + } + } + return ret; +#else + return BAD_VALUE; +#endif +} + +// destroyTrack_l() must be called with ThreadBase::mLock held +void 
AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track) +{ + track->mState = TrackBase::TERMINATED; + // active tracks are removed by threadLoop() + if (mActiveTrack != track) { + removeTrack_l(track); + } +} + +void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track) +{ + mTracks.remove(track); + // need anything related to effects here? +} + +void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args) +{ + dumpInternals(fd, args); + dumpTracks(fd, args); + dumpEffectChains(fd, args); +} + +void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, "\nInput thread %p internals\n", this); + result.append(buffer); + + if (mActiveTrack != 0) { + snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex); + result.append(buffer); + snprintf(buffer, SIZE, "In size: %d\n", mInputBytes); + result.append(buffer); + snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL)); + result.append(buffer); + snprintf(buffer, SIZE, "Out channel count: %u\n", mReqChannelCount); + result.append(buffer); + snprintf(buffer, SIZE, "Out sample rate: %u\n", mReqSampleRate); + result.append(buffer); + } else { + result.append("No active record client\n"); + } + + write(fd, result.string(), result.size()); + + dumpBase(fd, args); +} + +void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, "Input thread %p tracks\n", this); + result.append(buffer); + RecordTrack::appendDumpHeader(result); + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<RecordTrack> track = mTracks[i]; + if (track != 0) { + track->dump(buffer, SIZE); + result.append(buffer); + } + } + + if (mActiveTrack != 0) { + snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this); + result.append(buffer); + 
RecordTrack::appendDumpHeader(result); + mActiveTrack->dump(buffer, SIZE); + result.append(buffer); + + } + write(fd, result.string(), result.size()); +} + +// AudioBufferProvider interface +status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) +{ + size_t framesReq = buffer->frameCount; + size_t framesReady = mFrameCount - mRsmpInIndex; + int channelCount; + + if (framesReady == 0) { + mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes); + if (mBytesRead <= 0) { + if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) { + ALOGE("RecordThread::getNextBuffer() Error reading audio input"); + // Force input into standby so that it tries to + // recover at next read attempt + inputStandBy(); + usleep(kRecordThreadSleepUs); + } + buffer->raw = NULL; + buffer->frameCount = 0; + return NOT_ENOUGH_DATA; + } + mRsmpInIndex = 0; + framesReady = mFrameCount; + } + + if (framesReq > framesReady) { + framesReq = framesReady; + } + + if (mChannelCount == 1 && mReqChannelCount == 2) { + channelCount = 1; + } else { + channelCount = 2; + } + buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount; + buffer->frameCount = framesReq; + return NO_ERROR; +} + +// AudioBufferProvider interface +void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer) +{ + mRsmpInIndex += buffer->frameCount; + buffer->frameCount = 0; +} + +bool AudioFlinger::RecordThread::checkForNewParameters_l() +{ + bool reconfig = false; + + while (!mNewParameters.isEmpty()) { + status_t status = NO_ERROR; + String8 keyValuePair = mNewParameters[0]; + AudioParameter param = AudioParameter(keyValuePair); + int value; + audio_format_t reqFormat = mFormat; + uint32_t reqSamplingRate = mReqSampleRate; + uint32_t reqChannelCount = mReqChannelCount; + + if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { + reqSamplingRate = value; + reconfig = true; + } + if 
(param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { + reqFormat = (audio_format_t) value; + reconfig = true; + } + if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { + reqChannelCount = popcount(value); + reconfig = true; + } + if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { + // do not accept frame count changes if tracks are open as the track buffer + // size depends on frame count and correct behavior would not be guaranteed + // if frame count is changed after track creation + if (mActiveTrack != 0) { + status = INVALID_OPERATION; + } else { + reconfig = true; + } + } + if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { + // forward device change to effects that have requested to be + // aware of attached audio device. + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(value); + } + + // store input device and output device but do not forward output device to audio HAL. + // Note that status is ignored by the caller for output device + // (see AudioFlinger::setParameters() + if (audio_is_output_devices(value)) { + mOutDevice = value; + status = BAD_VALUE; + } else { + mInDevice = value; + // disable AEC and NS if the device is a BT SCO headset supporting those + // pre processings + if (mTracks.size() > 0) { + bool suspend = audio_is_bluetooth_sco_device(mInDevice) && + mAudioFlinger->btNrecIsOff(); + for (size_t i = 0; i < mTracks.size(); i++) { + sp<RecordTrack> track = mTracks[i]; + setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId()); + setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId()); + } + } + } + } + if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR && + mAudioSource != (audio_source_t)value) { + // forward device change to effects that have requested to be + // aware of attached audio device. 
+ for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setAudioSource_l((audio_source_t)value); + } + mAudioSource = (audio_source_t)value; + } + if (status == NO_ERROR) { + status = mInput->stream->common.set_parameters(&mInput->stream->common, + keyValuePair.string()); + if (status == INVALID_OPERATION) { + inputStandBy(); + status = mInput->stream->common.set_parameters(&mInput->stream->common, + keyValuePair.string()); + } + if (reconfig) { + if (status == BAD_VALUE && + reqFormat == mInput->stream->common.get_format(&mInput->stream->common) && + reqFormat == AUDIO_FORMAT_PCM_16_BIT && + (mInput->stream->common.get_sample_rate(&mInput->stream->common) + <= (2 * reqSamplingRate)) && + popcount(mInput->stream->common.get_channels(&mInput->stream->common)) + <= FCC_2 && + (reqChannelCount <= FCC_2)) { + status = NO_ERROR; + } + if (status == NO_ERROR) { + readInputParameters(); + sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED); + } + } + } + + mNewParameters.removeAt(0); + + mParamStatus = status; + mParamCond.signal(); + // wait for condition with time out in case the thread calling ThreadBase::setParameters() + // already timed out waiting for the status and will never signal the condition. 
+ mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); + } + return reconfig; +} + +String8 AudioFlinger::RecordThread::getParameters(const String8& keys) +{ + char *s; + String8 out_s8 = String8(); + + Mutex::Autolock _l(mLock); + if (initCheck() != NO_ERROR) { + return out_s8; + } + + s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string()); + out_s8 = String8(s); + free(s); + return out_s8; +} + +void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { + AudioSystem::OutputDescriptor desc; + void *param2 = NULL; + + switch (event) { + case AudioSystem::INPUT_OPENED: + case AudioSystem::INPUT_CONFIG_CHANGED: + desc.channels = mChannelMask; + desc.samplingRate = mSampleRate; + desc.format = mFormat; + desc.frameCount = mFrameCount; + desc.latency = 0; + param2 = &desc; + break; + + case AudioSystem::INPUT_CLOSED: + default: + break; + } + mAudioFlinger->audioConfigChanged_l(event, mId, param2); +} + +void AudioFlinger::RecordThread::readInputParameters() +{ + delete mRsmpInBuffer; + // mRsmpInBuffer is always assigned a new[] below + delete mRsmpOutBuffer; + mRsmpOutBuffer = NULL; + delete mResampler; + mResampler = NULL; + + mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common); + mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common); + mChannelCount = (uint16_t)popcount(mChannelMask); + mFormat = mInput->stream->common.get_format(&mInput->stream->common); + mFrameSize = audio_stream_frame_size(&mInput->stream->common); + mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common); + mFrameCount = mInputBytes / mFrameSize; + mNormalFrameCount = mFrameCount; // not used by record, but used by input effects + mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount]; + + if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) + { + int channelCount; + // optimization: if mono to mono, use the resampler in stereo to 
stereo mode to avoid + // stereo to mono post process as the resampler always outputs stereo. + if (mChannelCount == 1 && mReqChannelCount == 2) { + channelCount = 1; + } else { + channelCount = 2; + } + mResampler = AudioResampler::create(16, channelCount, mReqSampleRate); + mResampler->setSampleRate(mSampleRate); + mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); + mRsmpOutBuffer = new int32_t[mFrameCount * 2]; + + // optmization: if mono to mono, alter input frame count as if we were inputing + // stereo samples + if (mChannelCount == 1 && mReqChannelCount == 1) { + mFrameCount >>= 1; + } + + } + mRsmpInIndex = mFrameCount; +} + +unsigned int AudioFlinger::RecordThread::getInputFramesLost() +{ + Mutex::Autolock _l(mLock); + if (initCheck() != NO_ERROR) { + return 0; + } + + return mInput->stream->get_input_frames_lost(mInput->stream); +} + +uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId) const +{ + Mutex::Autolock _l(mLock); + uint32_t result = 0; + if (getEffectChain_l(sessionId) != 0) { + result = EFFECT_SESSION; + } + + for (size_t i = 0; i < mTracks.size(); ++i) { + if (sessionId == mTracks[i]->sessionId()) { + result |= TRACK_SESSION; + break; + } + } + + return result; +} + +KeyedVector<int, bool> AudioFlinger::RecordThread::sessionIds() const +{ + KeyedVector<int, bool> ids; + Mutex::Autolock _l(mLock); + for (size_t j = 0; j < mTracks.size(); ++j) { + sp<RecordThread::RecordTrack> track = mTracks[j]; + int sessionId = track->sessionId(); + if (ids.indexOfKey(sessionId) < 0) { + ids.add(sessionId, true); + } + } + return ids; +} + +AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::clearInput() +{ + Mutex::Autolock _l(mLock); + AudioStreamIn *input = mInput; + mInput = NULL; + return input; +} + +// this method must always be called either with ThreadBase mLock held or inside the thread loop +audio_stream_t* AudioFlinger::RecordThread::stream() const +{ + if (mInput == NULL) { + return NULL; + } + return 
&mInput->stream->common; +} + +status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain) +{ + // only one chain per input thread + if (mEffectChains.size() != 0) { + return INVALID_OPERATION; + } + ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this); + + chain->setInBuffer(NULL); + chain->setOutBuffer(NULL); + + checkSuspendOnAddEffectChain_l(chain); + + mEffectChains.add(chain); + + return NO_ERROR; +} + +size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& chain) +{ + ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this); + ALOGW_IF(mEffectChains.size() != 1, + "removeEffectChain_l() %p invalid chain size %d on thread %p", + chain.get(), mEffectChains.size(), this); + if (mEffectChains.size() == 1) { + mEffectChains.removeAt(0); + } + return 0; +} + +}; // namespace android diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h new file mode 100644 index 0000000..7de6872 --- /dev/null +++ b/services/audioflinger/Threads.h @@ -0,0 +1,811 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+    #error This header file should only be included from AudioFlinger.h
+#endif
+
+class ThreadBase : public Thread {
+public:
+
+#include "TrackBase.h"
+
+    enum type_t {
+        MIXER,              // Thread class is MixerThread
+        DIRECT,             // Thread class is DirectOutputThread
+        DUPLICATING,        // Thread class is DuplicatingThread
+        RECORD              // Thread class is RecordThread
+    };
+
+    ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+                audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
+    virtual             ~ThreadBase();
+
+    void dumpBase(int fd, const Vector<String16>& args);
+    void dumpEffectChains(int fd, const Vector<String16>& args);
+
+    void clearPowerManager();
+
+    // base for record and playback
+    enum {
+        CFG_EVENT_IO,
+        CFG_EVENT_PRIO
+    };
+
+    class ConfigEvent {
+    public:
+        ConfigEvent(int type) : mType(type) {}
+        virtual ~ConfigEvent() {}
+
+        int type() const { return mType; }
+
+        virtual void dump(char *buffer, size_t size) = 0;
+
+    private:
+        const int mType;
+    };
+
+    class IoConfigEvent : public ConfigEvent {
+    public:
+        IoConfigEvent(int event, int param) :
+            ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(param) {}
+        virtual ~IoConfigEvent() {}
+
+        int event() const { return mEvent; }
+        int param() const { return mParam; }
+
+        virtual void dump(char *buffer, size_t size) {
+            snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam);
+        }
+
+    private:
+        const int mEvent;
+        const int mParam;
+    };
+
+    class PrioConfigEvent : public ConfigEvent {
+    public:
+        PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
+            ConfigEvent(CFG_EVENT_PRIO), mPid(pid), mTid(tid), mPrio(prio) {}
+        virtual ~PrioConfigEvent() {}
+
+        pid_t pid() const { return mPid; }
+        pid_t tid() const { return mTid; }
+        int32_t prio() const { return mPrio; }
+
+        virtual void dump(char *buffer, size_t size) {
+            snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio);
+        }
+
+    private:
+        const pid_t mPid;
+        
const pid_t mTid; + const int32_t mPrio; + }; + + + class PMDeathRecipient : public IBinder::DeathRecipient { + public: + PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {} + virtual ~PMDeathRecipient() {} + + // IBinder::DeathRecipient + virtual void binderDied(const wp<IBinder>& who); + + private: + PMDeathRecipient(const PMDeathRecipient&); + PMDeathRecipient& operator = (const PMDeathRecipient&); + + wp<ThreadBase> mThread; + }; + + virtual status_t initCheck() const = 0; + + // static externally-visible + type_t type() const { return mType; } + audio_io_handle_t id() const { return mId;} + + // dynamic externally-visible + uint32_t sampleRate() const { return mSampleRate; } + uint32_t channelCount() const { return mChannelCount; } + audio_channel_mask_t channelMask() const { return mChannelMask; } + audio_format_t format() const { return mFormat; } + // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects, + // and returns the normal mix buffer's frame count. + size_t frameCount() const { return mNormalFrameCount; } + // Return's the HAL's frame count i.e. fast mixer buffer size. + size_t frameCountHAL() const { return mFrameCount; } + + // Should be "virtual status_t requestExitAndWait()" and override same + // method in Thread, but Thread::requestExitAndWait() is not yet virtual. 
+ void exit(); + virtual bool checkForNewParameters_l() = 0; + virtual status_t setParameters(const String8& keyValuePairs); + virtual String8 getParameters(const String8& keys) = 0; + virtual void audioConfigChanged_l(int event, int param = 0) = 0; + void sendIoConfigEvent(int event, int param = 0); + void sendIoConfigEvent_l(int event, int param = 0); + void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio); + void processConfigEvents(); + + // see note at declaration of mStandby, mOutDevice and mInDevice + bool standby() const { return mStandby; } + audio_devices_t outDevice() const { return mOutDevice; } + audio_devices_t inDevice() const { return mInDevice; } + + virtual audio_stream_t* stream() const = 0; + + sp<EffectHandle> createEffect_l( + const sp<AudioFlinger::Client>& client, + const sp<IEffectClient>& effectClient, + int32_t priority, + int sessionId, + effect_descriptor_t *desc, + int *enabled, + status_t *status); + void disconnectEffect(const sp< EffectModule>& effect, + EffectHandle *handle, + bool unpinIfLast); + + // return values for hasAudioSession (bit field) + enum effect_state { + EFFECT_SESSION = 0x1, // the audio session corresponds to at least one + // effect + TRACK_SESSION = 0x2 // the audio session corresponds to at least one + // track + }; + + // get effect chain corresponding to session Id. + sp<EffectChain> getEffectChain(int sessionId); + // same as getEffectChain() but must be called with ThreadBase mutex locked + sp<EffectChain> getEffectChain_l(int sessionId) const; + // add an effect chain to the chain list (mEffectChains) + virtual status_t addEffectChain_l(const sp<EffectChain>& chain) = 0; + // remove an effect chain from the chain list (mEffectChains) + virtual size_t removeEffectChain_l(const sp<EffectChain>& chain) = 0; + // lock all effect chains Mutexes. Must be called before releasing the + // ThreadBase mutex before processing the mixer and effects. 
This guarantees the + // integrity of the chains during the process. + // Also sets the parameter 'effectChains' to current value of mEffectChains. + void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains); + // unlock effect chains after process + void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains); + // set audio mode to all effect chains + void setMode(audio_mode_t mode); + // get effect module with corresponding ID on specified audio session + sp<AudioFlinger::EffectModule> getEffect(int sessionId, int effectId); + sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId); + // add and effect module. Also creates the effect chain is none exists for + // the effects audio session + status_t addEffect_l(const sp< EffectModule>& effect); + // remove and effect module. Also removes the effect chain is this was the last + // effect + void removeEffect_l(const sp< EffectModule>& effect); + // detach all tracks connected to an auxiliary effect + virtual void detachAuxEffect_l(int effectId) {} + // returns either EFFECT_SESSION if effects on this audio session exist in one + // chain, or TRACK_SESSION if tracks on this audio session exist, or both + virtual uint32_t hasAudioSession(int sessionId) const = 0; + // the value returned by default implementation is not important as the + // strategy is only meaningful for PlaybackThread which implements this method + virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; } + + // suspend or restore effect according to the type of effect passed. 
a NULL + // type pointer means suspend all effects in the session + void setEffectSuspended(const effect_uuid_t *type, + bool suspend, + int sessionId = AUDIO_SESSION_OUTPUT_MIX); + // check if some effects must be suspended/restored when an effect is enabled + // or disabled + void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect, + bool enabled, + int sessionId = AUDIO_SESSION_OUTPUT_MIX); + void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect, + bool enabled, + int sessionId = AUDIO_SESSION_OUTPUT_MIX); + + virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0; + virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0; + + + mutable Mutex mLock; + +protected: + + // entry describing an effect being suspended in mSuspendedSessions keyed vector + class SuspendedSessionDesc : public RefBase { + public: + SuspendedSessionDesc() : mRefCount(0) {} + + int mRefCount; // number of active suspend requests + effect_uuid_t mType; // effect type UUID + }; + + void acquireWakeLock(); + void acquireWakeLock_l(); + void releaseWakeLock(); + void releaseWakeLock_l(); + void setEffectSuspended_l(const effect_uuid_t *type, + bool suspend, + int sessionId); + // updated mSuspendedSessions when an effect suspended or restored + void updateSuspendedSessions_l(const effect_uuid_t *type, + bool suspend, + int sessionId); + // check if some effects must be suspended when an effect chain is added + void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain); + + virtual void preExit() { } + + friend class AudioFlinger; // for mEffectChains + + const type_t mType; + + // Used by parameters, config events, addTrack_l, exit + Condition mWaitWorkCV; + + const sp<AudioFlinger> mAudioFlinger; + uint32_t mSampleRate; + size_t mFrameCount; // output HAL, direct output, record + size_t mNormalFrameCount; // normal mixer and effects + audio_channel_mask_t mChannelMask; + uint16_t mChannelCount; + size_t mFrameSize; + audio_format_t mFormat; + + // 
Parameter sequence by client: binder thread calling setParameters(): + // 1. Lock mLock + // 2. Append to mNewParameters + // 3. mWaitWorkCV.signal + // 4. mParamCond.waitRelative with timeout + // 5. read mParamStatus + // 6. mWaitWorkCV.signal + // 7. Unlock + // + // Parameter sequence by server: threadLoop calling checkForNewParameters_l(): + // 1. Lock mLock + // 2. If there is an entry in mNewParameters proceed ... + // 2. Read first entry in mNewParameters + // 3. Process + // 4. Remove first entry from mNewParameters + // 5. Set mParamStatus + // 6. mParamCond.signal + // 7. mWaitWorkCV.wait with timeout (this is to avoid overwriting mParamStatus) + // 8. Unlock + Condition mParamCond; + Vector<String8> mNewParameters; + status_t mParamStatus; + + Vector<ConfigEvent *> mConfigEvents; + + // These fields are written and read by thread itself without lock or barrier, + // and read by other threads without lock or barrier via standby() , outDevice() + // and inDevice(). + // Because of the absence of a lock or barrier, any other thread that reads + // these fields must use the information in isolation, or be prepared to deal + // with possibility that it might be inconsistent with other information. + bool mStandby; // Whether thread is currently in standby. + audio_devices_t mOutDevice; // output device + audio_devices_t mInDevice; // input device + audio_source_t mAudioSource; // (see audio.h, audio_source_t) + + const audio_io_handle_t mId; + Vector< sp<EffectChain> > mEffectChains; + + static const int kNameLength = 16; // prctl(PR_SET_NAME) limit + char mName[kNameLength]; + sp<IPowerManager> mPowerManager; + sp<IBinder> mWakeLockToken; + const sp<PMDeathRecipient> mDeathRecipient; + // list of suspended effects per session and per type. 
The first vector is + // keyed by session ID, the second by type UUID timeLow field + KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > > + mSuspendedSessions; + static const size_t kLogSize = 4 * 1024; + sp<NBLog::Writer> mNBLogWriter; +}; + +// --- PlaybackThread --- +class PlaybackThread : public ThreadBase { +public: + +#include "PlaybackTracks.h" + + enum mixer_state { + MIXER_IDLE, // no active tracks + MIXER_TRACKS_ENABLED, // at least one active track, but no track has any data ready + MIXER_TRACKS_READY // at least one active track, and at least one track has data + // standby mode does not have an enum value + // suspend by audio policy manager is orthogonal to mixer state + }; + + PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, + audio_io_handle_t id, audio_devices_t device, type_t type); + virtual ~PlaybackThread(); + + void dump(int fd, const Vector<String16>& args); + + // Thread virtuals + virtual status_t readyToRun(); + virtual bool threadLoop(); + + // RefBase + virtual void onFirstRef(); + +protected: + // Code snippets that were lifted up out of threadLoop() + virtual void threadLoop_mix() = 0; + virtual void threadLoop_sleepTime() = 0; + virtual void threadLoop_write(); + virtual void threadLoop_standby(); + virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove); + + // prepareTracks_l reads and writes mActiveTracks, and returns + // the pending set of tracks to remove via Vector 'tracksToRemove'. The caller + // is responsible for clearing or destroying this Vector later on, when it + // is safe to do so. That will drop the final ref count and destroy the tracks. + virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0; + + // ThreadBase virtuals + virtual void preExit(); + +public: + + virtual status_t initCheck() const { return (mOutput == NULL) ? 
NO_INIT : NO_ERROR; } + + // return estimated latency in milliseconds, as reported by HAL + uint32_t latency() const; + // same, but lock must already be held + uint32_t latency_l() const; + + void setMasterVolume(float value); + void setMasterMute(bool muted); + + void setStreamVolume(audio_stream_type_t stream, float value); + void setStreamMute(audio_stream_type_t stream, bool muted); + + float streamVolume(audio_stream_type_t stream) const; + + sp<Track> createTrack_l( + const sp<AudioFlinger::Client>& client, + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId, + IAudioFlinger::track_flags_t *flags, + pid_t tid, + status_t *status); + + AudioStreamOut* getOutput() const; + AudioStreamOut* clearOutput(); + virtual audio_stream_t* stream() const; + + // a very large number of suspend() will eventually wraparound, but unlikely + void suspend() { (void) android_atomic_inc(&mSuspended); } + void restore() + { + // if restore() is done without suspend(), get back into + // range so that the next suspend() will operate correctly + if (android_atomic_dec(&mSuspended) <= 0) { + android_atomic_release_store(0, &mSuspended); + } + } + bool isSuspended() const + { return android_atomic_acquire_load(&mSuspended) > 0; } + + virtual String8 getParameters(const String8& keys); + virtual void audioConfigChanged_l(int event, int param = 0); + status_t getRenderPosition(size_t *halFrames, size_t *dspFrames); + int16_t *mixBuffer() const { return mMixBuffer; }; + + virtual void detachAuxEffect_l(int effectId); + status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, + int EffectId); + status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, + int EffectId); + + virtual status_t addEffectChain_l(const sp<EffectChain>& chain); + virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); + 
virtual uint32_t hasAudioSession(int sessionId) const; + virtual uint32_t getStrategyForSession_l(int sessionId); + + + virtual status_t setSyncEvent(const sp<SyncEvent>& event); + virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const; + void invalidateTracks(audio_stream_type_t streamType); + + +protected: + int16_t* mMixBuffer; + + // suspend count, > 0 means suspended. While suspended, the thread continues to pull from + // tracks and mix, but doesn't write to HAL. A2DP and SCO HAL implementations can't handle + // concurrent use of both of them, so Audio Policy Service suspends one of the threads to + // workaround that restriction. + // 'volatile' means accessed via atomic operations and no lock. + volatile int32_t mSuspended; + + // FIXME overflows every 6+ hours at 44.1 kHz stereo 16-bit samples + // mFramesWritten would be better, or 64-bit even better + size_t mBytesWritten; +private: + // mMasterMute is in both PlaybackThread and in AudioFlinger. When a + // PlaybackThread needs to find out if master-muted, it checks it's local + // copy rather than the one in AudioFlinger. This optimization saves a lock. + bool mMasterMute; + void setMasterMute_l(bool muted) { mMasterMute = muted; } +protected: + SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<> + + // Allocate a track name for a given channel mask. + // Returns name >= 0 if successful, -1 on failure. 
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId) = 0; + virtual void deleteTrackName_l(int name) = 0; + + // Time to sleep between cycles when: + virtual uint32_t activeSleepTimeUs() const; // mixer state MIXER_TRACKS_ENABLED + virtual uint32_t idleSleepTimeUs() const = 0; // mixer state MIXER_IDLE + virtual uint32_t suspendSleepTimeUs() const = 0; // audio policy manager suspended us + // No sleep when mixer state == MIXER_TRACKS_READY; relies on audio HAL stream->write() + // No sleep in standby mode; waits on a condition + + // Code snippets that are temporarily lifted up out of threadLoop() until the merge + void checkSilentMode_l(); + + // Non-trivial for DUPLICATING only + virtual void saveOutputTracks() { } + virtual void clearOutputTracks() { } + + // Cache various calculated values, at threadLoop() entry and after a parameter change + virtual void cacheParameters_l(); + + virtual uint32_t correctLatency_l(uint32_t latency) const; + +private: + + friend class AudioFlinger; // for numerous + + PlaybackThread(const Client&); + PlaybackThread& operator = (const PlaybackThread&); + + status_t addTrack_l(const sp<Track>& track); + void destroyTrack_l(const sp<Track>& track); + void removeTrack_l(const sp<Track>& track); + + void readOutputParameters(); + + virtual void dumpInternals(int fd, const Vector<String16>& args); + void dumpTracks(int fd, const Vector<String16>& args); + + SortedVector< sp<Track> > mTracks; + // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by + // DuplicatingThread + stream_type_t mStreamTypes[AUDIO_STREAM_CNT + 1]; + AudioStreamOut *mOutput; + + float mMasterVolume; + nsecs_t mLastWriteTime; + int mNumWrites; + int mNumDelayedWrites; + bool mInWrite; + + // FIXME rename these former local variables of threadLoop to standard "m" names + nsecs_t standbyTime; + size_t mixBufferSize; + + // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l() 
+ uint32_t activeSleepTime; + uint32_t idleSleepTime; + + uint32_t sleepTime; + + // mixer status returned by prepareTracks_l() + mixer_state mMixerStatus; // current cycle + // previous cycle when in prepareTracks_l() + mixer_state mMixerStatusIgnoringFastTracks; + // FIXME or a separate ready state per track + + // FIXME move these declarations into the specific sub-class that needs them + // MIXER only + uint32_t sleepTimeShift; + + // same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value + nsecs_t standbyDelay; + + // MIXER only + nsecs_t maxPeriod; + + // DUPLICATING only + uint32_t writeFrames; + +private: + // The HAL output sink is treated as non-blocking, but current implementation is blocking + sp<NBAIO_Sink> mOutputSink; + // If a fast mixer is present, the blocking pipe sink, otherwise clear + sp<NBAIO_Sink> mPipeSink; + // The current sink for the normal mixer to write it's (sub)mix, mOutputSink or mPipeSink + sp<NBAIO_Sink> mNormalSink; +#ifdef TEE_SINK + // For dumpsys + sp<NBAIO_Sink> mTeeSink; + sp<NBAIO_Source> mTeeSource; +#endif + uint32_t mScreenState; // cached copy of gScreenState + static const size_t kFastMixerLogSize = 4 * 1024; + sp<NBLog::Writer> mFastMixerNBLogWriter; +public: + virtual bool hasFastMixer() const = 0; + virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const + { FastTrackUnderruns dummy; return dummy; } + +protected: + // accessed by both binder threads and within threadLoop(), lock on mutex needed + unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available + +}; + +class MixerThread : public PlaybackThread { +public: + MixerThread(const sp<AudioFlinger>& audioFlinger, + AudioStreamOut* output, + audio_io_handle_t id, + audio_devices_t device, + type_t type = MIXER); + virtual ~MixerThread(); + + // Thread virtuals + + virtual bool checkForNewParameters_l(); + virtual void dumpInternals(int fd, const Vector<String16>& args); + +protected: + virtual 
mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); + virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId); + virtual void deleteTrackName_l(int name); + virtual uint32_t idleSleepTimeUs() const; + virtual uint32_t suspendSleepTimeUs() const; + virtual void cacheParameters_l(); + + // threadLoop snippets + virtual void threadLoop_write(); + virtual void threadLoop_standby(); + virtual void threadLoop_mix(); + virtual void threadLoop_sleepTime(); + virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove); + virtual uint32_t correctLatency_l(uint32_t latency) const; + + AudioMixer* mAudioMixer; // normal mixer +private: + // one-time initialization, no locks required + FastMixer* mFastMixer; // non-NULL if there is also a fast mixer + sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread + + // contents are not guaranteed to be consistent, no locks required + FastMixerDumpState mFastMixerDumpState; +#ifdef STATE_QUEUE_DUMP + StateQueueObserverDump mStateQueueObserverDump; + StateQueueMutatorDump mStateQueueMutatorDump; +#endif + AudioWatchdogDump mAudioWatchdogDump; + + // accessible only within the threadLoop(), no locks required + // mFastMixer->sq() // for mutating and pushing state + int32_t mFastMixerFutex; // for cold idle + +public: + virtual bool hasFastMixer() const { return mFastMixer != NULL; } + virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const { + ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks); + return mFastMixerDumpState.mTracks[fastIndex].mUnderruns; + } +}; + +class DirectOutputThread : public PlaybackThread { +public: + + DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, + audio_io_handle_t id, audio_devices_t device); + virtual ~DirectOutputThread(); + + // Thread virtuals + + virtual bool checkForNewParameters_l(); + +protected: + virtual int getTrackName_l(audio_channel_mask_t channelMask, int 
sessionId); + virtual void deleteTrackName_l(int name); + virtual uint32_t activeSleepTimeUs() const; + virtual uint32_t idleSleepTimeUs() const; + virtual uint32_t suspendSleepTimeUs() const; + virtual void cacheParameters_l(); + + // threadLoop snippets + virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); + virtual void threadLoop_mix(); + virtual void threadLoop_sleepTime(); + +private: + // volumes last sent to audio HAL with stream->set_volume() + float mLeftVolFloat; + float mRightVolFloat; + + // prepareTracks_l() tells threadLoop_mix() the name of the single active track + sp<Track> mActiveTrack; +public: + virtual bool hasFastMixer() const { return false; } +}; + +class DuplicatingThread : public MixerThread { +public: + DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread, + audio_io_handle_t id); + virtual ~DuplicatingThread(); + + // Thread virtuals + void addOutputTrack(MixerThread* thread); + void removeOutputTrack(MixerThread* thread); + uint32_t waitTimeMs() const { return mWaitTimeMs; } +protected: + virtual uint32_t activeSleepTimeUs() const; + +private: + bool outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks); +protected: + // threadLoop snippets + virtual void threadLoop_mix(); + virtual void threadLoop_sleepTime(); + virtual void threadLoop_write(); + virtual void threadLoop_standby(); + virtual void cacheParameters_l(); + +private: + // called from threadLoop, addOutputTrack, removeOutputTrack + virtual void updateWaitTime_l(); +protected: + virtual void saveOutputTracks(); + virtual void clearOutputTracks(); +private: + + uint32_t mWaitTimeMs; + SortedVector < sp<OutputTrack> > outputTracks; + SortedVector < sp<OutputTrack> > mOutputTracks; +public: + virtual bool hasFastMixer() const { return false; } +}; + + +// record thread +class RecordThread : public ThreadBase, public AudioBufferProvider + // derives from AudioBufferProvider interface for use by resampler +{ +public: + 
+#include "RecordTracks.h" + + RecordThread(const sp<AudioFlinger>& audioFlinger, + AudioStreamIn *input, + uint32_t sampleRate, + audio_channel_mask_t channelMask, + audio_io_handle_t id, + audio_devices_t outDevice, + audio_devices_t inDevice +#ifdef TEE_SINK + , const sp<NBAIO_Sink>& teeSink +#endif + ); + virtual ~RecordThread(); + + // no addTrack_l ? + void destroyTrack_l(const sp<RecordTrack>& track); + void removeTrack_l(const sp<RecordTrack>& track); + + void dumpInternals(int fd, const Vector<String16>& args); + void dumpTracks(int fd, const Vector<String16>& args); + + // Thread virtuals + virtual bool threadLoop(); + virtual status_t readyToRun(); + + // RefBase + virtual void onFirstRef(); + + virtual status_t initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; } + sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l( + const sp<AudioFlinger::Client>& client, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + int sessionId, + IAudioFlinger::track_flags_t flags, + pid_t tid, + status_t *status); + + status_t start(RecordTrack* recordTrack, + AudioSystem::sync_event_t event, + int triggerSession); + + // ask the thread to stop the specified track, and + // return true if the caller should then do it's part of the stopping process + bool stop_l(RecordTrack* recordTrack); + + void dump(int fd, const Vector<String16>& args); + AudioStreamIn* clearInput(); + virtual audio_stream_t* stream() const; + + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); + virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); + + virtual bool checkForNewParameters_l(); + virtual String8 getParameters(const String8& keys); + virtual void audioConfigChanged_l(int event, int param = 0); + void readInputParameters(); + virtual unsigned int getInputFramesLost(); + + virtual status_t addEffectChain_l(const sp<EffectChain>& 
chain); + virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); + virtual uint32_t hasAudioSession(int sessionId) const; + + // Return the set of unique session IDs across all tracks. + // The keys are the session IDs, and the associated values are meaningless. + // FIXME replace by Set [and implement Bag/Multiset for other uses]. + KeyedVector<int, bool> sessionIds() const; + + virtual status_t setSyncEvent(const sp<SyncEvent>& event); + virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const; + + static void syncStartEventCallback(const wp<SyncEvent>& event); + void handleSyncStartEvent(const sp<SyncEvent>& event); + +private: + void clearSyncStartEvent(); + + // Enter standby if not already in standby, and set mStandby flag + void standby(); + + // Call the HAL standby method unconditionally, and don't change mStandby flag + void inputStandBy(); + + AudioStreamIn *mInput; + SortedVector < sp<RecordTrack> > mTracks; + // mActiveTrack has dual roles: it indicates the current active track, and + // is used together with mStartStopCond to indicate start()/stop() progress + sp<RecordTrack> mActiveTrack; + Condition mStartStopCond; + AudioResampler *mResampler; + int32_t *mRsmpOutBuffer; + int16_t *mRsmpInBuffer; + size_t mRsmpInIndex; + size_t mInputBytes; + const uint32_t mReqChannelCount; + const uint32_t mReqSampleRate; + ssize_t mBytesRead; + // sync event triggering actual audio capture. Frames read before this event will + // be dropped and therefore not read by the application. + sp<SyncEvent> mSyncStartEvent; + // number of captured frames to drop after the start sync event has been received. 
+ // when < 0, maximum frames to drop before starting capture even if sync event is + // not received + ssize_t mFramestoDrop; + + // For dumpsys + const sp<NBAIO_Sink> mTeeSink; +}; diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h new file mode 100644 index 0000000..fac7071 --- /dev/null +++ b/services/audioflinger/TrackBase.h @@ -0,0 +1,145 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +#ifndef INCLUDING_FROM_AUDIOFLINGER_H + #error This header file should only be included from AudioFlinger.h +#endif + +// base for record and playback +class TrackBase : public ExtendedAudioBufferProvider, public RefBase { + +public: + enum track_state { + IDLE, + TERMINATED, + FLUSHED, + STOPPED, + // next 2 states are currently used for fast tracks only + STOPPING_1, // waiting for first underrun + STOPPING_2, // waiting for presentation complete + RESUMING, + ACTIVE, + PAUSING, + PAUSED + }; + + TrackBase(ThreadBase *thread, + const sp<Client>& client, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId, + bool isOut); + virtual ~TrackBase(); + + virtual status_t start(AudioSystem::sync_event_t event, + int triggerSession) = 0; + virtual void stop() = 0; + sp<IMemory> getCblk() const { return mCblkMemory; } + audio_track_cblk_t* cblk() const { return mCblk; } + int sessionId() const { return mSessionId; } + virtual status_t setSyncEvent(const sp<SyncEvent>& event); + +protected: + TrackBase(const TrackBase&); + TrackBase& operator = (const TrackBase&); + + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0; + virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); + + // ExtendedAudioBufferProvider interface is only needed for Track, + // but putting it in TrackBase avoids the complexity of virtual inheritance + virtual size_t framesReady() const { return SIZE_MAX; } + + audio_format_t format() const { return mFormat; } + + uint32_t channelCount() const { return mChannelCount; } + + audio_channel_mask_t channelMask() const { return mChannelMask; } + + uint32_t sampleRate() const; // FIXME inline after cblk sr moved + + // Return a pointer to the start of a contiguous slice of the track buffer. 
+ // Parameter 'offset' is the requested start position, expressed in + // monotonically increasing frame units relative to the track epoch. + // Parameter 'frames' is the requested length, also in frame units. + // Always returns non-NULL. It is the caller's responsibility to + // verify that this will be successful; the result of calling this + // function with invalid 'offset' or 'frames' is undefined. + void* getBuffer(uint32_t offset, uint32_t frames) const; + + bool isStopped() const { + return (mState == STOPPED || mState == FLUSHED); + } + + // for fast tracks only + bool isStopping() const { + return mState == STOPPING_1 || mState == STOPPING_2; + } + bool isStopping_1() const { + return mState == STOPPING_1; + } + bool isStopping_2() const { + return mState == STOPPING_2; + } + + bool isTerminated() const { + return mState == TERMINATED; + } + + bool step(); // mStepCount is an implicit input + void reset(); + + bool isOut() const { return mIsOut; } + // true for Track and TimedTrack, false for RecordTrack, + // this could be a track type if needed later + + const wp<ThreadBase> mThread; + /*const*/ sp<Client> mClient; // see explanation at ~TrackBase() why not const + sp<IMemory> mCblkMemory; + audio_track_cblk_t* mCblk; + void* mBuffer; // start of track buffer, typically in shared memory + // except for OutputTrack when it is in local memory + void* mBufferEnd; // &mBuffer[mFrameCount * frameSize], where frameSize + // is based on mChannelCount and 16-bit samples + uint32_t mStepCount; // saves AudioBufferProvider::Buffer::frameCount as of + // time of releaseBuffer() for later use by step() + // we don't really need a lock for these + track_state mState; + const uint32_t mSampleRate; // initial sample rate only; for tracks which + // support dynamic rates, the current value is in control block + const audio_format_t mFormat; + const audio_channel_mask_t mChannelMask; + const uint8_t mChannelCount; + const size_t mFrameSize; // AudioFlinger's view of 
frame size in shared memory, + // where for AudioTrack (but not AudioRecord), + // 8-bit PCM samples are stored as 16-bit + const size_t mFrameCount;// size of track buffer given at createTrack() or + // openRecord(), and then adjusted as needed + + bool mStepServerFailed; + const int mSessionId; + Vector < sp<SyncEvent> >mSyncEvents; + const bool mIsOut; + ServerProxy* mServerProxy; + const int mId; + sp<NBAIO_Sink> mTeeSink; + sp<NBAIO_Source> mTeeSource; +}; diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp new file mode 100644 index 0000000..5ac3129 --- /dev/null +++ b/services/audioflinger/Tracks.cpp @@ -0,0 +1,1810 @@ +/* +** +** Copyright 2012, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#define LOG_TAG "AudioFlinger" +//#define LOG_NDEBUG 0 + +#include <math.h> +#include <cutils/compiler.h> +#include <utils/Log.h> + +#include <private/media/AudioTrackShared.h> + +#include <common_time/cc_helper.h> +#include <common_time/local_clock.h> + +#include "AudioMixer.h" +#include "AudioFlinger.h" +#include "ServiceUtilities.h" + +#include <media/nbaio/Pipe.h> +#include <media/nbaio/PipeReader.h> + +// ---------------------------------------------------------------------------- + +// Note: the following macro is used for extremely verbose logging message. 
In +// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to +// 0; but one side effect of this is to turn all LOGV's as well. Some messages +// are so verbose that we want to suppress them even when we have ALOG_ASSERT +// turned on. Do not uncomment the #def below unless you really know what you +// are doing and want to see all of the extremely verbose messages. +//#define VERY_VERY_VERBOSE_LOGGING +#ifdef VERY_VERY_VERBOSE_LOGGING +#define ALOGVV ALOGV +#else +#define ALOGVV(a...) do { } while(0) +#endif + +namespace android { + +// ---------------------------------------------------------------------------- +// TrackBase +// ---------------------------------------------------------------------------- + +static volatile int32_t nextTrackId = 55; + +// TrackBase constructor must be called with AudioFlinger::mLock held +AudioFlinger::ThreadBase::TrackBase::TrackBase( + ThreadBase *thread, + const sp<Client>& client, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId, + bool isOut) + : RefBase(), + mThread(thread), + mClient(client), + mCblk(NULL), + // mBuffer + // mBufferEnd + mStepCount(0), + mState(IDLE), + mSampleRate(sampleRate), + mFormat(format), + mChannelMask(channelMask), + mChannelCount(popcount(channelMask)), + mFrameSize(audio_is_linear_pcm(format) ? 
+ mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)), + mFrameCount(frameCount), + mStepServerFailed(false), + mSessionId(sessionId), + mIsOut(isOut), + mServerProxy(NULL), + mId(android_atomic_inc(&nextTrackId)) +{ + // client == 0 implies sharedBuffer == 0 + ALOG_ASSERT(!(client == 0 && sharedBuffer != 0)); + + ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), + sharedBuffer->size()); + + // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize); + size_t size = sizeof(audio_track_cblk_t); + size_t bufferSize = frameCount * mFrameSize; + if (sharedBuffer == 0) { + size += bufferSize; + } + + if (client != 0) { + mCblkMemory = client->heap()->allocate(size); + if (mCblkMemory != 0) { + mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer()); + // can't assume mCblk != NULL + } else { + ALOGE("not enough memory for AudioTrack size=%u", size); + client->heap()->dump("AudioTrack"); + return; + } + } else { + // this syntax avoids calling the audio_track_cblk_t constructor twice + mCblk = (audio_track_cblk_t *) new uint8_t[size]; + // assume mCblk != NULL + } + + // construct the shared structure in-place. 
+ if (mCblk != NULL) { + new(mCblk) audio_track_cblk_t(); + // clear all buffers + mCblk->frameCount_ = frameCount; +// uncomment the following lines to quickly test 32-bit wraparound +// mCblk->user = 0xffff0000; +// mCblk->server = 0xffff0000; +// mCblk->userBase = 0xffff0000; +// mCblk->serverBase = 0xffff0000; + if (sharedBuffer == 0) { + mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); + memset(mBuffer, 0, bufferSize); + // Force underrun condition to avoid false underrun callback until first data is + // written to buffer (other flags are cleared) + mCblk->flags = CBLK_UNDERRUN; + } else { + mBuffer = sharedBuffer->pointer(); + } + mBufferEnd = (uint8_t *)mBuffer + bufferSize; + mServerProxy = new ServerProxy(mCblk, mBuffer, frameCount, mFrameSize, isOut); + +#ifdef TEE_SINK + if (mTeeSinkTrackEnabled) { + NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount); + if (pipeFormat != Format_Invalid) { + Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat); + size_t numCounterOffers = 0; + const NBAIO_Format offers[1] = {pipeFormat}; + ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + PipeReader *pipeReader = new PipeReader(*pipe); + numCounterOffers = 0; + index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mTeeSink = pipe; + mTeeSource = pipeReader; + } + } +#endif + + } +} + +AudioFlinger::ThreadBase::TrackBase::~TrackBase() +{ +#ifdef TEE_SINK + dumpTee(-1, mTeeSource, mId); +#endif + // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference + delete mServerProxy; + if (mCblk != NULL) { + if (mClient == 0) { + delete mCblk; + } else { + mCblk->~audio_track_cblk_t(); // destroy our shared-structure. 
+ } + } + mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to + if (mClient != 0) { + // Client destructor must run with AudioFlinger mutex locked + Mutex::Autolock _l(mClient->audioFlinger()->mLock); + // If the client's reference count drops to zero, the associated destructor + // must run with AudioFlinger lock held. Thus the explicit clear() rather than + // relying on the automatic clear() at end of scope. + mClient.clear(); + } +} + +// AudioBufferProvider interface +// getNextBuffer() = 0; +// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack +void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer) +{ +#ifdef TEE_SINK + if (mTeeSink != 0) { + (void) mTeeSink->write(buffer->raw, buffer->frameCount); + } +#endif + + buffer->raw = NULL; + mStepCount = buffer->frameCount; + // FIXME See note at getNextBuffer() + (void) step(); // ignore return value of step() + buffer->frameCount = 0; +} + +bool AudioFlinger::ThreadBase::TrackBase::step() { + bool result = mServerProxy->step(mStepCount); + if (!result) { + ALOGV("stepServer failed acquiring cblk mutex"); + mStepServerFailed = true; + } + return result; +} + +void AudioFlinger::ThreadBase::TrackBase::reset() { + audio_track_cblk_t* cblk = this->cblk(); + + cblk->user = 0; + cblk->server = 0; + cblk->userBase = 0; + cblk->serverBase = 0; + mStepServerFailed = false; + ALOGV("TrackBase::reset"); +} + +uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const { + return mServerProxy->getSampleRate(); +} + +void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const { + audio_track_cblk_t* cblk = this->cblk(); + int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize; + int8_t *bufferEnd = bufferStart + frames * mFrameSize; + + // Check validity of returned pointer in case the track control block would have been corrupted. 
+ ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd), + "TrackBase::getBuffer buffer out of range:\n" + " start: %p, end %p , mBuffer %p mBufferEnd %p\n" + " server %u, serverBase %u, user %u, userBase %u, frameSize %u", + bufferStart, bufferEnd, mBuffer, mBufferEnd, + cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize); + + return bufferStart; +} + +status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event) +{ + mSyncEvents.add(event); + return NO_ERROR; +} + +// ---------------------------------------------------------------------------- +// Playback +// ---------------------------------------------------------------------------- + +AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track) + : BnAudioTrack(), + mTrack(track) +{ +} + +AudioFlinger::TrackHandle::~TrackHandle() { + // just stop the track on deletion, associated resources + // will be freed from the main thread once all pending buffers have + // been played. Unless it's not in the active track list, in which + // case we free everything now... 
+ mTrack->destroy(); +} + +sp<IMemory> AudioFlinger::TrackHandle::getCblk() const { + return mTrack->getCblk(); +} + +status_t AudioFlinger::TrackHandle::start() { + return mTrack->start(); +} + +void AudioFlinger::TrackHandle::stop() { + mTrack->stop(); +} + +void AudioFlinger::TrackHandle::flush() { + mTrack->flush(); +} + +void AudioFlinger::TrackHandle::pause() { + mTrack->pause(); +} + +status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId) +{ + return mTrack->attachAuxEffect(EffectId); +} + +status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size, + sp<IMemory>* buffer) { + if (!mTrack->isTimedTrack()) + return INVALID_OPERATION; + + PlaybackThread::TimedTrack* tt = + reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); + return tt->allocateTimedBuffer(size, buffer); +} + +status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer, + int64_t pts) { + if (!mTrack->isTimedTrack()) + return INVALID_OPERATION; + + PlaybackThread::TimedTrack* tt = + reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); + return tt->queueTimedBuffer(buffer, pts); +} + +status_t AudioFlinger::TrackHandle::setMediaTimeTransform( + const LinearTransform& xform, int target) { + + if (!mTrack->isTimedTrack()) + return INVALID_OPERATION; + + PlaybackThread::TimedTrack* tt = + reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); + return tt->setMediaTimeTransform( + xform, static_cast<TimedAudioTrack::TargetTimeline>(target)); +} + +status_t AudioFlinger::TrackHandle::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + return BnAudioTrack::onTransact(code, data, reply, flags); +} + +// ---------------------------------------------------------------------------- + +// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held +AudioFlinger::PlaybackThread::Track::Track( + PlaybackThread *thread, + const sp<Client>& client, + audio_stream_type_t streamType, + uint32_t 
sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId, + IAudioFlinger::track_flags_t flags) + : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, + sessionId, true /*isOut*/), + mFillingUpStatus(FS_INVALID), + // mRetryCount initialized later when needed + mSharedBuffer(sharedBuffer), + mStreamType(streamType), + mName(-1), // see note below + mMainBuffer(thread->mixBuffer()), + mAuxBuffer(NULL), + mAuxEffectId(0), mHasVolumeController(false), + mPresentationCompleteFrames(0), + mFlags(flags), + mFastIndex(-1), + mUnderrunCount(0), + mCachedVolume(1.0), + mIsInvalid(false) +{ + if (mCblk != NULL) { + // to avoid leaking a track name, do not allocate one unless there is an mCblk + mName = thread->getTrackName_l(channelMask, sessionId); + mCblk->mName = mName; + if (mName < 0) { + ALOGE("no more track names available"); + return; + } + // only allocate a fast track index if we were able to allocate a normal track name + if (flags & IAudioFlinger::TRACK_FAST) { + ALOG_ASSERT(thread->mFastTrackAvailMask != 0); + int i = __builtin_ctz(thread->mFastTrackAvailMask); + ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks); + // FIXME This is too eager. We allocate a fast track index before the + // fast track becomes active. Since fast tracks are a scarce resource, + // this means we are potentially denying other more important fast tracks from + // being created. It would be better to allocate the index dynamically. 
+ mFastIndex = i; + mCblk->mName = i; + // Read the initial underruns because this field is never cleared by the fast mixer + mObservedUnderruns = thread->getFastTrackUnderruns(i); + thread->mFastTrackAvailMask &= ~(1 << i); + } + } + ALOGV("Track constructor name %d, calling pid %d", mName, + IPCThreadState::self()->getCallingPid()); +} + +AudioFlinger::PlaybackThread::Track::~Track() +{ + ALOGV("PlaybackThread::Track destructor"); +} + +void AudioFlinger::PlaybackThread::Track::destroy() +{ + // NOTE: destroyTrack_l() can remove a strong reference to this Track + // by removing it from mTracks vector, so there is a risk that this Tracks's + // destructor is called. As the destructor needs to lock mLock, + // we must acquire a strong reference on this Track before locking mLock + // here so that the destructor is called only when exiting this function. + // On the other hand, as long as Track::destroy() is only called by + // TrackHandle destructor, the TrackHandle still holds a strong ref on + // this Track with its member mTrack. 
+ sp<Track> keep(this); + { // scope for mLock + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + if (!isOutputTrack()) { + if (mState == ACTIVE || mState == RESUMING) { + AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId); + +#ifdef ADD_BATTERY_DATA + // to track the speaker usage + addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop); +#endif + } + AudioSystem::releaseOutput(thread->id()); + } + Mutex::Autolock _l(thread->mLock); + PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); + playbackThread->destroyTrack_l(this); + } + } +} + +/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result) +{ + result.append(" Name Client Type Fmt Chn mask Session StpCnt fCount S F SRate " + "L dB R dB Server User Main buf Aux Buf Flags Underruns\n"); +} + +void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) +{ + uint32_t vlr = mServerProxy->getVolumeLR(); + if (isFastTrack()) { + sprintf(buffer, " F %2d", mFastIndex); + } else { + sprintf(buffer, " %4d", mName - AudioMixer::TRACK0); + } + track_state state = mState; + char stateChar; + switch (state) { + case IDLE: + stateChar = 'I'; + break; + case TERMINATED: + stateChar = 'T'; + break; + case STOPPING_1: + stateChar = 's'; + break; + case STOPPING_2: + stateChar = '5'; + break; + case STOPPED: + stateChar = 'S'; + break; + case RESUMING: + stateChar = 'R'; + break; + case ACTIVE: + stateChar = 'A'; + break; + case PAUSING: + stateChar = 'p'; + break; + case PAUSED: + stateChar = 'P'; + break; + case FLUSHED: + stateChar = 'F'; + break; + default: + stateChar = '?'; + break; + } + char nowInUnderrun; + switch (mObservedUnderruns.mBitFields.mMostRecent) { + case UNDERRUN_FULL: + nowInUnderrun = ' '; + break; + case UNDERRUN_PARTIAL: + nowInUnderrun = '<'; + break; + case UNDERRUN_EMPTY: + nowInUnderrun = '*'; + break; + default: + nowInUnderrun = '?'; + break; + } + snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u 
%6u %6u %1c %1d %5u %5.2g %5.2g " + "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n", + (mClient == 0) ? getpid_cached : mClient->pid(), + mStreamType, + mFormat, + mChannelMask, + mSessionId, + mStepCount, + mFrameCount, + stateChar, + mFillingUpStatus, + mServerProxy->getSampleRate(), + 20.0 * log10((vlr & 0xFFFF) / 4096.0), + 20.0 * log10((vlr >> 16) / 4096.0), + mCblk->server, + mCblk->user, + (int)mMainBuffer, + (int)mAuxBuffer, + mCblk->flags, + mUnderrunCount, + nowInUnderrun); +} + +// AudioBufferProvider interface +status_t AudioFlinger::PlaybackThread::Track::getNextBuffer( + AudioBufferProvider::Buffer* buffer, int64_t pts) +{ + audio_track_cblk_t* cblk = this->cblk(); + uint32_t framesReady; + uint32_t framesReq = buffer->frameCount; + + // Check if last stepServer failed, try to step now + if (mStepServerFailed) { + // FIXME When called by fast mixer, this takes a mutex with tryLock(). + // Since the fast mixer is higher priority than client callback thread, + // it does not result in priority inversion for client. + // But a non-blocking solution would be preferable to avoid + // fast mixer being unable to tryLock(), and + // to avoid the extra context switches if the client wakes up, + // discovers the mutex is locked, then has to wait for fast mixer to unlock. + if (!step()) goto getNextBuffer_exit; + ALOGV("stepServer recovered"); + mStepServerFailed = false; + } + + // FIXME Same as above + framesReady = mServerProxy->framesReady(); + + if (CC_LIKELY(framesReady)) { + uint32_t s = cblk->server; + uint32_t bufferEnd = cblk->serverBase + mFrameCount; + + bufferEnd = (cblk->loopEnd < bufferEnd) ? 
cblk->loopEnd : bufferEnd; + if (framesReq > framesReady) { + framesReq = framesReady; + } + if (framesReq > bufferEnd - s) { + framesReq = bufferEnd - s; + } + + buffer->raw = getBuffer(s, framesReq); + buffer->frameCount = framesReq; + return NO_ERROR; + } + +getNextBuffer_exit: + buffer->raw = NULL; + buffer->frameCount = 0; + ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get()); + return NOT_ENOUGH_DATA; +} + +// Note that framesReady() takes a mutex on the control block using tryLock(). +// This could result in priority inversion if framesReady() is called by the normal mixer, +// as the normal mixer thread runs at lower +// priority than the client's callback thread: there is a short window within framesReady() +// during which the normal mixer could be preempted, and the client callback would block. +// Another problem can occur if framesReady() is called by the fast mixer: +// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer. +// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue. 
+size_t AudioFlinger::PlaybackThread::Track::framesReady() const { + return mServerProxy->framesReady(); +} + +// Don't call for fast tracks; the framesReady() could result in priority inversion +bool AudioFlinger::PlaybackThread::Track::isReady() const { + if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) { + return true; + } + + if (framesReady() >= mFrameCount || + (mCblk->flags & CBLK_FORCEREADY)) { + mFillingUpStatus = FS_FILLED; + android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags); + return true; + } + return false; +} + +status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event, + int triggerSession) +{ + status_t status = NO_ERROR; + ALOGV("start(%d), calling pid %d session %d", + mName, IPCThreadState::self()->getCallingPid(), mSessionId); + + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + Mutex::Autolock _l(thread->mLock); + track_state state = mState; + // here the track could be either new, or restarted + // in both cases "unstop" the track + if (state == PAUSED) { + mState = TrackBase::RESUMING; + ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this); + } else { + mState = TrackBase::ACTIVE; + ALOGV("? 
=> ACTIVE (%d) on thread %p", mName, this); + } + + if (!isOutputTrack() && state != ACTIVE && state != RESUMING) { + thread->mLock.unlock(); + status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId); + thread->mLock.lock(); + +#ifdef ADD_BATTERY_DATA + // to track the speaker usage + if (status == NO_ERROR) { + addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart); + } +#endif + } + if (status == NO_ERROR) { + PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); + playbackThread->addTrack_l(this); + } else { + mState = state; + triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); + } + } else { + status = BAD_VALUE; + } + return status; +} + +void AudioFlinger::PlaybackThread::Track::stop() +{ + ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid()); + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + Mutex::Autolock _l(thread->mLock); + track_state state = mState; + if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) { + // If the track is not active (PAUSED and buffers full), flush buffers + PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); + if (playbackThread->mActiveTracks.indexOf(this) < 0) { + reset(); + mState = STOPPED; + } else if (!isFastTrack()) { + mState = STOPPED; + } else { + // prepareTracks_l() will set state to STOPPING_2 after next underrun, + // and then to STOPPED and reset() when presentation is complete + mState = STOPPING_1; + } + ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName, + playbackThread); + } + if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) { + thread->mLock.unlock(); + AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId); + thread->mLock.lock(); + +#ifdef ADD_BATTERY_DATA + // to track the speaker usage + addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop); +#endif + } + } +} + +void AudioFlinger::PlaybackThread::Track::pause() 
+{ + ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid()); + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + Mutex::Autolock _l(thread->mLock); + if (mState == ACTIVE || mState == RESUMING) { + mState = PAUSING; + ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get()); + if (!isOutputTrack()) { + thread->mLock.unlock(); + AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId); + thread->mLock.lock(); + +#ifdef ADD_BATTERY_DATA + // to track the speaker usage + addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop); +#endif + } + } + } +} + +void AudioFlinger::PlaybackThread::Track::flush() +{ + ALOGV("flush(%d)", mName); + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + Mutex::Autolock _l(thread->mLock); + if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED && + mState != PAUSING && mState != IDLE && mState != FLUSHED) { + return; + } + // No point remaining in PAUSED state after a flush => go to + // FLUSHED state + mState = FLUSHED; + // do not reset the track if it is still in the process of being stopped or paused. + // this will be done by prepareTracks_l() when the track is stopped. + // prepareTracks_l() will see mState == FLUSHED, then + // remove from active track list, reset(), and trigger presentation complete + PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); + if (playbackThread->mActiveTracks.indexOf(this) < 0) { + reset(); + } + } +} + +void AudioFlinger::PlaybackThread::Track::reset() +{ + // Do not reset twice to avoid discarding data written just after a flush and before + // the audioflinger thread detects the track is stopped. 
+ if (!mResetDone) { + TrackBase::reset(); + // Force underrun condition to avoid false underrun callback until first data is + // written to buffer + android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags); + android_atomic_or(CBLK_UNDERRUN, &mCblk->flags); + mFillingUpStatus = FS_FILLING; + mResetDone = true; + if (mState == FLUSHED) { + mState = IDLE; + } + } +} + +status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId) +{ + status_t status = DEAD_OBJECT; + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); + sp<AudioFlinger> af = mClient->audioFlinger(); + + Mutex::Autolock _l(af->mLock); + + sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId); + + if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) { + Mutex::Autolock _dl(playbackThread->mLock); + Mutex::Autolock _sl(srcThread->mLock); + sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); + if (chain == 0) { + return INVALID_OPERATION; + } + + sp<EffectModule> effect = chain->getEffectFromId_l(EffectId); + if (effect == 0) { + return INVALID_OPERATION; + } + srcThread->removeEffect_l(effect); + playbackThread->addEffect_l(effect); + // removeEffect_l() has stopped the effect if it was active so it must be restarted + if (effect->state() == EffectModule::ACTIVE || + effect->state() == EffectModule::STOPPING) { + effect->start(); + } + + sp<EffectChain> dstChain = effect->chain().promote(); + if (dstChain == 0) { + srcThread->addEffect_l(effect); + return INVALID_OPERATION; + } + AudioSystem::unregisterEffect(effect->id()); + AudioSystem::registerEffect(&effect->desc(), + srcThread->id(), + dstChain->strategy(), + AUDIO_SESSION_OUTPUT_MIX, + effect->id()); + } + status = playbackThread->attachAuxEffect(this, EffectId); + } + return status; +} + +void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer) +{ 
+ mAuxEffectId = EffectId; + mAuxBuffer = buffer; +} + +bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten, + size_t audioHalFrames) +{ + // a track is considered presented when the total number of frames written to audio HAL + // corresponds to the number of frames written when presentationComplete() is called for the + // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time. + if (mPresentationCompleteFrames == 0) { + mPresentationCompleteFrames = framesWritten + audioHalFrames; + ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d", + mPresentationCompleteFrames, audioHalFrames); + } + if (framesWritten >= mPresentationCompleteFrames) { + ALOGV("presentationComplete() session %d complete: framesWritten %d", + mSessionId, framesWritten); + triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); + return true; + } + return false; +} + +void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type) +{ + for (int i = 0; i < (int)mSyncEvents.size(); i++) { + if (mSyncEvents[i]->type() == type) { + mSyncEvents[i]->trigger(); + mSyncEvents.removeAt(i); + i--; + } + } +} + +// implement VolumeBufferProvider interface + +uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR() +{ + // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs + ALOG_ASSERT(isFastTrack() && (mCblk != NULL)); + uint32_t vlr = mServerProxy->getVolumeLR(); + uint32_t vl = vlr & 0xFFFF; + uint32_t vr = vlr >> 16; + // track volumes come from shared memory, so can't be trusted and must be clamped + if (vl > MAX_GAIN_INT) { + vl = MAX_GAIN_INT; + } + if (vr > MAX_GAIN_INT) { + vr = MAX_GAIN_INT; + } + // now apply the cached master volume and stream type volume; + // this is trusted but lacks any synchronization or barrier so may be stale + float v = mCachedVolume; + vl *= v; + vr *= v; + // re-combine into U4.16 + vlr = (vr << 16) 
| (vl & 0xFFFF); + // FIXME look at mute, pause, and stop flags + return vlr; +} + +status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event) +{ + if (mState == TERMINATED || mState == PAUSED || + ((framesReady() == 0) && ((mSharedBuffer != 0) || + (mState == STOPPED)))) { + ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ", + mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady()); + event->cancel(); + return INVALID_OPERATION; + } + (void) TrackBase::setSyncEvent(event); + return NO_ERROR; +} + +void AudioFlinger::PlaybackThread::Track::invalidate() +{ + // FIXME should use proxy + android_atomic_or(CBLK_INVALID, &mCblk->flags); + mCblk->cv.signal(); + mIsInvalid = true; +} + +// ---------------------------------------------------------------------------- + +sp<AudioFlinger::PlaybackThread::TimedTrack> +AudioFlinger::PlaybackThread::TimedTrack::create( + PlaybackThread *thread, + const sp<Client>& client, + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId) { + if (!client->reserveTimedTrack()) + return 0; + + return new TimedTrack( + thread, client, streamType, sampleRate, format, channelMask, frameCount, + sharedBuffer, sessionId); +} + +AudioFlinger::PlaybackThread::TimedTrack::TimedTrack( + PlaybackThread *thread, + const sp<Client>& client, + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + const sp<IMemory>& sharedBuffer, + int sessionId) + : Track(thread, client, streamType, sampleRate, format, channelMask, + frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED), + mQueueHeadInFlight(false), + mTrimQueueHeadOnRelease(false), + mFramesPendingInQueue(0), + mTimedSilenceBuffer(NULL), + mTimedSilenceBufferSize(0), + 
mTimedAudioOutputOnTime(false), + mMediaTimeTransformValid(false) +{ + LocalClock lc; + mLocalTimeFreq = lc.getLocalFreq(); + + mLocalTimeToSampleTransform.a_zero = 0; + mLocalTimeToSampleTransform.b_zero = 0; + mLocalTimeToSampleTransform.a_to_b_numer = sampleRate; + mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq; + LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer, + &mLocalTimeToSampleTransform.a_to_b_denom); + + mMediaTimeToSampleTransform.a_zero = 0; + mMediaTimeToSampleTransform.b_zero = 0; + mMediaTimeToSampleTransform.a_to_b_numer = sampleRate; + mMediaTimeToSampleTransform.a_to_b_denom = 1000000; + LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer, + &mMediaTimeToSampleTransform.a_to_b_denom); +} + +AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() { + mClient->releaseTimedTrack(); + delete [] mTimedSilenceBuffer; +} + +status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer( + size_t size, sp<IMemory>* buffer) { + + Mutex::Autolock _l(mTimedBufferQueueLock); + + trimTimedBufferQueue_l(); + + // lazily initialize the shared memory heap for timed buffers + if (mTimedMemoryDealer == NULL) { + const int kTimedBufferHeapSize = 512 << 10; + + mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize, + "AudioFlingerTimed"); + if (mTimedMemoryDealer == NULL) + return NO_MEMORY; + } + + sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size); + if (newBuffer == NULL) { + newBuffer = mTimedMemoryDealer->allocate(size); + if (newBuffer == NULL) + return NO_MEMORY; + } + + *buffer = newBuffer; + return NO_ERROR; +} + +// caller must hold mTimedBufferQueueLock +void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() { + int64_t mediaTimeNow; + { + Mutex::Autolock mttLock(mMediaTimeTransformLock); + if (!mMediaTimeTransformValid) + return; + + int64_t targetTimeNow; + status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) + ? 
mCCHelper.getCommonTime(&targetTimeNow) + : mCCHelper.getLocalTime(&targetTimeNow); + + if (OK != res) + return; + + if (!mMediaTimeTransform.doReverseTransform(targetTimeNow, + &mediaTimeNow)) { + return; + } + } + + size_t trimEnd; + for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) { + int64_t bufEnd; + + if ((trimEnd + 1) < mTimedBufferQueue.size()) { + // We have a next buffer. Just use its PTS as the PTS of the frame + // following the last frame in this buffer. If the stream is sparse + // (ie, there are deliberate gaps left in the stream which should be + // filled with silence by the TimedAudioTrack), then this can result + // in one extra buffer being left un-trimmed when it could have + // been. In general, this is not typical, and we would rather + // optimized away the TS calculation below for the more common case + // where PTSes are contiguous. + bufEnd = mTimedBufferQueue[trimEnd + 1].pts(); + } else { + // We have no next buffer. Compute the PTS of the frame following + // the last frame in this buffer by computing the duration of of + // this frame in media time units and adding it to the PTS of the + // buffer. + int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size() + / mFrameSize; + + if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount, + &bufEnd)) { + ALOGE("Failed to convert frame count of %lld to media time" + " duration" " (scale factor %d/%u) in %s", + frameCount, + mMediaTimeToSampleTransform.a_to_b_numer, + mMediaTimeToSampleTransform.a_to_b_denom, + __PRETTY_FUNCTION__); + break; + } + bufEnd += mTimedBufferQueue[trimEnd].pts(); + } + + if (bufEnd > mediaTimeNow) + break; + + // Is the buffer we want to use in the middle of a mix operation right + // now? If so, don't actually trim it. Just wait for the releaseBuffer + // from the mixer which should be coming back shortly. + if (!trimEnd && mQueueHeadInFlight) { + mTrimQueueHeadOnRelease = true; + } + } + + size_t trimStart = mTrimQueueHeadOnRelease ? 
1 : 0; + if (trimStart < trimEnd) { + // Update the bookkeeping for framesReady() + for (size_t i = trimStart; i < trimEnd; ++i) { + updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim"); + } + + // Now actually remove the buffers from the queue. + mTimedBufferQueue.removeItemsAt(trimStart, trimEnd); + } +} + +void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l( + const char* logTag) { + ALOG_ASSERT(mTimedBufferQueue.size() > 0, + "%s called (reason \"%s\"), but timed buffer queue has no" + " elements to trim.", __FUNCTION__, logTag); + + updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag); + mTimedBufferQueue.removeAt(0); +} + +void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l( + const TimedBuffer& buf, + const char* logTag) { + uint32_t bufBytes = buf.buffer()->size(); + uint32_t consumedAlready = buf.position(); + + ALOG_ASSERT(consumedAlready <= bufBytes, + "Bad bookkeeping while updating frames pending. Timed buffer is" + " only %u bytes long, but claims to have consumed %u" + " bytes. (update reason: \"%s\")", + bufBytes, consumedAlready, logTag); + + uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize; + ALOG_ASSERT(mFramesPendingInQueue >= bufFrames, + "Bad bookkeeping while updating frames pending. Should have at" + " least %u queued frames, but we think we have only %u. 
(update" + " reason: \"%s\")", + bufFrames, mFramesPendingInQueue, logTag); + + mFramesPendingInQueue -= bufFrames; +} + +status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer( + const sp<IMemory>& buffer, int64_t pts) { + + { + Mutex::Autolock mttLock(mMediaTimeTransformLock); + if (!mMediaTimeTransformValid) + return INVALID_OPERATION; + } + + Mutex::Autolock _l(mTimedBufferQueueLock); + + uint32_t bufFrames = buffer->size() / mFrameSize; + mFramesPendingInQueue += bufFrames; + mTimedBufferQueue.add(TimedBuffer(buffer, pts)); + + return NO_ERROR; +} + +status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform( + const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) { + + ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d", + xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom, + target); + + if (!(target == TimedAudioTrack::LOCAL_TIME || + target == TimedAudioTrack::COMMON_TIME)) { + return BAD_VALUE; + } + + Mutex::Autolock lock(mMediaTimeTransformLock); + mMediaTimeTransform = xform; + mMediaTimeTransformTarget = target; + mMediaTimeTransformValid = true; + + return NO_ERROR; +} + +#define min(a, b) ((a) < (b) ? 
(a) : (b)) + +// implementation of getNextBuffer for tracks whose buffers have timestamps +status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer( + AudioBufferProvider::Buffer* buffer, int64_t pts) +{ + if (pts == AudioBufferProvider::kInvalidPTS) { + buffer->raw = NULL; + buffer->frameCount = 0; + mTimedAudioOutputOnTime = false; + return INVALID_OPERATION; + } + + Mutex::Autolock _l(mTimedBufferQueueLock); + + ALOG_ASSERT(!mQueueHeadInFlight, + "getNextBuffer called without releaseBuffer!"); + + while (true) { + + // if we have no timed buffers, then fail + if (mTimedBufferQueue.isEmpty()) { + buffer->raw = NULL; + buffer->frameCount = 0; + return NOT_ENOUGH_DATA; + } + + TimedBuffer& head = mTimedBufferQueue.editItemAt(0); + + // calculate the PTS of the head of the timed buffer queue expressed in + // local time + int64_t headLocalPTS; + { + Mutex::Autolock mttLock(mMediaTimeTransformLock); + + ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid"); + + if (mMediaTimeTransform.a_to_b_denom == 0) { + // the transform represents a pause, so yield silence + timedYieldSilence_l(buffer->frameCount, buffer); + return NO_ERROR; + } + + int64_t transformedPTS; + if (!mMediaTimeTransform.doForwardTransform(head.pts(), + &transformedPTS)) { + // the transform failed. 
this shouldn't happen, but if it does + // then just drop this buffer + ALOGW("timedGetNextBuffer transform failed"); + buffer->raw = NULL; + buffer->frameCount = 0; + trimTimedBufferQueueHead_l("getNextBuffer; no transform"); + return NO_ERROR; + } + + if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) { + if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS, + &headLocalPTS)) { + buffer->raw = NULL; + buffer->frameCount = 0; + return INVALID_OPERATION; + } + } else { + headLocalPTS = transformedPTS; + } + } + + // adjust the head buffer's PTS to reflect the portion of the head buffer + // that has already been consumed + int64_t effectivePTS = headLocalPTS + + ((head.position() / mFrameSize) * mLocalTimeFreq / sampleRate()); + + // Calculate the delta in samples between the head of the input buffer + // queue and the start of the next output buffer that will be written. + // If the transformation fails because of over or underflow, it means + // that the sample's position in the output stream is so far out of + // whack that it should just be dropped. + int64_t sampleDelta; + if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) { + ALOGV("*** head buffer is too far from PTS: dropped buffer"); + trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from" + " mix"); + continue; + } + if (!mLocalTimeToSampleTransform.doForwardTransform( + (effectivePTS - pts) << 32, &sampleDelta)) { + ALOGV("*** too late during sample rate transform: dropped buffer"); + trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample"); + continue; + } + + ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld" + " sampleDelta=[%d.%08x]", + head.pts(), head.position(), pts, + static_cast<int32_t>((sampleDelta >= 0 ? 
0 : 1) + + (sampleDelta >> 32)), + static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF)); + + // if the delta between the ideal placement for the next input sample and + // the current output position is within this threshold, then we will + // concatenate the next input samples to the previous output + const int64_t kSampleContinuityThreshold = + (static_cast<int64_t>(sampleRate()) << 32) / 250; + + // if this is the first buffer of audio that we're emitting from this track + // then it should be almost exactly on time. + const int64_t kSampleStartupThreshold = 1LL << 32; + + if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) || + (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) { + // the next input is close enough to being on time, so concatenate it + // with the last output + timedYieldSamples_l(buffer); + + ALOGVV("*** on time: head.pos=%d frameCount=%u", + head.position(), buffer->frameCount); + return NO_ERROR; + } + + // Looks like our output is not on time. Reset our on timed status. + // Next time we mix samples from our input queue, then should be within + // the StartupThreshold. 
+ mTimedAudioOutputOnTime = false; + if (sampleDelta > 0) { + // the gap between the current output position and the proper start of + // the next input sample is too big, so fill it with silence + uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32; + + timedYieldSilence_l(framesUntilNextInput, buffer); + ALOGV("*** silence: frameCount=%u", buffer->frameCount); + return NO_ERROR; + } else { + // the next input sample is late + uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32)); + size_t onTimeSamplePosition = + head.position() + lateFrames * mFrameSize; + + if (onTimeSamplePosition > head.buffer()->size()) { + // all the remaining samples in the head are too late, so + // drop it and move on + ALOGV("*** too late: dropped buffer"); + trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer"); + continue; + } else { + // skip over the late samples + head.setPosition(onTimeSamplePosition); + + // yield the available samples + timedYieldSamples_l(buffer); + + ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount); + return NO_ERROR; + } + } + } +} + +// Yield samples from the timed buffer queue head up to the given output +// buffer's capacity. 
+// +// Caller must hold mTimedBufferQueueLock +void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l( + AudioBufferProvider::Buffer* buffer) { + + const TimedBuffer& head = mTimedBufferQueue[0]; + + buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) + + head.position()); + + uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) / + mFrameSize); + size_t framesRequested = buffer->frameCount; + buffer->frameCount = min(framesLeftInHead, framesRequested); + + mQueueHeadInFlight = true; + mTimedAudioOutputOnTime = true; +} + +// Yield samples of silence up to the given output buffer's capacity +// +// Caller must hold mTimedBufferQueueLock +void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l( + uint32_t numFrames, AudioBufferProvider::Buffer* buffer) { + + // lazily allocate a buffer filled with silence + if (mTimedSilenceBufferSize < numFrames * mFrameSize) { + delete [] mTimedSilenceBuffer; + mTimedSilenceBufferSize = numFrames * mFrameSize; + mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize]; + memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize); + } + + buffer->raw = mTimedSilenceBuffer; + size_t framesRequested = buffer->frameCount; + buffer->frameCount = min(numFrames, framesRequested); + + mTimedAudioOutputOnTime = false; +} + +// AudioBufferProvider interface +void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer( + AudioBufferProvider::Buffer* buffer) { + + Mutex::Autolock _l(mTimedBufferQueueLock); + + // If the buffer which was just released is part of the buffer at the head + // of the queue, be sure to update the amt of the buffer which has been + // consumed. If the buffer being returned is not part of the head of the + // queue, its either because the buffer is part of the silence buffer, or + // because the head of the timed queue was trimmed after the mixer called + // getNextBuffer but before the mixer called releaseBuffer. 
+ if (buffer->raw == mTimedSilenceBuffer) { + ALOG_ASSERT(!mQueueHeadInFlight, + "Queue head in flight during release of silence buffer!"); + goto done; + } + + ALOG_ASSERT(mQueueHeadInFlight, + "TimedTrack::releaseBuffer of non-silence buffer, but no queue" + " head in flight."); + + if (mTimedBufferQueue.size()) { + TimedBuffer& head = mTimedBufferQueue.editItemAt(0); + + void* start = head.buffer()->pointer(); + void* end = reinterpret_cast<void*>( + reinterpret_cast<uint8_t*>(head.buffer()->pointer()) + + head.buffer()->size()); + + ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end), + "released buffer not within the head of the timed buffer" + " queue; qHead = [%p, %p], released buffer = %p", + start, end, buffer->raw); + + head.setPosition(head.position() + + (buffer->frameCount * mFrameSize)); + mQueueHeadInFlight = false; + + ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount, + "Bad bookkeeping during releaseBuffer! Should have at" + " least %u queued frames, but we think we have only %u", + buffer->frameCount, mFramesPendingInQueue); + + mFramesPendingInQueue -= buffer->frameCount; + + if ((static_cast<size_t>(head.position()) >= head.buffer()->size()) + || mTrimQueueHeadOnRelease) { + trimTimedBufferQueueHead_l("releaseBuffer"); + mTrimQueueHeadOnRelease = false; + } + } else { + LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" + " buffers in the timed buffer queue"); + } + +done: + buffer->raw = 0; + buffer->frameCount = 0; +} + +size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const { + Mutex::Autolock _l(mTimedBufferQueueLock); + return mFramesPendingInQueue; +} + +AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer() + : mPTS(0), mPosition(0) {} + +AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer( + const sp<IMemory>& buffer, int64_t pts) + : mBuffer(buffer), mPTS(pts), mPosition(0) {} + + +// ---------------------------------------------------------------------------- + 
+AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( + PlaybackThread *playbackThread, + DuplicatingThread *sourceThread, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount) + : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, + NULL, 0, IAudioFlinger::TRACK_DEFAULT), + mActive(false), mSourceThread(sourceThread), mClientProxy(NULL) +{ + + if (mCblk != NULL) { + mOutBuffer.frameCount = 0; + playbackThread->mTracks.add(this); + ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, " + "mCblk->frameCount_ %u, mChannelMask 0x%08x mBufferEnd %p", + mCblk, mBuffer, + mCblk->frameCount_, mChannelMask, mBufferEnd); + // since client and server are in the same process, + // the buffer has the same virtual address on both sides + mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize); + mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000)); + mClientProxy->setSendLevel(0.0); + mClientProxy->setSampleRate(sampleRate); + } else { + ALOGW("Error creating output track on thread %p", playbackThread); + } +} + +AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack() +{ + clearBufferQueue(); + delete mClientProxy; + // superclass destructor will now delete the server proxy and shared memory both refer to +} + +status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event, + int triggerSession) +{ + status_t status = Track::start(event, triggerSession); + if (status != NO_ERROR) { + return status; + } + + mActive = true; + mRetryCount = 127; + return status; +} + +void AudioFlinger::PlaybackThread::OutputTrack::stop() +{ + Track::stop(); + clearBufferQueue(); + mOutBuffer.frameCount = 0; + mActive = false; +} + +bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames) +{ + Buffer *pInBuffer; + Buffer inBuffer; + uint32_t channelCount = mChannelCount; + bool outputBufferFull 
= false; + inBuffer.frameCount = frames; + inBuffer.i16 = data; + + uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs(); + + if (!mActive && frames != 0) { + start(); + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + MixerThread *mixerThread = (MixerThread *)thread.get(); + if (mFrameCount > frames) { + if (mBufferQueue.size() < kMaxOverFlowBuffers) { + uint32_t startFrames = (mFrameCount - frames); + pInBuffer = new Buffer; + pInBuffer->mBuffer = new int16_t[startFrames * channelCount]; + pInBuffer->frameCount = startFrames; + pInBuffer->i16 = pInBuffer->mBuffer; + memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t)); + mBufferQueue.add(pInBuffer); + } else { + ALOGW ("OutputTrack::write() %p no more buffers in queue", this); + } + } + } + } + + while (waitTimeLeftMs) { + // First write pending buffers, then new data + if (mBufferQueue.size()) { + pInBuffer = mBufferQueue.itemAt(0); + } else { + pInBuffer = &inBuffer; + } + + if (pInBuffer->frameCount == 0) { + break; + } + + if (mOutBuffer.frameCount == 0) { + mOutBuffer.frameCount = pInBuffer->frameCount; + nsecs_t startTime = systemTime(); + if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) { + ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this, + mThread.unsafe_get()); + outputBufferFull = true; + break; + } + uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime); + if (waitTimeLeftMs >= waitTimeMs) { + waitTimeLeftMs -= waitTimeMs; + } else { + waitTimeLeftMs = 0; + } + } + + uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? 
mOutBuffer.frameCount : + pInBuffer->frameCount; + memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t)); + mClientProxy->stepUser(outFrames); + pInBuffer->frameCount -= outFrames; + pInBuffer->i16 += outFrames * channelCount; + mOutBuffer.frameCount -= outFrames; + mOutBuffer.i16 += outFrames * channelCount; + + if (pInBuffer->frameCount == 0) { + if (mBufferQueue.size()) { + mBufferQueue.removeAt(0); + delete [] pInBuffer->mBuffer; + delete pInBuffer; + ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this, + mThread.unsafe_get(), mBufferQueue.size()); + } else { + break; + } + } + } + + // If we could not write all frames, allocate a buffer and queue it for next time. + if (inBuffer.frameCount) { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0 && !thread->standby()) { + if (mBufferQueue.size() < kMaxOverFlowBuffers) { + pInBuffer = new Buffer; + pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount]; + pInBuffer->frameCount = inBuffer.frameCount; + pInBuffer->i16 = pInBuffer->mBuffer; + memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount * + sizeof(int16_t)); + mBufferQueue.add(pInBuffer); + ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this, + mThread.unsafe_get(), mBufferQueue.size()); + } else { + ALOGW("OutputTrack::write() %p thread %p no more overflow buffers", + mThread.unsafe_get(), this); + } + } + } + + // Calling write() with a 0 length buffer, means that no more data will be written: + // If no more buffers are pending, fill output track buffer to make sure it is started + // by output mixer. 
+ if (frames == 0 && mBufferQueue.size() == 0) { + if (mCblk->user < mFrameCount) { + frames = mFrameCount - mCblk->user; + pInBuffer = new Buffer; + pInBuffer->mBuffer = new int16_t[frames * channelCount]; + pInBuffer->frameCount = frames; + pInBuffer->i16 = pInBuffer->mBuffer; + memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t)); + mBufferQueue.add(pInBuffer); + } else if (mActive) { + stop(); + } + } + + return outputBufferFull; +} + +status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer( + AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs) +{ + audio_track_cblk_t* cblk = mCblk; + uint32_t framesReq = buffer->frameCount; + + ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server); + buffer->frameCount = 0; + + size_t framesAvail; + { + Mutex::Autolock _l(cblk->lock); + + // read the server count again + while (!(framesAvail = mClientProxy->framesAvailable_l())) { + if (CC_UNLIKELY(!mActive)) { + ALOGV("Not active and NO_MORE_BUFFERS"); + return NO_MORE_BUFFERS; + } + status_t result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs)); + if (result != NO_ERROR) { + return NO_MORE_BUFFERS; + } + } + } + + if (framesReq > framesAvail) { + framesReq = framesAvail; + } + + uint32_t u = cblk->user; + uint32_t bufferEnd = cblk->userBase + mFrameCount; + + if (framesReq > bufferEnd - u) { + framesReq = bufferEnd - u; + } + + buffer->frameCount = framesReq; + buffer->raw = mClientProxy->buffer(u); + return NO_ERROR; +} + + +void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue() +{ + size_t size = mBufferQueue.size(); + + for (size_t i = 0; i < size; i++) { + Buffer *pBuffer = mBufferQueue.itemAt(i); + delete [] pBuffer->mBuffer; + delete pBuffer; + } + mBufferQueue.clear(); +} + + +// ---------------------------------------------------------------------------- +// Record +// ---------------------------------------------------------------------------- + 
+AudioFlinger::RecordHandle::RecordHandle( + const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack) + : BnAudioRecord(), + mRecordTrack(recordTrack) +{ +} + +AudioFlinger::RecordHandle::~RecordHandle() { + stop_nonvirtual(); + mRecordTrack->destroy(); +} + +sp<IMemory> AudioFlinger::RecordHandle::getCblk() const { + return mRecordTrack->getCblk(); +} + +status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event, + int triggerSession) { + ALOGV("RecordHandle::start()"); + return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession); +} + +void AudioFlinger::RecordHandle::stop() { + stop_nonvirtual(); +} + +void AudioFlinger::RecordHandle::stop_nonvirtual() { + ALOGV("RecordHandle::stop()"); + mRecordTrack->stop(); +} + +status_t AudioFlinger::RecordHandle::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) +{ + return BnAudioRecord::onTransact(code, data, reply, flags); +} + +// ---------------------------------------------------------------------------- + +// RecordTrack constructor must be called with AudioFlinger::mLock held +AudioFlinger::RecordThread::RecordTrack::RecordTrack( + RecordThread *thread, + const sp<Client>& client, + uint32_t sampleRate, + audio_format_t format, + audio_channel_mask_t channelMask, + size_t frameCount, + int sessionId) + : TrackBase(thread, client, sampleRate, format, + channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, false /*isOut*/), + mOverflow(false) +{ + ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer); +} + +AudioFlinger::RecordThread::RecordTrack::~RecordTrack() +{ + ALOGV("%s", __func__); +} + +// AudioBufferProvider interface +status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, + int64_t pts) +{ + audio_track_cblk_t* cblk = this->cblk(); + uint32_t framesAvail; + uint32_t framesReq = buffer->frameCount; + + // Check if last stepServer failed, try to step now + if 
(mStepServerFailed) { + if (!step()) { + goto getNextBuffer_exit; + } + ALOGV("stepServer recovered"); + mStepServerFailed = false; + } + + // FIXME lock is not actually held, so overrun is possible + framesAvail = mServerProxy->framesAvailableIn_l(); + + if (CC_LIKELY(framesAvail)) { + uint32_t s = cblk->server; + uint32_t bufferEnd = cblk->serverBase + mFrameCount; + + if (framesReq > framesAvail) { + framesReq = framesAvail; + } + if (framesReq > bufferEnd - s) { + framesReq = bufferEnd - s; + } + + buffer->raw = getBuffer(s, framesReq); + buffer->frameCount = framesReq; + return NO_ERROR; + } + +getNextBuffer_exit: + buffer->raw = NULL; + buffer->frameCount = 0; + return NOT_ENOUGH_DATA; +} + +status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event, + int triggerSession) +{ + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + RecordThread *recordThread = (RecordThread *)thread.get(); + return recordThread->start(this, event, triggerSession); + } else { + return BAD_VALUE; + } +} + +void AudioFlinger::RecordThread::RecordTrack::stop() +{ + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + RecordThread *recordThread = (RecordThread *)thread.get(); + recordThread->mLock.lock(); + bool doStop = recordThread->stop_l(this); + if (doStop) { + TrackBase::reset(); + // Force overrun condition to avoid false overrun callback until first data is + // read from buffer + android_atomic_or(CBLK_UNDERRUN, &mCblk->flags); + } + recordThread->mLock.unlock(); + if (doStop) { + AudioSystem::stopInput(recordThread->id()); + } + } +} + +void AudioFlinger::RecordThread::RecordTrack::destroy() +{ + // see comments at AudioFlinger::PlaybackThread::Track::destroy() + sp<RecordTrack> keep(this); + { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + if (mState == ACTIVE || mState == RESUMING) { + AudioSystem::stopInput(thread->id()); + } + AudioSystem::releaseInput(thread->id()); + Mutex::Autolock 
_l(thread->mLock); + RecordThread *recordThread = (RecordThread *) thread.get(); + recordThread->destroyTrack_l(this); + } + } +} + + +/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result) +{ + result.append(" Clien Fmt Chn mask Session Step S Serv User FrameCount\n"); +} + +void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size) +{ + snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %08x %08x %05d\n", + (mClient == 0) ? getpid_cached : mClient->pid(), + mFormat, + mChannelMask, + mSessionId, + mStepCount, + mState, + mCblk->server, + mCblk->user, + mFrameCount); +} + +}; // namespace android diff --git a/services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp b/services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp index d45d697..7fc03a6 100644 --- a/services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp +++ b/services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp @@ -34,9 +34,9 @@ const int32_t* readResamplerCoefficients(bool upSample) { ALOGV("readResamplerCoefficients"); if (upSample) { - return up_sampler_filter_coefficients; + return (const int32_t *) up_sampler_filter_coefficients; } else { - return dn_sampler_filter_coefficients; + return (const int32_t *) dn_sampler_filter_coefficients; } } diff --git a/services/audioflinger/audio-resampler/filter_coefficients.h b/services/audioflinger/audio-resampler/filter_coefficients.h index bf70c63..8b082b3 100644 --- a/services/audioflinger/audio-resampler/filter_coefficients.h +++ b/services/audioflinger/audio-resampler/filter_coefficients.h @@ -18,7 +18,7 @@ namespace android { // cmd-line: fir -l 7 -s 48000 -c 23400 -n 16 -b 9.62 -const int32_t up_sampler_filter_coefficients[] __attribute__ ((aligned (32))) = { +const uint32_t up_sampler_filter_coefficients[] __attribute__ ((aligned (32))) = { 0x7ccccccd, 0x0323eb7f, 0xfd086246, 0x02b2aa5c, 0xfda45e2c, 0x01fa5183, 0xfe694e12, 0x0137e672, 
0xff1c87d3, 0x009ce6d8, 0xff9a68b0, 0x003d150d, 0xffde727a, 0x00106595, 0xfff93679, 0x00021fc5, 0x7cc9b757, 0x022ac835, 0xfd7e3a71, 0x026b7da1, 0xfdd2b905, 0x01db7c90, 0xfe7db77c, 0x012aa7bf, 0xff24dc32, 0x0097dfc9, 0xff9d4ae9, 0x003b8742, 0xffdf38e5, 0x00100be5, 0xfff959f5, 0x0002144b, 0x7cc0773c, 0x01354bc1, 0xfdf365e8, 0x0224726d, 0xfe011d2e, 0x01bc908b, 0xfe923a2b, 0x011d528d, 0xff2d426f, 0x0092cbc0, 0xffa035cc, 0x0039f42e, 0xffe00236, 0x000fb0d2, 0xfff97dfa, 0x000208b0, @@ -151,7 +151,7 @@ const int32_t up_sampler_filter_coefficients[] __attribute__ ((aligned (32))) = }; // cmd-line: fir -l 7 -s 44100 -c 19876 -n 16 -b 9.62 -const int32_t dn_sampler_filter_coefficients[] __attribute__ ((aligned (32))) = { +const uint32_t dn_sampler_filter_coefficients[] __attribute__ ((aligned (32))) = { 0x736144b5, 0x0c333a22, 0xf4fca390, 0x09424904, 0xf8c92a41, 0x052ac04c, 0xfca4fc64, 0x01ed8cc7, 0xff119cc0, 0x0053ba6e, 0xfff9a80d, 0xffeaeaab, 0x001690d9, 0xfff11dcd, 0x000715d9, 0xfffdb4b9, 0x735ed3aa, 0x0b433de8, 0xf560f0f3, 0x091282c4, 0xf8dd5ccf, 0x0525cb66, 0xfca23e3d, 0x01f33960, 0xff0bc9c2, 0x00586127, 0xfff68603, 0xffecbad5, 0x0015ab8b, 0xfff17c10, 0x0006f71a, 0xfffdbc2f, 0x735780bb, 0x0a55a98f, 0xf5c5b2a1, 0x08e1ea27, 0xf8f25767, 0x0520366d, 0xfc9ff262, 0x01f89c98, 0xff0620a4, 0x005cf349, 0xfff36c0d, 0xffee8913, 0x0014c5dc, 0xfff1db1a, 0x0006d7d7, 0xfffdc3db, diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk index eff47c8..83d9ccd 100644 --- a/services/camera/libcameraservice/Android.mk +++ b/services/camera/libcameraservice/Android.mk @@ -10,8 +10,11 @@ LOCAL_SRC_FILES:= \ CameraService.cpp \ CameraClient.cpp \ Camera2Client.cpp \ + ProCamera2Client.cpp \ + Camera2ClientBase.cpp \ + CameraDeviceBase.cpp \ Camera2Device.cpp \ - camera2/CameraMetadata.cpp \ + Camera3Device.cpp \ camera2/Parameters.cpp \ camera2/FrameProcessor.cpp \ camera2/StreamingProcessor.cpp \ @@ -20,15 +23,23 @@ LOCAL_SRC_FILES:= \ 
camera2/ZslProcessor.cpp \ camera2/BurstCapture.cpp \ camera2/JpegCompressor.cpp \ - camera2/CaptureSequencer.cpp + camera2/CaptureSequencer.cpp \ + camera2/ProFrameProcessor.cpp \ + camera2/ZslProcessor3.cpp \ + camera3/Camera3Stream.cpp \ + camera3/Camera3IOStreamBase.cpp \ + camera3/Camera3InputStream.cpp \ + camera3/Camera3OutputStream.cpp \ + camera3/Camera3ZslStream.cpp \ + gui/RingBufferConsumer.cpp \ LOCAL_SHARED_LIBRARIES:= \ libui \ + liblog \ libutils \ libbinder \ libcutils \ libmedia \ - libmedia_native \ libcamera_client \ libgui \ libhardware \ @@ -40,6 +51,9 @@ LOCAL_C_INCLUDES += \ system/media/camera/include \ external/jpeg + +LOCAL_CFLAGS += -Wall -Wextra + LOCAL_MODULE:= libcameraservice include $(BUILD_SHARED_LIBRARY) diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp index 3033577..dd50e3c 100644 --- a/services/camera/libcameraservice/Camera2Client.cpp +++ b/services/camera/libcameraservice/Camera2Client.cpp @@ -22,10 +22,14 @@ #include <utils/Trace.h> #include <cutils/properties.h> -#include <gui/SurfaceTextureClient.h> #include <gui/Surface.h> #include "camera2/Parameters.h" #include "Camera2Client.h" +#include "Camera2Device.h" +#include "Camera3Device.h" + +#include "camera2/ZslProcessor.h" +#include "camera2/ZslProcessor3.h" #define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__); #define ALOG2(...) 
ALOGD_IF(gLogLevel >= 2, __VA_ARGS__); @@ -37,56 +41,55 @@ static int getCallingPid() { return IPCThreadState::self()->getCallingPid(); } -static int getCallingUid() { - return IPCThreadState::self()->getCallingUid(); -} - // Interface used by CameraService Camera2Client::Camera2Client(const sp<CameraService>& cameraService, const sp<ICameraClient>& cameraClient, + const String16& clientPackageName, int cameraId, int cameraFacing, int clientPid, - int servicePid): - Client(cameraService, cameraClient, - cameraId, cameraFacing, clientPid, servicePid), - mSharedCameraClient(cameraClient), - mParameters(cameraId, cameraFacing) + uid_t clientUid, + int servicePid, + int deviceVersion): + Camera2ClientBase(cameraService, cameraClient, clientPackageName, + cameraId, cameraFacing, clientPid, clientUid, servicePid), + mParameters(cameraId, cameraFacing), + mDeviceVersion(deviceVersion) { ATRACE_CALL(); ALOGI("Camera %d: Opened", cameraId); - mDevice = new Camera2Device(cameraId); + switch (mDeviceVersion) { + case CAMERA_DEVICE_API_VERSION_2_0: + mDevice = new Camera2Device(cameraId); + break; + case CAMERA_DEVICE_API_VERSION_3_0: + mDevice = new Camera3Device(cameraId); + break; + default: + ALOGE("Camera %d: Unknown HAL device version %d", + cameraId, mDeviceVersion); + mDevice = NULL; + break; + } + SharedParameters::Lock l(mParameters); l.mParameters.state = Parameters::DISCONNECTED; } -status_t Camera2Client::checkPid(const char* checkLocation) const { - int callingPid = getCallingPid(); - if (callingPid == mClientPid) return NO_ERROR; - - ALOGE("%s: attempt to use a locked camera from a different process" - " (old pid %d, new pid %d)", checkLocation, mClientPid, callingPid); - return PERMISSION_DENIED; -} - status_t Camera2Client::initialize(camera_module_t *module) { ATRACE_CALL(); ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId); status_t res; - res = mDevice->initialize(module); + res = Camera2ClientBase::initialize(module); if (res != OK) { 
- ALOGE("%s: Camera %d: unable to initialize device: %s (%d)", - __FUNCTION__, mCameraId, strerror(-res), res); - return NO_INIT; + return res; } - res = mDevice->setNotifyCallback(this); - SharedParameters::Lock l(mParameters); res = l.mParameters.initialize(&(mDevice->info())); @@ -100,7 +103,7 @@ status_t Camera2Client::initialize(camera_module_t *module) mStreamingProcessor = new StreamingProcessor(this); - mFrameProcessor = new FrameProcessor(this); + mFrameProcessor = new FrameProcessor(mDevice, this); threadName = String8::format("C2-%d-FrameProc", mCameraId); mFrameProcessor->run(threadName.string()); @@ -115,10 +118,27 @@ status_t Camera2Client::initialize(camera_module_t *module) mCameraId); mJpegProcessor->run(threadName.string()); - mZslProcessor = new ZslProcessor(this, mCaptureSequencer); + switch (mDeviceVersion) { + case CAMERA_DEVICE_API_VERSION_2_0: { + sp<ZslProcessor> zslProc = + new ZslProcessor(this, mCaptureSequencer); + mZslProcessor = zslProc; + mZslProcessorThread = zslProc; + break; + } + case CAMERA_DEVICE_API_VERSION_3_0:{ + sp<ZslProcessor3> zslProc = + new ZslProcessor3(this, mCaptureSequencer); + mZslProcessor = zslProc; + mZslProcessorThread = zslProc; + break; + } + default: + break; + } threadName = String8::format("C2-%d-ZslProc", mCameraId); - mZslProcessor->run(threadName.string()); + mZslProcessorThread->run(threadName.string()); mCallbackProcessor = new CallbackProcessor(this); threadName = String8::format("C2-%d-CallbkProc", @@ -136,6 +156,7 @@ status_t Camera2Client::initialize(camera_module_t *module) Camera2Client::~Camera2Client() { ATRACE_CALL(); + ALOGV("~Camera2Client"); mDestructionStarted = true; @@ -146,9 +167,10 @@ Camera2Client::~Camera2Client() { status_t Camera2Client::dump(int fd, const Vector<String16>& args) { String8 result; - result.appendFormat("Client2[%d] (%p) PID: %d, dump:\n", + result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n", mCameraId, - getCameraClient()->asBinder().get(), + 
getRemoteCallback()->asBinder().get(), + String8(mClientPackageName).string(), mClientPid); result.append(" State: "); #define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break; @@ -351,26 +373,15 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) { mZslProcessor->dump(fd, args); - result = " Device dump:\n"; - write(fd, result.string(), result.size()); - - status_t res = mDevice->dump(fd, args); - if (res != OK) { - result = String8::format(" Error dumping device: %s (%d)", - strerror(-res), res); - write(fd, result.string(), result.size()); - } - + return dumpDevice(fd, args); #undef CASE_APPEND_ENUM - return NO_ERROR; } // ICamera interface void Camera2Client::disconnect() { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); - status_t res; + Mutex::Autolock icl(mBinderSerializationLock); // Allow both client and the media server to disconnect at all times int callingPid = getCallingPid(); @@ -380,6 +391,12 @@ void Camera2Client::disconnect() { ALOGV("Camera %d: Shutting down", mCameraId); + /** + * disconnect() cannot call any methods that might need to promote a + * wp<Camera2Client>, since disconnect can be called from the destructor, at + * which point all such promotions will fail. 
+ */ + stopPreviewL(); { @@ -397,7 +414,7 @@ void Camera2Client::disconnect() { mFrameProcessor->requestExit(); mCaptureSequencer->requestExit(); mJpegProcessor->requestExit(); - mZslProcessor->requestExit(); + mZslProcessorThread->requestExit(); mCallbackProcessor->requestExit(); ALOGV("Camera %d: Waiting for threads", mCameraId); @@ -405,7 +422,7 @@ void Camera2Client::disconnect() { mFrameProcessor->join(); mCaptureSequencer->join(); mJpegProcessor->join(); - mZslProcessor->join(); + mZslProcessorThread->join(); mCallbackProcessor->join(); ALOGV("Camera %d: Disconnecting device", mCameraId); @@ -420,7 +437,7 @@ void Camera2Client::disconnect() { status_t Camera2Client::connect(const sp<ICameraClient>& client) { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); if (mClientPid != 0 && getCallingPid() != mClientPid) { ALOGE("%s: Camera %d: Connection attempt from pid %d; " @@ -431,8 +448,8 @@ status_t Camera2Client::connect(const sp<ICameraClient>& client) { mClientPid = getCallingPid(); - mCameraClient = client; - mSharedCameraClient = client; + mRemoteCallback = client; + mSharedCameraCallbacks = client; return OK; } @@ -440,7 +457,7 @@ status_t Camera2Client::connect(const sp<ICameraClient>& client) { status_t Camera2Client::lock() { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); ALOGV("%s: Camera %d: Lock call from pid %d; current client pid %d", __FUNCTION__, mCameraId, getCallingPid(), mClientPid); @@ -461,7 +478,7 @@ status_t Camera2Client::lock() { status_t Camera2Client::unlock() { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); ALOGV("%s: Camera %d: Unlock call from pid %d; current client pid %d", __FUNCTION__, mCameraId, getCallingPid(), mClientPid); @@ -473,8 +490,8 @@ status_t Camera2Client::unlock() { return 
INVALID_OPERATION; } mClientPid = 0; - mCameraClient.clear(); - mSharedCameraClient.clear(); + mRemoteCallback.clear(); + mSharedCameraCallbacks.clear(); return OK; } @@ -487,14 +504,14 @@ status_t Camera2Client::setPreviewDisplay( const sp<Surface>& surface) { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; sp<IBinder> binder; sp<ANativeWindow> window; if (surface != 0) { - binder = surface->asBinder(); + binder = surface->getIGraphicBufferProducer()->asBinder(); window = surface; } @@ -502,18 +519,18 @@ status_t Camera2Client::setPreviewDisplay( } status_t Camera2Client::setPreviewTexture( - const sp<ISurfaceTexture>& surfaceTexture) { + const sp<IGraphicBufferProducer>& bufferProducer) { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; sp<IBinder> binder; sp<ANativeWindow> window; - if (surfaceTexture != 0) { - binder = surfaceTexture->asBinder(); - window = new SurfaceTextureClient(surfaceTexture); + if (bufferProducer != 0) { + binder = bufferProducer->asBinder(); + window = new Surface(bufferProducer); } return setPreviewWindowL(binder, window); } @@ -549,7 +566,12 @@ status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder, break; case Parameters::PREVIEW: // Already running preview - need to stop and create a new stream - mStreamingProcessor->stopStream(); + res = stopStream(); + if (res != OK) { + ALOGE("%s: Unable to stop preview to swap windows: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } state = Parameters::WAITING_FOR_PREVIEW_WINDOW; break; } @@ -574,8 +596,8 @@ status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder, void Camera2Client::setPreviewCallbackFlag(int flag) { ATRACE_CALL(); ALOGV("%s: Camera %d: 
Flag 0x%x", __FUNCTION__, mCameraId, flag); - Mutex::Autolock icl(mICameraLock); - status_t res; + Mutex::Autolock icl(mBinderSerializationLock); + if ( checkPid(__FUNCTION__) != OK) return; SharedParameters::Lock l(mParameters); @@ -613,7 +635,7 @@ void Camera2Client::setPreviewCallbackFlagL(Parameters ¶ms, int flag) { status_t Camera2Client::startPreview() { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; SharedParameters::Lock l(mParameters); @@ -668,7 +690,7 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { outputStreams.push(getCallbackStreamId()); } if (params.zslMode && !params.recordingHint) { - res = mZslProcessor->updateStream(params); + res = updateProcessorStream(mZslProcessor, params); if (res != OK) { ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); @@ -696,7 +718,7 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { // assumption that the user will record video. To optimize recording // startup time, create the necessary output streams for recording and // video snapshot now if they don't already exist. 
- res = mJpegProcessor->updateStream(params); + res = updateProcessorStream(mJpegProcessor, params); if (res != OK) { ALOGE("%s: Camera %d: Can't pre-configure still image " "stream: %s (%d)", @@ -729,7 +751,7 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { void Camera2Client::stopPreview() { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return; stopPreviewL(); @@ -747,8 +769,7 @@ void Camera2Client::stopPreviewL() { switch (state) { case Parameters::DISCONNECTED: - ALOGE("%s: Camera %d: Call before initialized", - __FUNCTION__, mCameraId); + // Nothing to do. break; case Parameters::STOPPED: case Parameters::VIDEO_SNAPSHOT: @@ -757,7 +778,11 @@ void Camera2Client::stopPreviewL() { // no break case Parameters::RECORD: case Parameters::PREVIEW: - mStreamingProcessor->stopStream(); + res = stopStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } res = mDevice->waitUntilDrained(); if (res != OK) { ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)", @@ -778,7 +803,7 @@ void Camera2Client::stopPreviewL() { bool Camera2Client::previewEnabled() { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return false; @@ -788,7 +813,7 @@ bool Camera2Client::previewEnabled() { status_t Camera2Client::storeMetaDataInBuffers(bool enabled) { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -813,7 +838,7 @@ status_t Camera2Client::storeMetaDataInBuffers(bool enabled) { status_t Camera2Client::startRecording() { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + 
Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; SharedParameters::Lock l(mParameters); @@ -864,7 +889,10 @@ status_t Camera2Client::startRecordingL(Parameters ¶ms, bool restart) { } } - res = mStreamingProcessor->updateRecordingStream(params); + res = updateProcessorStream< + StreamingProcessor, + &StreamingProcessor::updateRecordingStream>(mStreamingProcessor, + params); if (res != OK) { ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); @@ -904,7 +932,7 @@ status_t Camera2Client::startRecordingL(Parameters ¶ms, bool restart) { void Camera2Client::stopRecording() { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); SharedParameters::Lock l(mParameters); status_t res; @@ -936,7 +964,7 @@ void Camera2Client::stopRecording() { bool Camera2Client::recordingEnabled() { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); if ( checkPid(__FUNCTION__) != OK) return false; @@ -953,7 +981,7 @@ bool Camera2Client::recordingEnabledL() { void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); if ( checkPid(__FUNCTION__) != OK) return; mStreamingProcessor->releaseRecordingFrame(mem); @@ -961,7 +989,7 @@ void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) { status_t Camera2Client::autoFocus() { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); ALOGV("%s: Camera %d", __FUNCTION__, mCameraId); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -999,9 +1027,9 @@ status_t Camera2Client::autoFocus() { * Send immediate notification back to client */ if (notifyImmediately) { - SharedCameraClient::Lock l(mSharedCameraClient); - if 
(l.mCameraClient != 0) { - l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS, + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS, notifySuccess ? 1 : 0, 0); } return OK; @@ -1032,7 +1060,7 @@ status_t Camera2Client::autoFocus() { status_t Camera2Client::cancelAutoFocus() { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); ALOGV("%s: Camera %d", __FUNCTION__, mCameraId); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -1064,7 +1092,7 @@ status_t Camera2Client::cancelAutoFocus() { status_t Camera2Client::takePicture(int msgType) { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -1100,7 +1128,7 @@ status_t Camera2Client::takePicture(int msgType) { ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId); - res = mJpegProcessor->updateStream(l.mParameters); + res = updateProcessorStream(mJpegProcessor, l.mParameters); if (res != OK) { ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); @@ -1123,7 +1151,7 @@ status_t Camera2Client::takePicture(int msgType) { status_t Camera2Client::setParameters(const String8& params) { ATRACE_CALL(); ALOGV("%s: Camera %d", __FUNCTION__, mCameraId); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -1140,7 +1168,7 @@ status_t Camera2Client::setParameters(const String8& params) { String8 Camera2Client::getParameters() const { ATRACE_CALL(); ALOGV("%s: Camera %d", __FUNCTION__, mCameraId); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); if ( checkPid(__FUNCTION__) != OK) return String8(); SharedParameters::ReadLock l(mParameters); 
@@ -1150,7 +1178,7 @@ String8 Camera2Client::getParameters() const { status_t Camera2Client::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) { ATRACE_CALL(); - Mutex::Autolock icl(mICameraLock); + Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -1244,7 +1272,7 @@ status_t Camera2Client::commandPlayRecordingSoundL() { return OK; } -status_t Camera2Client::commandStartFaceDetectionL(int type) { +status_t Camera2Client::commandStartFaceDetectionL(int /*type*/) { ALOGV("%s: Camera %d: Starting face detection", __FUNCTION__, mCameraId); status_t res; @@ -1268,7 +1296,7 @@ status_t Camera2Client::commandStartFaceDetectionL(int type) { ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) { ALOGE("%s: Camera %d: Face detection not supported", __FUNCTION__, mCameraId); - return INVALID_OPERATION; + return BAD_VALUE; } if (l.mParameters.enableFaceDetect) return OK; @@ -1325,16 +1353,6 @@ status_t Camera2Client::commandSetVideoBufferCountL(size_t count) { } /** Device-related methods */ - -void Camera2Client::notifyError(int errorCode, int arg1, int arg2) { - ALOGE("Error condition %d reported by HAL, arguments %d, %d", errorCode, arg1, arg2); -} - -void Camera2Client::notifyShutter(int frameNumber, nsecs_t timestamp) { - ALOGV("%s: Shutter notification for frame %d at time %lld", __FUNCTION__, - frameNumber, timestamp); -} - void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) { ALOGV("%s: Autofocus state now %d, last trigger %d", __FUNCTION__, newState, triggerId); @@ -1430,16 +1448,16 @@ void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) { } } if (sendMovingMessage) { - SharedCameraClient::Lock l(mSharedCameraClient); - if (l.mCameraClient != 0) { - l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS_MOVE, + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS_MOVE, afInMotion ? 
1 : 0, 0); } } if (sendCompletedMessage) { - SharedCameraClient::Lock l(mSharedCameraClient); - if (l.mCameraClient != 0) { - l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS, + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS, success ? 1 : 0, 0); } } @@ -1451,23 +1469,6 @@ void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) { mCaptureSequencer->notifyAutoExposure(newState, triggerId); } -void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) { - ALOGV("%s: Auto-whitebalance state now %d, last trigger %d", - __FUNCTION__, newState, triggerId); -} - -int Camera2Client::getCameraId() const { - return mCameraId; -} - -const sp<Camera2Device>& Camera2Client::getCameraDevice() { - return mDevice; -} - -const sp<CameraService>& Camera2Client::getCameraService() { - return mCameraService; -} - camera2::SharedParameters& Camera2Client::getParameters() { return mParameters; } @@ -1506,32 +1507,6 @@ status_t Camera2Client::stopStream() { return mStreamingProcessor->stopStream(); } -Camera2Client::SharedCameraClient::Lock::Lock(SharedCameraClient &client): - mCameraClient(client.mCameraClient), - mSharedClient(client) { - mSharedClient.mCameraClientLock.lock(); -} - -Camera2Client::SharedCameraClient::Lock::~Lock() { - mSharedClient.mCameraClientLock.unlock(); -} - -Camera2Client::SharedCameraClient::SharedCameraClient(const sp<ICameraClient>&client): - mCameraClient(client) { -} - -Camera2Client::SharedCameraClient& Camera2Client::SharedCameraClient::operator=( - const sp<ICameraClient>&client) { - Mutex::Autolock l(mCameraClientLock); - mCameraClient = client; - return *this; -} - -void Camera2Client::SharedCameraClient::clear() { - Mutex::Autolock l(mCameraClientLock); - mCameraClient.clear(); -} - const int32_t Camera2Client::kPreviewRequestIdStart; const int32_t Camera2Client::kPreviewRequestIdEnd; const int32_t 
Camera2Client::kRecordingRequestIdStart; @@ -1633,4 +1608,58 @@ status_t Camera2Client::syncWithDevice() { return res; } +template <typename ProcessorT> +status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor, + camera2::Parameters params) { + // No default template arguments until C++11, so we need this overload + return updateProcessorStream<ProcessorT, &ProcessorT::updateStream>( + processor, params); +} + +template <typename ProcessorT, + status_t (ProcessorT::*updateStreamF)(const Parameters &)> +status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor, + Parameters params) { + status_t res; + + // Get raw pointer since sp<T> doesn't have operator->* + ProcessorT *processorPtr = processor.get(); + res = (processorPtr->*updateStreamF)(params); + + /** + * Can't update the stream if it's busy? + * + * Then we need to stop the device (by temporarily clearing the request + * queue) and then try again. Resume streaming once we're done. + */ + if (res == -EBUSY) { + res = mStreamingProcessor->togglePauseStream(/*pause*/true); + if (res != OK) { + ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } + + res = mDevice->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } + + res = (processorPtr->*updateStreamF)(params); + if (res != OK) { + ALOGE("%s: Camera %d: Failed to update processing stream " + " despite having halted streaming first: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } + + res = mStreamingProcessor->togglePauseStream(/*pause*/false); + if (res != OK) { + ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } + } + + return res; +} + } // namespace android diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h index 55ead02..8ab46b1 
100644 --- a/services/camera/libcameraservice/Camera2Client.h +++ b/services/camera/libcameraservice/Camera2Client.h @@ -17,26 +17,26 @@ #ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H #define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H -#include "Camera2Device.h" +#include "CameraDeviceBase.h" #include "CameraService.h" #include "camera2/Parameters.h" #include "camera2/FrameProcessor.h" #include "camera2/StreamingProcessor.h" #include "camera2/JpegProcessor.h" -#include "camera2/ZslProcessor.h" +#include "camera2/ZslProcessorInterface.h" #include "camera2/CaptureSequencer.h" #include "camera2/CallbackProcessor.h" +#include "Camera2ClientBase.h" namespace android { class IMemory; /** - * Implements the android.hardware.camera API on top of - * camera device HAL version 2. + * Interface between android.hardware.Camera API and Camera HAL device for versions + * CAMERA_DEVICE_API_VERSION_2_0 and 3_0. */ class Camera2Client : - public CameraService::Client, - public Camera2Device::NotificationListener + public Camera2ClientBase<CameraService::Client> { public: /** @@ -49,7 +49,7 @@ public: virtual status_t unlock(); virtual status_t setPreviewDisplay(const sp<Surface>& surface); virtual status_t setPreviewTexture( - const sp<ISurfaceTexture>& surfaceTexture); + const sp<IGraphicBufferProducer>& bufferProducer); virtual void setPreviewCallbackFlag(int flag); virtual status_t startPreview(); virtual void stopPreview(); @@ -72,10 +72,14 @@ public: Camera2Client(const sp<CameraService>& cameraService, const sp<ICameraClient>& cameraClient, + const String16& clientPackageName, int cameraId, int cameraFacing, int clientPid, - int servicePid); + uid_t clientUid, + int servicePid, + int deviceVersion); + virtual ~Camera2Client(); status_t initialize(camera_module_t *module); @@ -83,22 +87,16 @@ public: virtual status_t dump(int fd, const Vector<String16>& args); /** - * Interface used by Camera2Device + * Interface used by CameraDeviceBase */ - virtual void notifyError(int 
errorCode, int arg1, int arg2); - virtual void notifyShutter(int frameNumber, nsecs_t timestamp); virtual void notifyAutoFocus(uint8_t newState, int triggerId); virtual void notifyAutoExposure(uint8_t newState, int triggerId); - virtual void notifyAutoWhitebalance(uint8_t newState, int triggerId); /** * Interface used by independent components of Camera2Client. */ - int getCameraId() const; - const sp<Camera2Device>& getCameraDevice(); - const sp<CameraService>& getCameraService(); camera2::SharedParameters& getParameters(); int getPreviewStreamId() const; @@ -114,27 +112,6 @@ public: status_t stopStream(); - // Simple class to ensure that access to ICameraClient is serialized by - // requiring mCameraClientLock to be locked before access to mCameraClient - // is possible. - class SharedCameraClient { - public: - class Lock { - public: - Lock(SharedCameraClient &client); - ~Lock(); - sp<ICameraClient> &mCameraClient; - private: - SharedCameraClient &mSharedClient; - }; - SharedCameraClient(const sp<ICameraClient>& client); - SharedCameraClient& operator=(const sp<ICameraClient>& client); - void clear(); - private: - sp<ICameraClient> mCameraClient; - mutable Mutex mCameraClientLock; - } mSharedCameraClient; - static size_t calculateBufferSize(int width, int height, int format, int stride); @@ -149,15 +126,7 @@ public: private: /** ICamera interface-related private members */ - - // Mutex that must be locked by methods implementing the ICamera interface. - // Ensures serialization between incoming ICamera calls. 
All methods below - // that append 'L' to the name assume that mICameraLock is locked when - // they're called - mutable Mutex mICameraLock; - typedef camera2::Parameters Parameters; - typedef camera2::CameraMetadata CameraMetadata; status_t setPreviewWindowL(const sp<IBinder>& binder, sp<ANativeWindow> window); @@ -185,10 +154,17 @@ private: void setPreviewCallbackFlagL(Parameters ¶ms, int flag); status_t updateRequests(Parameters ¶ms); + int mDeviceVersion; // Used with stream IDs static const int NO_STREAM = -1; + template <typename ProcessorT> + status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params); + template <typename ProcessorT, + status_t (ProcessorT::*updateStreamF)(const Parameters &)> + status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params); + sp<camera2::FrameProcessor> mFrameProcessor; /* Preview/Recording related members */ @@ -204,23 +180,17 @@ private: sp<camera2::CaptureSequencer> mCaptureSequencer; sp<camera2::JpegProcessor> mJpegProcessor; - sp<camera2::ZslProcessor> mZslProcessor; + sp<camera2::ZslProcessorInterface> mZslProcessor; + sp<Thread> mZslProcessorThread; /** Notification-related members */ bool mAfInMotion; - /** Camera2Device instance wrapping HAL2 entry */ - - sp<Camera2Device> mDevice; - /** Utility members */ // Wait until the camera device has received the latest control settings status_t syncWithDevice(); - - // Verify that caller is the owner of the camera - status_t checkPid(const char *checkLocation) const; }; }; // namespace android diff --git a/services/camera/libcameraservice/Camera2ClientBase.cpp b/services/camera/libcameraservice/Camera2ClientBase.cpp new file mode 100644 index 0000000..0623b89 --- /dev/null +++ b/services/camera/libcameraservice/Camera2ClientBase.cpp @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Camera2ClientBase" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> + +#include <cutils/properties.h> +#include <gui/Surface.h> +#include <gui/Surface.h> +#include "camera2/Parameters.h" +#include "Camera2ClientBase.h" +#include "camera2/ProFrameProcessor.h" + +#include "Camera2Device.h" + +namespace android { +using namespace camera2; + +static int getCallingPid() { + return IPCThreadState::self()->getCallingPid(); +} + +// Interface used by CameraService + +template <typename TClientBase> +Camera2ClientBase<TClientBase>::Camera2ClientBase( + const sp<CameraService>& cameraService, + const sp<TCamCallbacks>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid): + TClientBase(cameraService, remoteCallback, clientPackageName, + cameraId, cameraFacing, clientPid, clientUid, servicePid), + mSharedCameraCallbacks(remoteCallback) +{ + ALOGI("Camera %d: Opened", cameraId); + mDevice = new Camera2Device(cameraId); +} + +template <typename TClientBase> +status_t Camera2ClientBase<TClientBase>::checkPid(const char* checkLocation) + const { + + int callingPid = getCallingPid(); + if (callingPid == TClientBase::mClientPid) return NO_ERROR; + + ALOGE("%s: attempt to use a locked camera from a different process" + " (old pid %d, new pid %d)", checkLocation, TClientBase::mClientPid, callingPid); + return PERMISSION_DENIED; +} + +template <typename TClientBase> +status_t 
Camera2ClientBase<TClientBase>::initialize(camera_module_t *module) { + ATRACE_CALL(); + ALOGV("%s: Initializing client for camera %d", __FUNCTION__, + TClientBase::mCameraId); + status_t res; + + // Verify ops permissions + res = TClientBase::startCameraOps(); + if (res != OK) { + return res; + } + + if (mDevice == NULL) { + ALOGE("%s: Camera %d: No device connected", + __FUNCTION__, TClientBase::mCameraId); + return NO_INIT; + } + + res = mDevice->initialize(module); + if (res != OK) { + ALOGE("%s: Camera %d: unable to initialize device: %s (%d)", + __FUNCTION__, TClientBase::mCameraId, strerror(-res), res); + return NO_INIT; + } + + res = mDevice->setNotifyCallback(this); + + return OK; +} + +template <typename TClientBase> +Camera2ClientBase<TClientBase>::~Camera2ClientBase() { + ATRACE_CALL(); + + TClientBase::mDestructionStarted = true; + + TClientBase::finishCameraOps(); + + disconnect(); + + ALOGI("Closed Camera %d", TClientBase::mCameraId); +} + +template <typename TClientBase> +status_t Camera2ClientBase<TClientBase>::dump(int fd, + const Vector<String16>& args) { + String8 result; + result.appendFormat("Camera2ClientBase[%d] (%p) PID: %d, dump:\n", + TClientBase::mCameraId, + TClientBase::getRemoteCallback()->asBinder().get(), + TClientBase::mClientPid); + result.append(" State: "); + + write(fd, result.string(), result.size()); + // TODO: print dynamic/request section from most recent requests + + return dumpDevice(fd, args); +} + +template <typename TClientBase> +status_t Camera2ClientBase<TClientBase>::dumpDevice( + int fd, + const Vector<String16>& args) { + String8 result; + + result = " Device dump:\n"; + write(fd, result.string(), result.size()); + + if (!mDevice.get()) { + result = " *** Device is detached\n"; + write(fd, result.string(), result.size()); + return NO_ERROR; + } + + status_t res = mDevice->dump(fd, args); + if (res != OK) { + result = String8::format(" Error dumping device: %s (%d)", + strerror(-res), res); + write(fd, 
result.string(), result.size()); + } + + return NO_ERROR; +} + +// ICameraClient2BaseUser interface + + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::disconnect() { + ATRACE_CALL(); + Mutex::Autolock icl(mBinderSerializationLock); + + // Allow both client and the media server to disconnect at all times + int callingPid = getCallingPid(); + if (callingPid != TClientBase::mClientPid && + callingPid != TClientBase::mServicePid) return; + + ALOGV("Camera %d: Shutting down", TClientBase::mCameraId); + + detachDevice(); + + CameraService::BasicClient::disconnect(); + + ALOGV("Camera %d: Shut down complete complete", TClientBase::mCameraId); +} + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::detachDevice() { + if (mDevice == 0) return; + mDevice->disconnect(); + + mDevice.clear(); + + ALOGV("Camera %d: Detach complete", TClientBase::mCameraId); +} + +template <typename TClientBase> +status_t Camera2ClientBase<TClientBase>::connect( + const sp<TCamCallbacks>& client) { + ATRACE_CALL(); + ALOGV("%s: E", __FUNCTION__); + Mutex::Autolock icl(mBinderSerializationLock); + + if (TClientBase::mClientPid != 0 && + getCallingPid() != TClientBase::mClientPid) { + + ALOGE("%s: Camera %d: Connection attempt from pid %d; " + "current locked to pid %d", + __FUNCTION__, + TClientBase::mCameraId, + getCallingPid(), + TClientBase::mClientPid); + return BAD_VALUE; + } + + TClientBase::mClientPid = getCallingPid(); + + TClientBase::mRemoteCallback = client; + mSharedCameraCallbacks = client; + + return OK; +} + +/** Device-related methods */ + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyError(int errorCode, int arg1, + int arg2) { + ALOGE("Error condition %d reported by HAL, arguments %d, %d", errorCode, + arg1, arg2); +} + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyShutter(int frameNumber, + nsecs_t timestamp) { + (void)frameNumber; + (void)timestamp; + + ALOGV("%s: Shutter 
notification for frame %d at time %lld", __FUNCTION__, + frameNumber, timestamp); +} + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyAutoFocus(uint8_t newState, + int triggerId) { + (void)newState; + (void)triggerId; + + ALOGV("%s: Autofocus state now %d, last trigger %d", + __FUNCTION__, newState, triggerId); + + typename SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS_MOVE, 1, 0); + } + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->notifyCallback(CAMERA_MSG_FOCUS, 1, 0); + } +} + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyAutoExposure(uint8_t newState, + int triggerId) { + (void)newState; + (void)triggerId; + + ALOGV("%s: Autoexposure state now %d, last trigger %d", + __FUNCTION__, newState, triggerId); +} + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyAutoWhitebalance(uint8_t newState, + int triggerId) { + (void)newState; + (void)triggerId; + + ALOGV("%s: Auto-whitebalance state now %d, last trigger %d", + __FUNCTION__, newState, triggerId); +} + +template <typename TClientBase> +int Camera2ClientBase<TClientBase>::getCameraId() const { + return TClientBase::mCameraId; +} + +template <typename TClientBase> +const sp<CameraDeviceBase>& Camera2ClientBase<TClientBase>::getCameraDevice() { + return mDevice; +} + +template <typename TClientBase> +const sp<CameraService>& Camera2ClientBase<TClientBase>::getCameraService() { + return TClientBase::mCameraService; +} + +template <typename TClientBase> +Camera2ClientBase<TClientBase>::SharedCameraCallbacks::Lock::Lock( + SharedCameraCallbacks &client) : + + mRemoteCallback(client.mRemoteCallback), + mSharedClient(client) { + + mSharedClient.mRemoteCallbackLock.lock(); +} + +template <typename TClientBase> +Camera2ClientBase<TClientBase>::SharedCameraCallbacks::Lock::~Lock() { + mSharedClient.mRemoteCallbackLock.unlock(); +} + 
+template <typename TClientBase> +Camera2ClientBase<TClientBase>::SharedCameraCallbacks::SharedCameraCallbacks( + const sp<TCamCallbacks>&client) : + + mRemoteCallback(client) { +} + +template <typename TClientBase> +typename Camera2ClientBase<TClientBase>::SharedCameraCallbacks& +Camera2ClientBase<TClientBase>::SharedCameraCallbacks::operator=( + const sp<TCamCallbacks>&client) { + + Mutex::Autolock l(mRemoteCallbackLock); + mRemoteCallback = client; + return *this; +} + +template <typename TClientBase> +void Camera2ClientBase<TClientBase>::SharedCameraCallbacks::clear() { + Mutex::Autolock l(mRemoteCallbackLock); + mRemoteCallback.clear(); +} + +template class Camera2ClientBase<CameraService::ProClient>; +template class Camera2ClientBase<CameraService::Client>; + +} // namespace android diff --git a/services/camera/libcameraservice/Camera2ClientBase.h b/services/camera/libcameraservice/Camera2ClientBase.h new file mode 100644 index 0000000..9001efb --- /dev/null +++ b/services/camera/libcameraservice/Camera2ClientBase.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H +#define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H + +#include "CameraDeviceBase.h" +#include "CameraService.h" + +namespace android { + +class IMemory; + +template <typename TClientBase> +class Camera2ClientBase : + public TClientBase, + public CameraDeviceBase::NotificationListener +{ +public: + typedef typename TClientBase::TCamCallbacks TCamCallbacks; + + /** + * Base binder interface (see ICamera/IProCameraUser for details) + */ + virtual status_t connect(const sp<TCamCallbacks>& callbacks); + virtual void disconnect(); + + /** + * Interface used by CameraService + */ + + // TODO: too many params, move into a ClientArgs<T> + Camera2ClientBase(const sp<CameraService>& cameraService, + const sp<TCamCallbacks>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid); + virtual ~Camera2ClientBase(); + + virtual status_t initialize(camera_module_t *module); + virtual status_t dump(int fd, const Vector<String16>& args); + + /** + * CameraDeviceBase::NotificationListener implementation + */ + + virtual void notifyError(int errorCode, int arg1, int arg2); + virtual void notifyShutter(int frameNumber, nsecs_t timestamp); + virtual void notifyAutoFocus(uint8_t newState, int triggerId); + virtual void notifyAutoExposure(uint8_t newState, int triggerId); + virtual void notifyAutoWhitebalance(uint8_t newState, + int triggerId); + + + int getCameraId() const; + const sp<CameraDeviceBase>& + getCameraDevice(); + const sp<CameraService>& + getCameraService(); + + /** + * Interface used by independent components of Camera2ClientBase. + */ + + // Simple class to ensure that access to TCamCallbacks is serialized + // by requiring mRemoteCallbackLock to be locked before access to + // mRemoteCallback is possible. 
+ class SharedCameraCallbacks { + public: + class Lock { + public: + Lock(SharedCameraCallbacks &client); + ~Lock(); + sp<TCamCallbacks> &mRemoteCallback; + private: + SharedCameraCallbacks &mSharedClient; + }; + SharedCameraCallbacks(const sp<TCamCallbacks>& client); + SharedCameraCallbacks& operator=(const sp<TCamCallbacks>& client); + void clear(); + private: + sp<TCamCallbacks> mRemoteCallback; + mutable Mutex mRemoteCallbackLock; + } mSharedCameraCallbacks; + +protected: + + virtual status_t dumpDevice(int fd, const Vector<String16>& args); + + /** Binder client interface-related private members */ + + // Mutex that must be locked by methods implementing the binder client + // interface. Ensures serialization between incoming client calls. + // All methods in this class hierarchy that append 'L' to the name assume + // that mBinderSerializationLock is locked when they're called + mutable Mutex mBinderSerializationLock; + + /** CameraDeviceBase instance wrapping HAL2+ entry */ + + sp<CameraDeviceBase> mDevice; + + /** Utility members */ + + // Verify that caller is the owner of the camera + status_t checkPid(const char *checkLocation) const; + + virtual void detachDevice(); +}; + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp index d6445c1..77df152 100644 --- a/services/camera/libcameraservice/Camera2Device.cpp +++ b/services/camera/libcameraservice/Camera2Device.cpp @@ -34,7 +34,7 @@ namespace android { Camera2Device::Camera2Device(int id): mId(id), - mDevice(NULL) + mHal2Device(NULL) { ATRACE_CALL(); ALOGV("%s: Created device for camera %d", __FUNCTION__, id); @@ -47,11 +47,15 @@ Camera2Device::~Camera2Device() disconnect(); } +int Camera2Device::getId() const { + return mId; +} + status_t Camera2Device::initialize(camera_module_t *module) { ATRACE_CALL(); ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId); - if (mDevice != NULL) { + if 
(mHal2Device != NULL) { ALOGE("%s: Already initialized!", __FUNCTION__); return INVALID_OPERATION; } @@ -131,7 +135,7 @@ status_t Camera2Device::initialize(camera_module_t *module) } mDeviceInfo = info.static_camera_characteristics; - mDevice = device; + mHal2Device = device; return OK; } @@ -139,23 +143,23 @@ status_t Camera2Device::initialize(camera_module_t *module) status_t Camera2Device::disconnect() { ATRACE_CALL(); status_t res = OK; - if (mDevice) { + if (mHal2Device) { ALOGV("%s: Closing device for camera %d", __FUNCTION__, mId); - int inProgressCount = mDevice->ops->get_in_progress_count(mDevice); + int inProgressCount = mHal2Device->ops->get_in_progress_count(mHal2Device); if (inProgressCount > 0) { ALOGW("%s: Closing camera device %d with %d requests in flight!", __FUNCTION__, mId, inProgressCount); } mReprocessStreams.clear(); mStreams.clear(); - res = mDevice->common.close(&mDevice->common); + res = mHal2Device->common.close(&mHal2Device->common); if (res != OK) { ALOGE("%s: Could not close camera %d: %s (%d)", __FUNCTION__, mId, strerror(-res), res); } - mDevice = NULL; + mHal2Device = NULL; ALOGV("%s: Shutdown complete", __FUNCTION__); } return res; @@ -197,12 +201,12 @@ status_t Camera2Device::dump(int fd, const Vector<String16>& args) { write(fd, result.string(), result.size()); status_t res; - res = mDevice->ops->dump(mDevice, fd); + res = mHal2Device->ops->dump(mHal2Device, fd); return res; } -const camera2::CameraMetadata& Camera2Device::info() const { +const CameraMetadata& Camera2Device::info() const { ALOGVV("%s: E", __FUNCTION__); return mDeviceInfo; @@ -240,7 +244,7 @@ status_t Camera2Device::createStream(sp<ANativeWindow> consumer, status_t res; ALOGV("%s: E", __FUNCTION__); - sp<StreamAdapter> stream = new StreamAdapter(mDevice); + sp<StreamAdapter> stream = new StreamAdapter(mHal2Device); res = stream->connectToDevice(consumer, width, height, format, size); if (res != OK) { @@ -276,7 +280,7 @@ status_t 
Camera2Device::createReprocessStreamFromStream(int outputId, int *id) { return BAD_VALUE; } - sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mDevice); + sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mHal2Device); res = stream->connectToDevice((*streamI)); if (res != OK) { @@ -401,8 +405,8 @@ status_t Camera2Device::createDefaultRequest(int templateId, status_t err; ALOGV("%s: E", __FUNCTION__); camera_metadata_t *rawRequest; - err = mDevice->ops->construct_default_request( - mDevice, templateId, &rawRequest); + err = mHal2Device->ops->construct_default_request( + mHal2Device, templateId, &rawRequest); request->acquire(rawRequest); return err; } @@ -417,12 +421,12 @@ status_t Camera2Device::waitUntilDrained() { // TODO: Set up notifications from HAL, instead of sleeping here uint32_t totalTime = 0; - while (mDevice->ops->get_in_progress_count(mDevice) > 0) { + while (mHal2Device->ops->get_in_progress_count(mHal2Device) > 0) { usleep(kSleepTime); totalTime += kSleepTime; if (totalTime > kMaxSleepTime) { ALOGE("%s: Waited %d us, %d requests still in flight", __FUNCTION__, - mDevice->ops->get_in_progress_count(mDevice), totalTime); + totalTime, mHal2Device->ops->get_in_progress_count(mHal2Device)); return TIMED_OUT; } } @@ -433,7 +437,7 @@ status_t Camera2Device::waitUntilDrained() { status_t Camera2Device::setNotifyCallback(NotificationListener *listener) { ATRACE_CALL(); status_t res; - res = mDevice->ops->set_notify_callback(mDevice, notificationCallback, + res = mHal2Device->ops->set_notify_callback(mHal2Device, notificationCallback, reinterpret_cast<void*>(listener) ); if (res != OK) { ALOGE("%s: Unable to set notification callback!", __FUNCTION__); @@ -497,7 +501,7 @@ status_t Camera2Device::triggerAutofocus(uint32_t id) { ATRACE_CALL(); status_t res; ALOGV("%s: Triggering autofocus, id %d", __FUNCTION__, id); - res = mDevice->ops->trigger_action(mDevice, + res = mHal2Device->ops->trigger_action(mHal2Device, 
CAMERA2_TRIGGER_AUTOFOCUS, id, 0); if (res != OK) { ALOGE("%s: Error triggering autofocus (id %d)", @@ -510,7 +514,7 @@ status_t Camera2Device::triggerCancelAutofocus(uint32_t id) { ATRACE_CALL(); status_t res; ALOGV("%s: Canceling autofocus, id %d", __FUNCTION__, id); - res = mDevice->ops->trigger_action(mDevice, + res = mHal2Device->ops->trigger_action(mHal2Device, CAMERA2_TRIGGER_CANCEL_AUTOFOCUS, id, 0); if (res != OK) { ALOGE("%s: Error canceling autofocus (id %d)", @@ -523,7 +527,7 @@ status_t Camera2Device::triggerPrecaptureMetering(uint32_t id) { ATRACE_CALL(); status_t res; ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id); - res = mDevice->ops->trigger_action(mDevice, + res = mHal2Device->ops->trigger_action(mHal2Device, CAMERA2_TRIGGER_PRECAPTURE_METERING, id, 0); if (res != OK) { ALOGE("%s: Error triggering precapture metering (id %d)", @@ -560,18 +564,11 @@ status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId, } /** - * Camera2Device::NotificationListener - */ - -Camera2Device::NotificationListener::~NotificationListener() { -} - -/** * Camera2Device::MetadataQueue */ Camera2Device::MetadataQueue::MetadataQueue(): - mDevice(NULL), + mHal2Device(NULL), mFrameCount(0), mLatestRequestId(0), mCount(0), @@ -602,7 +599,7 @@ status_t Camera2Device::MetadataQueue::setConsumerDevice(camera2_device_t *d) { res = d->ops->set_request_queue_src_ops(d, this); if (res != OK) return res; - mDevice = d; + mHal2Device = d; return OK; } @@ -765,7 +762,6 @@ status_t Camera2Device::MetadataQueue::setStreamSlot( ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); Mutex::Autolock l(mMutex); - status_t res; if (mStreamSlotCount > 0) { freeBuffers(mStreamSlot.begin(), mStreamSlot.end()); @@ -785,7 +781,7 @@ status_t Camera2Device::MetadataQueue::setStreamSlot( } status_t Camera2Device::MetadataQueue::dump(int fd, - const Vector<String16>& args) { + const Vector<String16>& /*args*/) { ATRACE_CALL(); String8 result; status_t notLocked; @@ -836,12 +832,12 
@@ status_t Camera2Device::MetadataQueue::signalConsumerLocked() { ATRACE_CALL(); status_t res = OK; notEmpty.signal(); - if (mSignalConsumer && mDevice != NULL) { + if (mSignalConsumer && mHal2Device != NULL) { mSignalConsumer = false; mMutex.unlock(); ALOGV("%s: Signaling consumer", __FUNCTION__); - res = mDevice->ops->notify_request_queue_not_empty(mDevice); + res = mHal2Device->ops->notify_request_queue_not_empty(mHal2Device); mMutex.lock(); } return res; @@ -894,12 +890,13 @@ int Camera2Device::MetadataQueue::consumer_free( { ATRACE_CALL(); MetadataQueue *queue = getInstance(q); + (void)queue; free_camera_metadata(old_buffer); return OK; } int Camera2Device::MetadataQueue::producer_dequeue( - const camera2_frame_queue_dst_ops_t *q, + const camera2_frame_queue_dst_ops_t * /*q*/, size_t entries, size_t bytes, camera_metadata_t **buffer) { @@ -912,7 +909,7 @@ int Camera2Device::MetadataQueue::producer_dequeue( } int Camera2Device::MetadataQueue::producer_cancel( - const camera2_frame_queue_dst_ops_t *q, + const camera2_frame_queue_dst_ops_t * /*q*/, camera_metadata_t *old_buffer) { ATRACE_CALL(); @@ -939,7 +936,7 @@ int Camera2Device::MetadataQueue::producer_enqueue( Camera2Device::StreamAdapter::StreamAdapter(camera2_device_t *d): mState(RELEASED), - mDevice(d), + mHal2Device(d), mId(-1), mWidth(0), mHeight(0), mFormat(0), mSize(0), mUsage(0), mMaxProducerBuffers(0), mMaxConsumerBuffers(0), @@ -990,7 +987,7 @@ status_t Camera2Device::StreamAdapter::connectToDevice( uint32_t formatActual; uint32_t usage; uint32_t maxBuffers = 2; - res = mDevice->ops->allocate_stream(mDevice, + res = mHal2Device->ops->allocate_stream(mHal2Device, mWidth, mHeight, mFormatRequested, getStreamOps(), &id, &formatActual, &usage, &maxBuffers); if (res != OK) { @@ -1106,7 +1103,7 @@ status_t Camera2Device::StreamAdapter::connectToDevice( } ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers); - res = mDevice->ops->register_stream_buffers(mDevice, + res = 
mHal2Device->ops->register_stream_buffers(mHal2Device, mId, mTotalBuffers, buffers); @@ -1136,9 +1133,10 @@ cleanUpBuffers: status_t Camera2Device::StreamAdapter::release() { ATRACE_CALL(); status_t res; - ALOGV("%s: Releasing stream %d", __FUNCTION__, mId); + ALOGV("%s: Releasing stream %d (%d x %d, format %d)", __FUNCTION__, mId, + mWidth, mHeight, mFormat); if (mState >= ALLOCATED) { - res = mDevice->ops->release_stream(mDevice, mId); + res = mHal2Device->ops->release_stream(mHal2Device, mId); if (res != OK) { ALOGE("%s: Unable to release stream %d", __FUNCTION__, mId); @@ -1184,7 +1182,7 @@ status_t Camera2Device::StreamAdapter::setTransform(int transform) { } status_t Camera2Device::StreamAdapter::dump(int fd, - const Vector<String16>& args) { + const Vector<String16>& /*args*/) { ATRACE_CALL(); String8 result = String8::format(" Stream %d: %d x %d, format 0x%x\n", mId, mWidth, mHeight, mFormat); @@ -1319,7 +1317,7 @@ int Camera2Device::StreamAdapter::set_crop(const camera2_stream_ops_t* w, Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d): mState(RELEASED), - mDevice(d), + mHal2Device(d), mId(-1), mWidth(0), mHeight(0), mFormat(0), mActiveBuffers(0), @@ -1361,7 +1359,7 @@ status_t Camera2Device::ReprocessStreamAdapter::connectToDevice( // Allocate device-side stream interface uint32_t id; - res = mDevice->ops->allocate_reprocess_stream_from_stream(mDevice, + res = mHal2Device->ops->allocate_reprocess_stream_from_stream(mHal2Device, outputStream->getId(), getStreamOps(), &id); if (res != OK) { @@ -1385,7 +1383,7 @@ status_t Camera2Device::ReprocessStreamAdapter::release() { status_t res; ALOGV("%s: Releasing stream %d", __FUNCTION__, mId); if (mState >= ACTIVE) { - res = mDevice->ops->release_reprocess_stream(mDevice, mId); + res = mHal2Device->ops->release_reprocess_stream(mHal2Device, mId); if (res != OK) { ALOGE("%s: Unable to release stream %d", __FUNCTION__, mId); @@ -1423,7 +1421,7 @@ status_t 
Camera2Device::ReprocessStreamAdapter::pushIntoStream( } status_t Camera2Device::ReprocessStreamAdapter::dump(int fd, - const Vector<String16>& args) { + const Vector<String16>& /*args*/) { ATRACE_CALL(); String8 result = String8::format(" Reprocess stream %d: %d x %d, fmt 0x%x\n", @@ -1444,7 +1442,7 @@ int Camera2Device::ReprocessStreamAdapter::acquire_buffer( const camera2_stream_in_ops_t *w, buffer_handle_t** buffer) { ATRACE_CALL(); - int res; + ReprocessStreamAdapter* stream = const_cast<ReprocessStreamAdapter*>( static_cast<const ReprocessStreamAdapter*>(w)); diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/Camera2Device.h index 41df2e4..3034a1d 100644 --- a/services/camera/libcameraservice/Camera2Device.h +++ b/services/camera/libcameraservice/Camera2Device.h @@ -21,186 +21,54 @@ #include <utils/Errors.h> #include <utils/List.h> #include <utils/Mutex.h> -#include <utils/RefBase.h> -#include <utils/String8.h> -#include <utils/String16.h> -#include <utils/Vector.h> -#include "hardware/camera2.h" -#include "camera2/CameraMetadata.h" +#include "CameraDeviceBase.h" namespace android { -class Camera2Device : public virtual RefBase { +/** + * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_2_0 + */ +class Camera2Device: public CameraDeviceBase { public: - typedef camera2::CameraMetadata CameraMetadata; - Camera2Device(int id); - ~Camera2Device(); - - status_t initialize(camera_module_t *module); - status_t disconnect(); - - status_t dump(int fd, const Vector<String16>& args); - - /** - * The device's static characteristics metadata buffer - */ - const CameraMetadata& info() const; - - /** - * Submit request for capture. The Camera2Device takes ownership of the - * passed-in buffer. - */ - status_t capture(CameraMetadata &request); - - /** - * Submit request for streaming. The Camera2Device makes a copy of the - * passed-in buffer and the caller retains ownership. 
- */ - status_t setStreamingRequest(const CameraMetadata &request); + virtual ~Camera2Device(); /** - * Clear the streaming request slot. + * CameraDevice interface */ - status_t clearStreamingRequest(); - - /** - * Wait until a request with the given ID has been dequeued by the - * HAL. Returns TIMED_OUT if the timeout duration is reached. Returns - * immediately if the latest request received by the HAL has this id. - */ - status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout); - - /** - * Create an output stream of the requested size and format. - * - * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device selects - * an appropriate format; it can be queried with getStreamInfo. - * - * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must be - * equal to the size in bytes of the buffers to allocate for the stream. For - * other formats, the size parameter is ignored. - */ - status_t createStream(sp<ANativeWindow> consumer, + virtual int getId() const; + virtual status_t initialize(camera_module_t *module); + virtual status_t disconnect(); + virtual status_t dump(int fd, const Vector<String16>& args); + virtual const CameraMetadata& info() const; + virtual status_t capture(CameraMetadata &request); + virtual status_t setStreamingRequest(const CameraMetadata &request); + virtual status_t clearStreamingRequest(); + virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout); + virtual status_t createStream(sp<ANativeWindow> consumer, uint32_t width, uint32_t height, int format, size_t size, int *id); - - /** - * Create an input reprocess stream that uses buffers from an existing - * output stream. - */ - status_t createReprocessStreamFromStream(int outputId, int *id); - - /** - * Get information about a given stream. 
- */ - status_t getStreamInfo(int id, + virtual status_t createReprocessStreamFromStream(int outputId, int *id); + virtual status_t getStreamInfo(int id, uint32_t *width, uint32_t *height, uint32_t *format); - - /** - * Set stream gralloc buffer transform - */ - status_t setStreamTransform(int id, int transform); - - /** - * Delete stream. Must not be called if there are requests in flight which - * reference that stream. - */ - status_t deleteStream(int id); - - /** - * Delete reprocess stream. Must not be called if there are requests in - * flight which reference that stream. - */ - status_t deleteReprocessStream(int id); - - /** - * Create a metadata buffer with fields that the HAL device believes are - * best for the given use case - */ - status_t createDefaultRequest(int templateId, CameraMetadata *request); - - /** - * Wait until all requests have been processed. Returns INVALID_OPERATION if - * the streaming slot is not empty, or TIMED_OUT if the requests haven't - * finished processing in 10 seconds. - */ - status_t waitUntilDrained(); - - /** - * Abstract class for HAL notification listeners - */ - class NotificationListener { - public: - // Refer to the Camera2 HAL definition for notification definitions - virtual void notifyError(int errorCode, int arg1, int arg2) = 0; - virtual void notifyShutter(int frameNumber, nsecs_t timestamp) = 0; - virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0; - virtual void notifyAutoExposure(uint8_t newState, int triggerId) = 0; - virtual void notifyAutoWhitebalance(uint8_t newState, int triggerId) = 0; - protected: - virtual ~NotificationListener(); - }; - - /** - * Connect HAL notifications to a listener. Overwrites previous - * listener. Set to NULL to stop receiving notifications. - */ - status_t setNotifyCallback(NotificationListener *listener); - - /** - * Wait for a new frame to be produced, with timeout in nanoseconds. 
- * Returns TIMED_OUT when no frame produced within the specified duration - */ - status_t waitForNextFrame(nsecs_t timeout); - - /** - * Get next metadata frame from the frame queue. Returns NULL if the queue - * is empty; caller takes ownership of the metadata buffer. - */ - status_t getNextFrame(CameraMetadata *frame); - - /** - * Trigger auto-focus. The latest ID used in a trigger autofocus or cancel - * autofocus call will be returned by the HAL in all subsequent AF - * notifications. - */ - status_t triggerAutofocus(uint32_t id); - - /** - * Cancel auto-focus. The latest ID used in a trigger autofocus/cancel - * autofocus call will be returned by the HAL in all subsequent AF - * notifications. - */ - status_t triggerCancelAutofocus(uint32_t id); - - /** - * Trigger pre-capture metering. The latest ID used in a trigger pre-capture - * call will be returned by the HAL in all subsequent AE and AWB - * notifications. - */ - status_t triggerPrecaptureMetering(uint32_t id); - - /** - * Abstract interface for clients that want to listen to reprocess buffer - * release events - */ - struct BufferReleasedListener: public virtual RefBase { - virtual void onBufferReleased(buffer_handle_t *handle) = 0; - }; - - /** - * Push a buffer to be reprocessed into a reprocessing stream, and - * provide a listener to call once the buffer is returned by the HAL - */ - status_t pushReprocessBuffer(int reprocessStreamId, + virtual status_t setStreamTransform(int id, int transform); + virtual status_t deleteStream(int id); + virtual status_t deleteReprocessStream(int id); + virtual status_t createDefaultRequest(int templateId, CameraMetadata *request); + virtual status_t waitUntilDrained(); + virtual status_t setNotifyCallback(NotificationListener *listener); + virtual status_t waitForNextFrame(nsecs_t timeout); + virtual status_t getNextFrame(CameraMetadata *frame); + virtual status_t triggerAutofocus(uint32_t id); + virtual status_t triggerCancelAutofocus(uint32_t id); + virtual 
status_t triggerPrecaptureMetering(uint32_t id); + virtual status_t pushReprocessBuffer(int reprocessStreamId, buffer_handle_t *buffer, wp<BufferReleasedListener> listener); - private: const int mId; - camera2_device_t *mDevice; + camera2_device_t *mHal2Device; CameraMetadata mDeviceInfo; vendor_tag_query_ops_t *mVendorTagOps; @@ -251,7 +119,7 @@ class Camera2Device : public virtual RefBase { status_t freeBuffers(List<camera_metadata_t*>::iterator start, List<camera_metadata_t*>::iterator end); - camera2_device_t *mDevice; + camera2_device_t *mHal2Device; Mutex mMutex; Condition notEmpty; @@ -343,7 +211,7 @@ class Camera2Device : public virtual RefBase { } mState; sp<ANativeWindow> mConsumerInterface; - camera2_device_t *mDevice; + camera2_device_t *mHal2Device; uint32_t mId; uint32_t mWidth; @@ -437,7 +305,7 @@ class Camera2Device : public virtual RefBase { List<QueueEntry> mInFlightQueue; - camera2_device_t *mDevice; + camera2_device_t *mHal2Device; uint32_t mId; uint32_t mWidth; diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/Camera3Device.cpp new file mode 100644 index 0000000..0b5e9c4 --- /dev/null +++ b/services/camera/libcameraservice/Camera3Device.cpp @@ -0,0 +1,2005 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera3-Device" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 +//#define LOG_NNDEBUG 0 // Per-frame verbose logging + +#ifdef LOG_NNDEBUG +#define ALOGVV(...) ALOGV(__VA_ARGS__) +#else +#define ALOGVV(...) ((void)0) +#endif + +// Convenience macro for transient errors +#define CLOGE(fmt, ...) ALOGE("Camera %d: %s: " fmt, mId, __FUNCTION__, \ + ##__VA_ARGS__) + +// Convenience macros for transitioning to the error state +#define SET_ERR(fmt, ...) setErrorState( \ + "%s: " fmt, __FUNCTION__, \ + ##__VA_ARGS__) +#define SET_ERR_L(fmt, ...) setErrorStateLocked( \ + "%s: " fmt, __FUNCTION__, \ + ##__VA_ARGS__) + +#include <utils/Log.h> +#include <utils/Trace.h> +#include <utils/Timers.h> +#include "Camera3Device.h" +#include "camera3/Camera3OutputStream.h" +#include "camera3/Camera3InputStream.h" + +using namespace android::camera3; + +namespace android { + +Camera3Device::Camera3Device(int id): + mId(id), + mHal3Device(NULL), + mStatus(STATUS_UNINITIALIZED), + mNextResultFrameNumber(0), + mNextShutterFrameNumber(0), + mListener(NULL) +{ + ATRACE_CALL(); + camera3_callback_ops::notify = &sNotify; + camera3_callback_ops::process_capture_result = &sProcessCaptureResult; + ALOGV("%s: Created device for camera %d", __FUNCTION__, id); +} + +Camera3Device::~Camera3Device() +{ + ATRACE_CALL(); + ALOGV("%s: Tearing down for camera id %d", __FUNCTION__, mId); + disconnect(); +} + +int Camera3Device::getId() const { + return mId; +} + +/** + * CameraDeviceBase interface + */ + +status_t Camera3Device::initialize(camera_module_t *module) +{ + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId); + if (mStatus != STATUS_UNINITIALIZED) { + CLOGE("Already initialized!"); + return INVALID_OPERATION; + } + + /** Open HAL device */ + + status_t res; + String8 deviceName = String8::format("%d", mId); + + camera3_device_t *device; + + res = module->common.methods->open(&module->common, 
deviceName.string(), + reinterpret_cast<hw_device_t**>(&device)); + + if (res != OK) { + SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res); + return res; + } + + /** Cross-check device version */ + + if (device->common.version != CAMERA_DEVICE_API_VERSION_3_0) { + SET_ERR_L("Could not open camera: " + "Camera device is not version %x, reports %x instead", + CAMERA_DEVICE_API_VERSION_3_0, + device->common.version); + device->common.close(&device->common); + return BAD_VALUE; + } + + camera_info info; + res = module->get_camera_info(mId, &info); + if (res != OK) return res; + + if (info.device_version != device->common.version) { + SET_ERR_L("HAL reporting mismatched camera_info version (%x)" + " and device version (%x).", + device->common.version, info.device_version); + device->common.close(&device->common); + return BAD_VALUE; + } + + /** Initialize device with callback functions */ + + res = device->ops->initialize(device, this); + if (res != OK) { + SET_ERR_L("Unable to initialize HAL device: %s (%d)", + strerror(-res), res); + device->common.close(&device->common); + return BAD_VALUE; + } + + /** Get vendor metadata tags */ + + mVendorTagOps.get_camera_vendor_section_name = NULL; + + device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps); + + if (mVendorTagOps.get_camera_vendor_section_name != NULL) { + res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps); + if (res != OK) { + SET_ERR_L("Unable to set tag ops: %s (%d)", + strerror(-res), res); + device->common.close(&device->common); + return res; + } + } + + /** Start up request queue thread */ + + mRequestThread = new RequestThread(this, device); + res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string()); + if (res != OK) { + SET_ERR_L("Unable to start request queue thread: %s (%d)", + strerror(-res), res); + device->common.close(&device->common); + mRequestThread.clear(); + return res; + } + + /** Everything is good to go */ + + mDeviceInfo = 
info.static_camera_characteristics; + mHal3Device = device; + mStatus = STATUS_IDLE; + mNextStreamId = 0; + + return OK; +} + +status_t Camera3Device::disconnect() { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + ALOGV("%s: E", __FUNCTION__); + + status_t res; + if (mStatus == STATUS_UNINITIALIZED) return OK; + + if (mStatus == STATUS_ACTIVE || + (mStatus == STATUS_ERROR && mRequestThread != NULL)) { + res = mRequestThread->clearRepeatingRequests(); + if (res != OK) { + SET_ERR_L("Can't stop streaming"); + return res; + } + res = waitUntilDrainedLocked(); + if (res != OK) { + SET_ERR_L("Timeout waiting for HAL to drain"); + return res; + } + } + assert(mStatus == STATUS_IDLE || mStatus == STATUS_ERROR); + + if (mRequestThread != NULL) { + mRequestThread->requestExit(); + } + + mOutputStreams.clear(); + mInputStream.clear(); + + if (mRequestThread != NULL) { + mRequestThread->join(); + mRequestThread.clear(); + } + + if (mHal3Device != NULL) { + mHal3Device->common.close(&mHal3Device->common); + mHal3Device = NULL; + } + + mStatus = STATUS_UNINITIALIZED; + + ALOGV("%s: X", __FUNCTION__); + return OK; +} + +status_t Camera3Device::dump(int fd, const Vector<String16> &args) { + ATRACE_CALL(); + (void)args; + String8 lines; + + const char *status = + mStatus == STATUS_ERROR ? "ERROR" : + mStatus == STATUS_UNINITIALIZED ? "UNINITIALIZED" : + mStatus == STATUS_IDLE ? "IDLE" : + mStatus == STATUS_ACTIVE ? 
"ACTIVE" : + "Unknown"; + lines.appendFormat(" Device status: %s\n", status); + if (mStatus == STATUS_ERROR) { + lines.appendFormat(" Error cause: %s\n", mErrorCause.string()); + } + lines.appendFormat(" Stream configuration:\n"); + + if (mInputStream != NULL) { + write(fd, lines.string(), lines.size()); + mInputStream->dump(fd, args); + } else { + lines.appendFormat(" No input stream.\n"); + write(fd, lines.string(), lines.size()); + } + for (size_t i = 0; i < mOutputStreams.size(); i++) { + mOutputStreams[i]->dump(fd,args); + } + + lines = String8(" In-flight requests:\n"); + if (mInFlightMap.size() == 0) { + lines.append(" None\n"); + } else { + for (size_t i = 0; i < mInFlightMap.size(); i++) { + InFlightRequest r = mInFlightMap.valueAt(i); + lines.appendFormat(" Frame %d | Timestamp: %lld, metadata" + " arrived: %s, buffers left: %d\n", mInFlightMap.keyAt(i), + r.captureTimestamp, r.haveResultMetadata ? "true" : "false", + r.numBuffersLeft); + } + } + write(fd, lines.string(), lines.size()); + + if (mHal3Device != NULL) { + lines = String8(" HAL device dump:\n"); + write(fd, lines.string(), lines.size()); + mHal3Device->ops->dump(mHal3Device, fd); + } + + return OK; +} + +const CameraMetadata& Camera3Device::info() const { + ALOGVV("%s: E", __FUNCTION__); + if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED || + mStatus == STATUS_ERROR)) { + ALOGW("%s: Access to static info %s!", __FUNCTION__, + mStatus == STATUS_ERROR ? 
+ "when in error state" : "before init"); + } + return mDeviceInfo; +} + +status_t Camera3Device::capture(CameraMetadata &request) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + // TODO: take ownership of the request + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device not initialized"); + return INVALID_OPERATION; + case STATUS_IDLE: + case STATUS_ACTIVE: + // OK + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + + sp<CaptureRequest> newRequest = setUpRequestLocked(request); + if (newRequest == NULL) { + CLOGE("Can't create capture request"); + return BAD_VALUE; + } + + return mRequestThread->queueRequest(newRequest); +} + + +status_t Camera3Device::setStreamingRequest(const CameraMetadata &request) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device not initialized"); + return INVALID_OPERATION; + case STATUS_IDLE: + case STATUS_ACTIVE: + // OK + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + + sp<CaptureRequest> newRepeatingRequest = setUpRequestLocked(request); + if (newRepeatingRequest == NULL) { + CLOGE("Can't create repeating request"); + return BAD_VALUE; + } + + RequestList newRepeatingRequests; + newRepeatingRequests.push_back(newRepeatingRequest); + + return mRequestThread->setRepeatingRequests(newRepeatingRequests); +} + + +sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked( + const CameraMetadata &request) { + status_t res; + + if (mStatus == STATUS_IDLE) { + res = configureStreamsLocked(); + if (res != OK) { + SET_ERR_L("Can't set up streams: %s (%d)", strerror(-res), res); + return NULL; + } + } + + sp<CaptureRequest> newRequest = 
createCaptureRequest(request); + return newRequest; +} + +status_t Camera3Device::clearStreamingRequest() { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device not initialized"); + return INVALID_OPERATION; + case STATUS_IDLE: + case STATUS_ACTIVE: + // OK + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + + return mRequestThread->clearRepeatingRequests(); +} + +status_t Camera3Device::waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) { + ATRACE_CALL(); + + return mRequestThread->waitUntilRequestProcessed(requestId, timeout); +} + +status_t Camera3Device::createInputStream( + uint32_t width, uint32_t height, int format, int *id) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res; + bool wasActive = false; + + switch (mStatus) { + case STATUS_ERROR: + ALOGE("%s: Device has encountered a serious error", __FUNCTION__); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + ALOGE("%s: Device not initialized", __FUNCTION__); + return INVALID_OPERATION; + case STATUS_IDLE: + // OK + break; + case STATUS_ACTIVE: + ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__); + mRequestThread->setPaused(true); + res = waitUntilDrainedLocked(); + if (res != OK) { + ALOGE("%s: Can't pause captures to reconfigure streams!", + __FUNCTION__); + mStatus = STATUS_ERROR; + return res; + } + wasActive = true; + break; + default: + ALOGE("%s: Unexpected status: %d", __FUNCTION__, mStatus); + return INVALID_OPERATION; + } + assert(mStatus == STATUS_IDLE); + + if (mInputStream != 0) { + ALOGE("%s: Cannot create more than 1 input stream", __FUNCTION__); + return INVALID_OPERATION; + } + + sp<Camera3InputStream> newStream = new Camera3InputStream(mNextStreamId, + width, height, format); + + mInputStream = newStream; + + *id = mNextStreamId++; + 
+ // Continue captures if active at start + if (wasActive) { + ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__); + res = configureStreamsLocked(); + if (res != OK) { + ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)", + __FUNCTION__, mNextStreamId, strerror(-res), res); + return res; + } + mRequestThread->setPaused(false); + } + + return OK; +} + + +status_t Camera3Device::createZslStream( + uint32_t width, uint32_t height, + int depth, + /*out*/ + int *id, + sp<Camera3ZslStream>* zslStream) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res; + bool wasActive = false; + + switch (mStatus) { + case STATUS_ERROR: + ALOGE("%s: Device has encountered a serious error", __FUNCTION__); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + ALOGE("%s: Device not initialized", __FUNCTION__); + return INVALID_OPERATION; + case STATUS_IDLE: + // OK + break; + case STATUS_ACTIVE: + ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__); + mRequestThread->setPaused(true); + res = waitUntilDrainedLocked(); + if (res != OK) { + ALOGE("%s: Can't pause captures to reconfigure streams!", + __FUNCTION__); + mStatus = STATUS_ERROR; + return res; + } + wasActive = true; + break; + default: + ALOGE("%s: Unexpected status: %d", __FUNCTION__, mStatus); + return INVALID_OPERATION; + } + assert(mStatus == STATUS_IDLE); + + if (mInputStream != 0) { + ALOGE("%s: Cannot create more than 1 input stream", __FUNCTION__); + return INVALID_OPERATION; + } + + sp<Camera3ZslStream> newStream = new Camera3ZslStream(mNextStreamId, + width, height, depth); + + res = mOutputStreams.add(mNextStreamId, newStream); + if (res < 0) { + ALOGE("%s: Can't add new stream to set: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + mInputStream = newStream; + + *id = mNextStreamId++; + *zslStream = newStream; + + // Continue captures if active at start + if (wasActive) { + ALOGV("%s: Restarting activity to reconfigure streams", 
__FUNCTION__); + res = configureStreamsLocked(); + if (res != OK) { + ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)", + __FUNCTION__, mNextStreamId, strerror(-res), res); + return res; + } + mRequestThread->setPaused(false); + } + + return OK; +} + +status_t Camera3Device::createStream(sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, int format, size_t size, int *id) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res; + bool wasActive = false; + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device not initialized"); + return INVALID_OPERATION; + case STATUS_IDLE: + // OK + break; + case STATUS_ACTIVE: + ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__); + mRequestThread->setPaused(true); + res = waitUntilDrainedLocked(); + if (res != OK) { + ALOGE("%s: Can't pause captures to reconfigure streams!", + __FUNCTION__); + return res; + } + wasActive = true; + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + assert(mStatus == STATUS_IDLE); + + sp<Camera3OutputStream> newStream; + if (format == HAL_PIXEL_FORMAT_BLOB) { + newStream = new Camera3OutputStream(mNextStreamId, consumer, + width, height, size, format); + } else { + newStream = new Camera3OutputStream(mNextStreamId, consumer, + width, height, format); + } + + res = mOutputStreams.add(mNextStreamId, newStream); + if (res < 0) { + SET_ERR_L("Can't add new stream to set: %s (%d)", strerror(-res), res); + return res; + } + + *id = mNextStreamId++; + + // Continue captures if active at start + if (wasActive) { + ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__); + res = configureStreamsLocked(); + if (res != OK) { + CLOGE("Can't reconfigure device for new stream %d: %s (%d)", + mNextStreamId, strerror(-res), res); + return res; + } + mRequestThread->setPaused(false); + } + + 
return OK; +} + +status_t Camera3Device::createReprocessStreamFromStream(int outputId, int *id) { + ATRACE_CALL(); + (void)outputId; (void)id; + + CLOGE("Unimplemented"); + return INVALID_OPERATION; +} + + +status_t Camera3Device::getStreamInfo(int id, + uint32_t *width, uint32_t *height, uint32_t *format) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device not initialized!"); + return INVALID_OPERATION; + case STATUS_IDLE: + case STATUS_ACTIVE: + // OK + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + + ssize_t idx = mOutputStreams.indexOfKey(id); + if (idx == NAME_NOT_FOUND) { + CLOGE("Stream %d is unknown", id); + return idx; + } + + if (width) *width = mOutputStreams[idx]->getWidth(); + if (height) *height = mOutputStreams[idx]->getHeight(); + if (format) *format = mOutputStreams[idx]->getFormat(); + + return OK; +} + +status_t Camera3Device::setStreamTransform(int id, + int transform) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device not initialized"); + return INVALID_OPERATION; + case STATUS_IDLE: + case STATUS_ACTIVE: + // OK + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + + ssize_t idx = mOutputStreams.indexOfKey(id); + if (idx == NAME_NOT_FOUND) { + CLOGE("Stream %d does not exist", + id); + return BAD_VALUE; + } + + return mOutputStreams.editValueAt(idx)->setTransform(transform); +} + +status_t Camera3Device::deleteStream(int id) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + status_t res; + + ALOGV("%s: Camera %d: Deleting stream %d", __FUNCTION__, mId, id); + + // CameraDevice semantics require device to already be idle before 
+ // deleteStream is called, unlike for createStream. + if (mStatus != STATUS_IDLE) { + ALOGV("%s: Camera %d: Device not idle", __FUNCTION__, mId); + return -EBUSY; + } + + sp<Camera3StreamInterface> deletedStream; + if (mInputStream != NULL && id == mInputStream->getId()) { + deletedStream = mInputStream; + mInputStream.clear(); + } else { + ssize_t idx = mOutputStreams.indexOfKey(id); + if (idx == NAME_NOT_FOUND) { + CLOGE("Stream %d does not exist", id); + return BAD_VALUE; + } + deletedStream = mOutputStreams.editValueAt(idx); + mOutputStreams.removeItem(id); + } + + // Free up the stream endpoint so that it can be used by some other stream + res = deletedStream->disconnect(); + if (res != OK) { + SET_ERR_L("Can't disconnect deleted stream %d", id); + // fall through since we want to still list the stream as deleted. + } + mDeletedStreams.add(deletedStream); + + return res; +} + +status_t Camera3Device::deleteReprocessStream(int id) { + ATRACE_CALL(); + (void)id; + + CLOGE("Unimplemented"); + return INVALID_OPERATION; +} + + +status_t Camera3Device::createDefaultRequest(int templateId, + CameraMetadata *request) { + ATRACE_CALL(); + ALOGV("%s: E", __FUNCTION__); + Mutex::Autolock l(mLock); + + switch (mStatus) { + case STATUS_ERROR: + CLOGE("Device has encountered a serious error"); + return INVALID_OPERATION; + case STATUS_UNINITIALIZED: + CLOGE("Device is not initialized!"); + return INVALID_OPERATION; + case STATUS_IDLE: + case STATUS_ACTIVE: + // OK + break; + default: + SET_ERR_L("Unexpected status: %d", mStatus); + return INVALID_OPERATION; + } + + const camera_metadata_t *rawRequest; + rawRequest = mHal3Device->ops->construct_default_request_settings( + mHal3Device, templateId); + if (rawRequest == NULL) { + SET_ERR_L("HAL is unable to construct default settings for template %d", + templateId); + return DEAD_OBJECT; + } + *request = rawRequest; + + return OK; +} + +status_t Camera3Device::waitUntilDrained() { + ATRACE_CALL(); + Mutex::Autolock l(mLock); 
+ + return waitUntilDrainedLocked(); +} + +status_t Camera3Device::waitUntilDrainedLocked() { + ATRACE_CALL(); + status_t res; + + switch (mStatus) { + case STATUS_UNINITIALIZED: + case STATUS_IDLE: + ALOGV("%s: Already idle", __FUNCTION__); + return OK; + case STATUS_ERROR: + case STATUS_ACTIVE: + // Need to shut down + break; + default: + SET_ERR_L("Unexpected status: %d",mStatus); + return INVALID_OPERATION; + } + + if (mRequestThread != NULL) { + res = mRequestThread->waitUntilPaused(kShutdownTimeout); + if (res != OK) { + SET_ERR_L("Can't stop request thread in %f seconds!", + kShutdownTimeout/1e9); + return res; + } + } + if (mInputStream != NULL) { + res = mInputStream->waitUntilIdle(kShutdownTimeout); + if (res != OK) { + SET_ERR_L("Can't idle input stream %d in %f seconds!", + mInputStream->getId(), kShutdownTimeout/1e9); + return res; + } + } + for (size_t i = 0; i < mOutputStreams.size(); i++) { + res = mOutputStreams.editValueAt(i)->waitUntilIdle(kShutdownTimeout); + if (res != OK) { + SET_ERR_L("Can't idle output stream %d in %f seconds!", + mOutputStreams.keyAt(i), kShutdownTimeout/1e9); + return res; + } + } + + if (mStatus != STATUS_ERROR) { + mStatus = STATUS_IDLE; + } + + return OK; +} + +status_t Camera3Device::setNotifyCallback(NotificationListener *listener) { + ATRACE_CALL(); + Mutex::Autolock l(mOutputLock); + + if (listener != NULL && mListener != NULL) { + ALOGW("%s: Replacing old callback listener", __FUNCTION__); + } + mListener = listener; + + return OK; +} + +status_t Camera3Device::waitForNextFrame(nsecs_t timeout) { + ATRACE_CALL(); + status_t res; + Mutex::Autolock l(mOutputLock); + + while (mResultQueue.empty()) { + res = mResultSignal.waitRelative(mOutputLock, timeout); + if (res == TIMED_OUT) { + return res; + } else if (res != OK) { + ALOGW("%s: Camera %d: No frame in %lld ns: %s (%d)", + __FUNCTION__, mId, timeout, strerror(-res), res); + return res; + } + } + return OK; +} + +status_t Camera3Device::getNextFrame(CameraMetadata 
*frame) { + ATRACE_CALL(); + Mutex::Autolock l(mOutputLock); + + if (mResultQueue.empty()) { + return NOT_ENOUGH_DATA; + } + + CameraMetadata &result = *(mResultQueue.begin()); + frame->acquire(result); + mResultQueue.erase(mResultQueue.begin()); + + return OK; +} + +status_t Camera3Device::triggerAutofocus(uint32_t id) { + ATRACE_CALL(); + + ALOGV("%s: Triggering autofocus, id %d", __FUNCTION__, id); + // Mix-in this trigger into the next request and only the next request. + RequestTrigger trigger[] = { + { + ANDROID_CONTROL_AF_TRIGGER, + ANDROID_CONTROL_AF_TRIGGER_START + }, + { + ANDROID_CONTROL_AF_TRIGGER_ID, + static_cast<int32_t>(id) + }, + }; + + return mRequestThread->queueTrigger(trigger, + sizeof(trigger)/sizeof(trigger[0])); +} + +status_t Camera3Device::triggerCancelAutofocus(uint32_t id) { + ATRACE_CALL(); + + ALOGV("%s: Triggering cancel autofocus, id %d", __FUNCTION__, id); + // Mix-in this trigger into the next request and only the next request. + RequestTrigger trigger[] = { + { + ANDROID_CONTROL_AF_TRIGGER, + ANDROID_CONTROL_AF_TRIGGER_CANCEL + }, + { + ANDROID_CONTROL_AF_TRIGGER_ID, + static_cast<int32_t>(id) + }, + }; + + return mRequestThread->queueTrigger(trigger, + sizeof(trigger)/sizeof(trigger[0])); +} + +status_t Camera3Device::triggerPrecaptureMetering(uint32_t id) { + ATRACE_CALL(); + + ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id); + // Mix-in this trigger into the next request and only the next request. 
+ RequestTrigger trigger[] = { + { + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START + }, + { + ANDROID_CONTROL_AE_PRECAPTURE_ID, + static_cast<int32_t>(id) + }, + }; + + return mRequestThread->queueTrigger(trigger, + sizeof(trigger)/sizeof(trigger[0])); +} + +status_t Camera3Device::pushReprocessBuffer(int reprocessStreamId, + buffer_handle_t *buffer, wp<BufferReleasedListener> listener) { + ATRACE_CALL(); + (void)reprocessStreamId; (void)buffer; (void)listener; + + CLOGE("Unimplemented"); + return INVALID_OPERATION; +} + +/** + * Camera3Device private methods + */ + +sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest( + const CameraMetadata &request) { + ATRACE_CALL(); + status_t res; + + sp<CaptureRequest> newRequest = new CaptureRequest; + newRequest->mSettings = request; + + camera_metadata_entry_t inputStreams = + newRequest->mSettings.find(ANDROID_REQUEST_INPUT_STREAMS); + if (inputStreams.count > 0) { + if (mInputStream == NULL || + mInputStream->getId() != inputStreams.data.u8[0]) { + CLOGE("Request references unknown input stream %d", + inputStreams.data.u8[0]); + return NULL; + } + // Lazy completion of stream configuration (allocation/registration) + // on first use + if (mInputStream->isConfiguring()) { + res = mInputStream->finishConfiguration(mHal3Device); + if (res != OK) { + SET_ERR_L("Unable to finish configuring input stream %d:" + " %s (%d)", + mInputStream->getId(), strerror(-res), res); + return NULL; + } + } + + newRequest->mInputStream = mInputStream; + newRequest->mSettings.erase(ANDROID_REQUEST_INPUT_STREAMS); + } + + camera_metadata_entry_t streams = + newRequest->mSettings.find(ANDROID_REQUEST_OUTPUT_STREAMS); + if (streams.count == 0) { + CLOGE("Zero output streams specified!"); + return NULL; + } + + for (size_t i = 0; i < streams.count; i++) { + int idx = mOutputStreams.indexOfKey(streams.data.u8[i]); + if (idx == NAME_NOT_FOUND) { + CLOGE("Request references unknown stream %d", 
+ streams.data.u8[i]); + return NULL; + } + sp<Camera3OutputStreamInterface> stream = + mOutputStreams.editValueAt(idx); + + // Lazy completion of stream configuration (allocation/registration) + // on first use + if (stream->isConfiguring()) { + res = stream->finishConfiguration(mHal3Device); + if (res != OK) { + SET_ERR_L("Unable to finish configuring stream %d: %s (%d)", + stream->getId(), strerror(-res), res); + return NULL; + } + } + + newRequest->mOutputStreams.push(stream); + } + newRequest->mSettings.erase(ANDROID_REQUEST_OUTPUT_STREAMS); + + return newRequest; +} + +status_t Camera3Device::configureStreamsLocked() { + ATRACE_CALL(); + status_t res; + + if (mStatus != STATUS_IDLE) { + CLOGE("Not idle"); + return INVALID_OPERATION; + } + + // Start configuring the streams + + camera3_stream_configuration config; + + config.num_streams = (mInputStream != NULL) + mOutputStreams.size(); + + Vector<camera3_stream_t*> streams; + streams.setCapacity(config.num_streams); + + if (mInputStream != NULL) { + camera3_stream_t *inputStream; + inputStream = mInputStream->startConfiguration(); + if (inputStream == NULL) { + SET_ERR_L("Can't start input stream configuration"); + return INVALID_OPERATION; + } + streams.add(inputStream); + } + + for (size_t i = 0; i < mOutputStreams.size(); i++) { + + // Don't configure bidi streams twice, nor add them twice to the list + if (mOutputStreams[i].get() == + static_cast<Camera3StreamInterface*>(mInputStream.get())) { + + config.num_streams--; + continue; + } + + camera3_stream_t *outputStream; + outputStream = mOutputStreams.editValueAt(i)->startConfiguration(); + if (outputStream == NULL) { + SET_ERR_L("Can't start output stream configuration"); + return INVALID_OPERATION; + } + streams.add(outputStream); + } + + config.streams = streams.editArray(); + + // Do the HAL configuration; will potentially touch stream + // max_buffers, usage, priv fields. 
+ + res = mHal3Device->ops->configure_streams(mHal3Device, &config); + + if (res != OK) { + SET_ERR_L("Unable to configure streams with HAL: %s (%d)", + strerror(-res), res); + return res; + } + + // Finish all stream configuration immediately. + // TODO: Try to relax this later back to lazy completion, which should be + // faster + + if (mInputStream != NULL && mInputStream->isConfiguring()) { + res = mInputStream->finishConfiguration(mHal3Device); + if (res != OK) { + SET_ERR_L("Can't finish configuring input stream %d: %s (%d)", + mInputStream->getId(), strerror(-res), res); + return res; + } + } + + for (size_t i = 0; i < mOutputStreams.size(); i++) { + sp<Camera3OutputStreamInterface> outputStream = + mOutputStreams.editValueAt(i); + if (outputStream->isConfiguring()) { + res = outputStream->finishConfiguration(mHal3Device); + if (res != OK) { + SET_ERR_L("Can't finish configuring output stream %d: %s (%d)", + outputStream->getId(), strerror(-res), res); + return res; + } + } + } + + // Request thread needs to know to avoid using repeat-last-settings protocol + // across configure_streams() calls + mRequestThread->configurationComplete(); + + // Finish configuring the streams lazily on first reference + + mStatus = STATUS_ACTIVE; + + return OK; +} + +void Camera3Device::setErrorState(const char *fmt, ...) { + Mutex::Autolock l(mLock); + va_list args; + va_start(args, fmt); + + setErrorStateLockedV(fmt, args); + + va_end(args); +} + +void Camera3Device::setErrorStateV(const char *fmt, va_list args) { + Mutex::Autolock l(mLock); + setErrorStateLockedV(fmt, args); +} + +void Camera3Device::setErrorStateLocked(const char *fmt, ...) 
{ + va_list args; + va_start(args, fmt); + + setErrorStateLockedV(fmt, args); + + va_end(args); +} + +void Camera3Device::setErrorStateLockedV(const char *fmt, va_list args) { + // Print out all error messages to log + String8 errorCause = String8::formatV(fmt, args); + ALOGE("Camera %d: %s", mId, errorCause.string()); + + // But only do error state transition steps for the first error + if (mStatus == STATUS_ERROR) return; + + mErrorCause = errorCause; + + mRequestThread->setPaused(true); + mStatus = STATUS_ERROR; +} + +/** + * In-flight request management + */ + +status_t Camera3Device::registerInFlight(int32_t frameNumber, + int32_t numBuffers) { + ATRACE_CALL(); + Mutex::Autolock l(mInFlightLock); + + ssize_t res; + res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers)); + if (res < 0) return res; + + return OK; +} + +/** + * Camera HAL device callback methods + */ + +void Camera3Device::processCaptureResult(const camera3_capture_result *result) { + ATRACE_CALL(); + + status_t res; + + uint32_t frameNumber = result->frame_number; + if (result->result == NULL && result->num_output_buffers == 0) { + SET_ERR("No result data provided by HAL for frame %d", + frameNumber); + return; + } + + // Get capture timestamp from list of in-flight requests, where it was added + // by the shutter notification for this frame. Then update the in-flight + // status and remove the in-flight entry if all result data has been + // received. 
+ nsecs_t timestamp = 0; + { + Mutex::Autolock l(mInFlightLock); + ssize_t idx = mInFlightMap.indexOfKey(frameNumber); + if (idx == NAME_NOT_FOUND) { + SET_ERR("Unknown frame number for capture result: %d", + frameNumber); + return; + } + InFlightRequest &request = mInFlightMap.editValueAt(idx); + timestamp = request.captureTimestamp; + if (timestamp == 0) { + SET_ERR("Called before shutter notify for frame %d", + frameNumber); + return; + } + + if (result->result != NULL) { + if (request.haveResultMetadata) { + SET_ERR("Called multiple times with metadata for frame %d", + frameNumber); + return; + } + request.haveResultMetadata = true; + } + + request.numBuffersLeft -= result->num_output_buffers; + + if (request.numBuffersLeft < 0) { + SET_ERR("Too many buffers returned for frame %d", + frameNumber); + return; + } + + if (request.haveResultMetadata && request.numBuffersLeft == 0) { + mInFlightMap.removeItemsAt(idx, 1); + } + + // Sanity check - if we have too many in-flight frames, something has + // likely gone wrong + if (mInFlightMap.size() > kInFlightWarnLimit) { + CLOGE("In-flight list too large: %d", mInFlightMap.size()); + } + + } + + AlgState cur3aState; + AlgState new3aState; + int32_t aeTriggerId = 0; + int32_t afTriggerId = 0; + + NotificationListener *listener = NULL; + + // Process the result metadata, if provided + if (result->result != NULL) { + Mutex::Autolock l(mOutputLock); + + if (frameNumber != mNextResultFrameNumber) { + SET_ERR("Out-of-order capture result metadata submitted! 
" + "(got frame number %d, expecting %d)", + frameNumber, mNextResultFrameNumber); + return; + } + mNextResultFrameNumber++; + + CameraMetadata &captureResult = + *mResultQueue.insert(mResultQueue.end(), CameraMetadata()); + + captureResult = result->result; + if (captureResult.update(ANDROID_REQUEST_FRAME_COUNT, + (int32_t*)&frameNumber, 1) != OK) { + SET_ERR("Failed to set frame# in metadata (%d)", + frameNumber); + } else { + ALOGVV("%s: Camera %d: Set frame# in metadata (%d)", + __FUNCTION__, mId, frameNumber); + } + + // Check that there's a timestamp in the result metadata + + camera_metadata_entry entry = + captureResult.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count == 0) { + SET_ERR("No timestamp provided by HAL for frame %d!", + frameNumber); + } + if (timestamp != entry.data.i64[0]) { + SET_ERR("Timestamp mismatch between shutter notify and result" + " metadata for frame %d (%lld vs %lld respectively)", + frameNumber, timestamp, entry.data.i64[0]); + } + + // Get 3A states from result metadata + + entry = captureResult.find(ANDROID_CONTROL_AE_STATE); + if (entry.count == 0) { + CLOGE("No AE state provided by HAL for frame %d!", + frameNumber); + } else { + new3aState.aeState = + static_cast<camera_metadata_enum_android_control_ae_state>( + entry.data.u8[0]); + } + + entry = captureResult.find(ANDROID_CONTROL_AF_STATE); + if (entry.count == 0) { + CLOGE("No AF state provided by HAL for frame %d!", + frameNumber); + } else { + new3aState.afState = + static_cast<camera_metadata_enum_android_control_af_state>( + entry.data.u8[0]); + } + + entry = captureResult.find(ANDROID_CONTROL_AWB_STATE); + if (entry.count == 0) { + CLOGE("No AWB state provided by HAL for frame %d!", + frameNumber); + } else { + new3aState.awbState = + static_cast<camera_metadata_enum_android_control_awb_state>( + entry.data.u8[0]); + } + + entry = captureResult.find(ANDROID_CONTROL_AF_TRIGGER_ID); + if (entry.count == 0) { + CLOGE("No AF trigger ID provided by HAL for frame %d!", + 
frameNumber); + } else { + afTriggerId = entry.data.i32[0]; + } + + entry = captureResult.find(ANDROID_CONTROL_AE_PRECAPTURE_ID); + if (entry.count == 0) { + CLOGE("No AE precapture trigger ID provided by HAL" + " for frame %d!", frameNumber); + } else { + aeTriggerId = entry.data.i32[0]; + } + + listener = mListener; + cur3aState = m3AState; + + m3AState = new3aState; + } // scope for mOutputLock + + // Return completed buffers to their streams with the timestamp + + for (size_t i = 0; i < result->num_output_buffers; i++) { + Camera3Stream *stream = + Camera3Stream::cast(result->output_buffers[i].stream); + res = stream->returnBuffer(result->output_buffers[i], timestamp); + // Note: stream may be deallocated at this point, if this buffer was the + // last reference to it. + if (res != OK) { + SET_ERR("Can't return buffer %d for frame %d to its stream: " + " %s (%d)", i, frameNumber, strerror(-res), res); + } + } + + // Finally, dispatch any 3A change events to listeners if we got metadata + + if (result->result != NULL) { + mResultSignal.signal(); + } + + if (result->result != NULL && listener != NULL) { + if (new3aState.aeState != cur3aState.aeState) { + ALOGVV("%s: AE state changed from 0x%x to 0x%x", + __FUNCTION__, cur3aState.aeState, new3aState.aeState); + listener->notifyAutoExposure(new3aState.aeState, aeTriggerId); + } + if (new3aState.afState != cur3aState.afState) { + ALOGVV("%s: AF state changed from 0x%x to 0x%x", + __FUNCTION__, cur3aState.afState, new3aState.afState); + listener->notifyAutoFocus(new3aState.afState, afTriggerId); + } + if (new3aState.awbState != cur3aState.awbState) { + listener->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId); + } + } + +} + +void Camera3Device::notify(const camera3_notify_msg *msg) { + NotificationListener *listener; + { + Mutex::Autolock l(mOutputLock); + listener = mListener; + } + + if (msg == NULL) { + SET_ERR("HAL sent NULL notify message!"); + return; + } + + switch (msg->type) { + case 
CAMERA3_MSG_ERROR: { + int streamId = 0; + if (msg->message.error.error_stream != NULL) { + Camera3Stream *stream = + Camera3Stream::cast( + msg->message.error.error_stream); + streamId = stream->getId(); + } + if (listener != NULL) { + listener->notifyError(msg->message.error.error_code, + msg->message.error.frame_number, streamId); + } + break; + } + case CAMERA3_MSG_SHUTTER: { + ssize_t idx; + uint32_t frameNumber = msg->message.shutter.frame_number; + nsecs_t timestamp = msg->message.shutter.timestamp; + // Verify ordering of shutter notifications + { + Mutex::Autolock l(mOutputLock); + if (frameNumber != mNextShutterFrameNumber) { + SET_ERR("Shutter notification out-of-order. Expected " + "notification for frame %d, got frame %d", + mNextShutterFrameNumber, frameNumber); + break; + } + mNextShutterFrameNumber++; + } + + // Set timestamp for the request in the in-flight tracking + { + Mutex::Autolock l(mInFlightLock); + idx = mInFlightMap.indexOfKey(frameNumber); + if (idx >= 0) { + mInFlightMap.editValueAt(idx).captureTimestamp = timestamp; + } + } + if (idx < 0) { + SET_ERR("Shutter notification for non-existent frame number %d", + frameNumber); + break; + } + + // Call listener, if any + if (listener != NULL) { + listener->notifyShutter(frameNumber, timestamp); + } + break; + } + default: + SET_ERR("Unknown notify message from HAL: %d", + msg->type); + } +} + +/** + * RequestThread inner class methods + */ + +Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent, + camera3_device_t *hal3Device) : + Thread(false), + mParent(parent), + mHal3Device(hal3Device), + mId(getId(parent)), + mReconfigured(false), + mDoPause(false), + mPaused(true), + mFrameNumber(0), + mLatestRequestId(NAME_NOT_FOUND) { +} + +void Camera3Device::RequestThread::configurationComplete() { + Mutex::Autolock l(mRequestLock); + mReconfigured = true; +} + +status_t Camera3Device::RequestThread::queueRequest( + sp<CaptureRequest> request) { + Mutex::Autolock l(mRequestLock); + 
mRequestQueue.push_back(request); + + return OK; +} + + +status_t Camera3Device::RequestThread::queueTrigger( + RequestTrigger trigger[], + size_t count) { + + Mutex::Autolock l(mTriggerMutex); + status_t ret; + + for (size_t i = 0; i < count; ++i) { + ret = queueTriggerLocked(trigger[i]); + + if (ret != OK) { + return ret; + } + } + + return OK; +} + +int Camera3Device::RequestThread::getId(const wp<Camera3Device> &device) { + sp<Camera3Device> d = device.promote(); + if (d != NULL) return d->mId; + return 0; +} + +status_t Camera3Device::RequestThread::queueTriggerLocked( + RequestTrigger trigger) { + + uint32_t tag = trigger.metadataTag; + ssize_t index = mTriggerMap.indexOfKey(tag); + + switch (trigger.getTagType()) { + case TYPE_BYTE: + // fall-through + case TYPE_INT32: + break; + default: + ALOGE("%s: Type not supported: 0x%x", __FUNCTION__, + trigger.getTagType()); + return INVALID_OPERATION; + } + + /** + * Collect only the latest trigger, since we only have 1 field + * in the request settings per trigger tag, and can't send more than 1 + * trigger per request. 
+ */ + if (index != NAME_NOT_FOUND) { + mTriggerMap.editValueAt(index) = trigger; + } else { + mTriggerMap.add(tag, trigger); + } + + return OK; +} + +status_t Camera3Device::RequestThread::setRepeatingRequests( + const RequestList &requests) { + Mutex::Autolock l(mRequestLock); + mRepeatingRequests.clear(); + mRepeatingRequests.insert(mRepeatingRequests.begin(), + requests.begin(), requests.end()); + return OK; +} + +status_t Camera3Device::RequestThread::clearRepeatingRequests() { + Mutex::Autolock l(mRequestLock); + mRepeatingRequests.clear(); + return OK; +} + +void Camera3Device::RequestThread::setPaused(bool paused) { + Mutex::Autolock l(mPauseLock); + mDoPause = paused; + mDoPauseSignal.signal(); +} + +status_t Camera3Device::RequestThread::waitUntilPaused(nsecs_t timeout) { + status_t res; + Mutex::Autolock l(mPauseLock); + while (!mPaused) { + res = mPausedSignal.waitRelative(mPauseLock, timeout); + if (res == TIMED_OUT) { + return res; + } + } + return OK; +} + +status_t Camera3Device::RequestThread::waitUntilRequestProcessed( + int32_t requestId, nsecs_t timeout) { + Mutex::Autolock l(mLatestRequestMutex); + status_t res; + while (mLatestRequestId != requestId) { + nsecs_t startTime = systemTime(); + + res = mLatestRequestSignal.waitRelative(mLatestRequestMutex, timeout); + if (res != OK) return res; + + timeout -= (systemTime() - startTime); + } + + return OK; +} + + + +bool Camera3Device::RequestThread::threadLoop() { + + status_t res; + + // Handle paused state. 
+ if (waitIfPaused()) { + return true; + } + + // Get work to do + + sp<CaptureRequest> nextRequest = waitForNextRequest(); + if (nextRequest == NULL) { + return true; + } + + // Create request to HAL + camera3_capture_request_t request = camera3_capture_request_t(); + Vector<camera3_stream_buffer_t> outputBuffers; + + // Insert any queued triggers (before metadata is locked) + int32_t triggerCount; + res = insertTriggers(nextRequest); + if (res < 0) { + SET_ERR("RequestThread: Unable to insert triggers " + "(capture request %d, HAL device: %s (%d)", + (mFrameNumber+1), strerror(-res), res); + cleanUpFailedRequest(request, nextRequest, outputBuffers); + return false; + } + triggerCount = res; + + bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0); + + // If the request is the same as last, or we had triggers last time + if (mPrevRequest != nextRequest || triggersMixedIn) { + /** + * The request should be presorted so accesses in HAL + * are O(logn). Sidenote, sorting a sorted metadata is nop. 
+ */ + nextRequest->mSettings.sort(); + request.settings = nextRequest->mSettings.getAndLock(); + mPrevRequest = nextRequest; + ALOGVV("%s: Request settings are NEW", __FUNCTION__); + + IF_ALOGV() { + camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t(); + find_camera_metadata_ro_entry( + request.settings, + ANDROID_CONTROL_AF_TRIGGER, + &e + ); + if (e.count > 0) { + ALOGV("%s: Request (frame num %d) had AF trigger 0x%x", + __FUNCTION__, + mFrameNumber+1, + e.data.u8[0]); + } + } + } else { + // leave request.settings NULL to indicate 'reuse latest given' + ALOGVV("%s: Request settings are REUSED", + __FUNCTION__); + } + + camera3_stream_buffer_t inputBuffer; + + // Fill in buffers + + if (nextRequest->mInputStream != NULL) { + request.input_buffer = &inputBuffer; + res = nextRequest->mInputStream->getInputBuffer(&inputBuffer); + if (res != OK) { + SET_ERR("RequestThread: Can't get input buffer, skipping request:" + " %s (%d)", strerror(-res), res); + cleanUpFailedRequest(request, nextRequest, outputBuffers); + return true; + } + } else { + request.input_buffer = NULL; + } + + outputBuffers.insertAt(camera3_stream_buffer_t(), 0, + nextRequest->mOutputStreams.size()); + request.output_buffers = outputBuffers.array(); + for (size_t i = 0; i < nextRequest->mOutputStreams.size(); i++) { + res = nextRequest->mOutputStreams.editItemAt(i)-> + getBuffer(&outputBuffers.editItemAt(i)); + if (res != OK) { + SET_ERR("RequestThread: Can't get output buffer, skipping request:" + "%s (%d)", strerror(-res), res); + cleanUpFailedRequest(request, nextRequest, outputBuffers); + return true; + } + request.num_output_buffers++; + } + + request.frame_number = mFrameNumber++; + + // Log request in the in-flight queue + sp<Camera3Device> parent = mParent.promote(); + if (parent == NULL) { + CLOGE("RequestThread: Parent is gone"); + cleanUpFailedRequest(request, nextRequest, outputBuffers); + return false; + } + + res = parent->registerInFlight(request.frame_number, + 
request.num_output_buffers); + if (res != OK) { + SET_ERR("RequestThread: Unable to register new in-flight request:" + " %s (%d)", strerror(-res), res); + cleanUpFailedRequest(request, nextRequest, outputBuffers); + return false; + } + + // Submit request and block until ready for next one + + res = mHal3Device->ops->process_capture_request(mHal3Device, &request); + if (res != OK) { + SET_ERR("RequestThread: Unable to submit capture request %d to HAL" + " device: %s (%d)", request.frame_number, strerror(-res), res); + cleanUpFailedRequest(request, nextRequest, outputBuffers); + return false; + } + + if (request.settings != NULL) { + nextRequest->mSettings.unlock(request.settings); + } + + // Remove any previously queued triggers (after unlock) + res = removeTriggers(mPrevRequest); + if (res != OK) { + SET_ERR("RequestThread: Unable to remove triggers " + "(capture request %d, HAL device: %s (%d)", + request.frame_number, strerror(-res), res); + return false; + } + mPrevTriggers = triggerCount; + + // Read android.request.id from the request settings metadata + // - inform waitUntilRequestProcessed thread of a new request ID + { + Mutex::Autolock al(mLatestRequestMutex); + + camera_metadata_entry_t requestIdEntry = + nextRequest->mSettings.find(ANDROID_REQUEST_ID); + if (requestIdEntry.count > 0) { + mLatestRequestId = requestIdEntry.data.i32[0]; + } else { + ALOGW("%s: Did not have android.request.id set in the request", + __FUNCTION__); + mLatestRequestId = NAME_NOT_FOUND; + } + + mLatestRequestSignal.signal(); + } + + // Return input buffer back to framework + if (request.input_buffer != NULL) { + Camera3Stream *stream = + Camera3Stream::cast(request.input_buffer->stream); + res = stream->returnInputBuffer(*(request.input_buffer)); + // Note: stream may be deallocated at this point, if this buffer was the + // last reference to it. 
+ if (res != OK) { + ALOGE("%s: RequestThread: Can't return input buffer for frame %d to" + " its stream:%s (%d)", __FUNCTION__, + request.frame_number, strerror(-res), res); + // TODO: Report error upstream + } + } + + + + return true; +} + +void Camera3Device::RequestThread::cleanUpFailedRequest( + camera3_capture_request_t &request, + sp<CaptureRequest> &nextRequest, + Vector<camera3_stream_buffer_t> &outputBuffers) { + + if (request.settings != NULL) { + nextRequest->mSettings.unlock(request.settings); + } + if (request.input_buffer != NULL) { + request.input_buffer->status = CAMERA3_BUFFER_STATUS_ERROR; + nextRequest->mInputStream->returnInputBuffer(*(request.input_buffer)); + } + for (size_t i = 0; i < request.num_output_buffers; i++) { + outputBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR; + nextRequest->mOutputStreams.editItemAt(i)->returnBuffer( + outputBuffers[i], 0); + } +} + +sp<Camera3Device::CaptureRequest> + Camera3Device::RequestThread::waitForNextRequest() { + status_t res; + sp<CaptureRequest> nextRequest; + + // Optimized a bit for the simple steady-state case (single repeating + // request), to avoid putting that request in the queue temporarily. + Mutex::Autolock l(mRequestLock); + + while (mRequestQueue.empty()) { + if (!mRepeatingRequests.empty()) { + // Always atomically enqueue all requests in a repeating request + // list. Guarantees a complete in-sequence set of captures to + // application. 
+ const RequestList &requests = mRepeatingRequests; + RequestList::const_iterator firstRequest = + requests.begin(); + nextRequest = *firstRequest; + mRequestQueue.insert(mRequestQueue.end(), + ++firstRequest, + requests.end()); + // No need to wait any longer + break; + } + + res = mRequestSignal.waitRelative(mRequestLock, kRequestTimeout); + + if (res == TIMED_OUT) { + // Signal that we're paused by starvation + Mutex::Autolock pl(mPauseLock); + if (mPaused == false) { + mPaused = true; + mPausedSignal.signal(); + } + // Stop waiting for now and let thread management happen + return NULL; + } + } + + if (nextRequest == NULL) { + // Don't have a repeating request already in hand, so queue + // must have an entry now. + RequestList::iterator firstRequest = + mRequestQueue.begin(); + nextRequest = *firstRequest; + mRequestQueue.erase(firstRequest); + } + + // Not paused + Mutex::Autolock pl(mPauseLock); + mPaused = false; + + // Check if we've reconfigured since last time, and reset the preview + // request if so. Can't use 'NULL request == repeat' across configure calls. + if (mReconfigured) { + mPrevRequest.clear(); + mReconfigured = false; + } + + return nextRequest; +} + +bool Camera3Device::RequestThread::waitIfPaused() { + status_t res; + Mutex::Autolock l(mPauseLock); + while (mDoPause) { + // Signal that we're paused by request + if (mPaused == false) { + mPaused = true; + mPausedSignal.signal(); + } + res = mDoPauseSignal.waitRelative(mPauseLock, kRequestTimeout); + if (res == TIMED_OUT) { + return true; + } + } + // We don't set mPaused to false here, because waitForNextRequest needs + // to further manage the paused state in case of starvation. + return false; +} + +void Camera3Device::RequestThread::setErrorState(const char *fmt, ...) 
{ + sp<Camera3Device> parent = mParent.promote(); + if (parent != NULL) { + va_list args; + va_start(args, fmt); + + parent->setErrorStateV(fmt, args); + + va_end(args); + } +} + +status_t Camera3Device::RequestThread::insertTriggers( + const sp<CaptureRequest> &request) { + + Mutex::Autolock al(mTriggerMutex); + + CameraMetadata &metadata = request->mSettings; + size_t count = mTriggerMap.size(); + + for (size_t i = 0; i < count; ++i) { + RequestTrigger trigger = mTriggerMap.valueAt(i); + + uint32_t tag = trigger.metadataTag; + camera_metadata_entry entry = metadata.find(tag); + + if (entry.count > 0) { + /** + * Already has an entry for this trigger in the request. + * Rewrite it with our requested trigger value. + */ + RequestTrigger oldTrigger = trigger; + + oldTrigger.entryValue = entry.data.u8[0]; + + mTriggerReplacedMap.add(tag, oldTrigger); + } else { + /** + * More typical, no trigger entry, so we just add it + */ + mTriggerRemovedMap.add(tag, trigger); + } + + status_t res; + + switch (trigger.getTagType()) { + case TYPE_BYTE: { + uint8_t entryValue = static_cast<uint8_t>(trigger.entryValue); + res = metadata.update(tag, + &entryValue, + /*count*/1); + break; + } + case TYPE_INT32: + res = metadata.update(tag, + &trigger.entryValue, + /*count*/1); + break; + default: + ALOGE("%s: Type not supported: 0x%x", + __FUNCTION__, + trigger.getTagType()); + return INVALID_OPERATION; + } + + if (res != OK) { + ALOGE("%s: Failed to update request metadata with trigger tag %s" + ", value %d", __FUNCTION__, trigger.getTagName(), + trigger.entryValue); + return res; + } + + ALOGV("%s: Mixed in trigger %s, value %d", __FUNCTION__, + trigger.getTagName(), + trigger.entryValue); + } + + mTriggerMap.clear(); + + return count; +} + +status_t Camera3Device::RequestThread::removeTriggers( + const sp<CaptureRequest> &request) { + Mutex::Autolock al(mTriggerMutex); + + CameraMetadata &metadata = request->mSettings; + + /** + * Replace all old entries with their old values. 
+ */ + for (size_t i = 0; i < mTriggerReplacedMap.size(); ++i) { + RequestTrigger trigger = mTriggerReplacedMap.valueAt(i); + + status_t res; + + uint32_t tag = trigger.metadataTag; + switch (trigger.getTagType()) { + case TYPE_BYTE: { + uint8_t entryValue = static_cast<uint8_t>(trigger.entryValue); + res = metadata.update(tag, + &entryValue, + /*count*/1); + break; + } + case TYPE_INT32: + res = metadata.update(tag, + &trigger.entryValue, + /*count*/1); + break; + default: + ALOGE("%s: Type not supported: 0x%x", + __FUNCTION__, + trigger.getTagType()); + return INVALID_OPERATION; + } + + if (res != OK) { + ALOGE("%s: Failed to restore request metadata with trigger tag %s" + ", trigger value %d", __FUNCTION__, + trigger.getTagName(), trigger.entryValue); + return res; + } + } + mTriggerReplacedMap.clear(); + + /** + * Remove all new entries. + */ + for (size_t i = 0; i < mTriggerRemovedMap.size(); ++i) { + RequestTrigger trigger = mTriggerRemovedMap.valueAt(i); + status_t res = metadata.erase(trigger.metadataTag); + + if (res != OK) { + ALOGE("%s: Failed to erase metadata with trigger tag %s" + ", trigger value %d", __FUNCTION__, + trigger.getTagName(), trigger.entryValue); + return res; + } + } + mTriggerRemovedMap.clear(); + + return OK; +} + + + +/** + * Static callback forwarding methods from HAL to instance + */ + +void Camera3Device::sProcessCaptureResult(const camera3_callback_ops *cb, + const camera3_capture_result *result) { + Camera3Device *d = + const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb)); + d->processCaptureResult(result); +} + +void Camera3Device::sNotify(const camera3_callback_ops *cb, + const camera3_notify_msg *msg) { + Camera3Device *d = + const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb)); + d->notify(msg); +} + +}; // namespace android diff --git a/services/camera/libcameraservice/Camera3Device.h b/services/camera/libcameraservice/Camera3Device.h new file mode 100644 index 0000000..7a8c22a --- /dev/null +++ 
b/services/camera/libcameraservice/Camera3Device.h @@ -0,0 +1,423 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA3DEVICE_H +#define ANDROID_SERVERS_CAMERA3DEVICE_H + +#include <utils/Condition.h> +#include <utils/Errors.h> +#include <utils/List.h> +#include <utils/Mutex.h> +#include <utils/Thread.h> + +#include "CameraDeviceBase.h" +#include "camera3/Camera3Stream.h" +#include "camera3/Camera3OutputStream.h" +#include "camera3/Camera3ZslStream.h" + +#include "hardware/camera3.h" + +/** + * Function pointer types with C calling convention to + * use for HAL callback functions. + */ +extern "C" { + typedef void (callbacks_process_capture_result_t)( + const struct camera3_callback_ops *, + const camera3_capture_result_t *); + + typedef void (callbacks_notify_t)( + const struct camera3_callback_ops *, + const camera3_notify_msg_t *); +} + +namespace android { + +/** + * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 + */ +class Camera3Device : + public CameraDeviceBase, + private camera3_callback_ops { + public: + Camera3Device(int id); + + virtual ~Camera3Device(); + + /** + * CameraDeviceBase interface + */ + + virtual int getId() const; + + // Transitions to idle state on success. 
+ virtual status_t initialize(camera_module_t *module); + virtual status_t disconnect(); + virtual status_t dump(int fd, const Vector<String16> &args); + virtual const CameraMetadata& info() const; + + // Capture and setStreamingRequest will configure streams if currently in + // idle state + virtual status_t capture(CameraMetadata &request); + virtual status_t setStreamingRequest(const CameraMetadata &request); + virtual status_t clearStreamingRequest(); + + virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout); + + // Actual stream creation/deletion is delayed until first request is submitted + // If adding streams while actively capturing, will pause device before adding + // stream, reconfiguring device, and unpausing. + virtual status_t createStream(sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, int format, size_t size, + int *id); + virtual status_t createInputStream( + uint32_t width, uint32_t height, int format, + int *id); + virtual status_t createZslStream( + uint32_t width, uint32_t height, + int depth, + /*out*/ + int *id, + sp<camera3::Camera3ZslStream>* zslStream); + virtual status_t createReprocessStreamFromStream(int outputId, int *id); + + virtual status_t getStreamInfo(int id, + uint32_t *width, uint32_t *height, uint32_t *format); + virtual status_t setStreamTransform(int id, int transform); + + virtual status_t deleteStream(int id); + virtual status_t deleteReprocessStream(int id); + + virtual status_t createDefaultRequest(int templateId, CameraMetadata *request); + + // Transitions to the idle state on success + virtual status_t waitUntilDrained(); + + virtual status_t setNotifyCallback(NotificationListener *listener); + virtual status_t waitForNextFrame(nsecs_t timeout); + virtual status_t getNextFrame(CameraMetadata *frame); + + virtual status_t triggerAutofocus(uint32_t id); + virtual status_t triggerCancelAutofocus(uint32_t id); + virtual status_t triggerPrecaptureMetering(uint32_t id); + + virtual 
status_t pushReprocessBuffer(int reprocessStreamId, + buffer_handle_t *buffer, wp<BufferReleasedListener> listener); + + private: + static const size_t kInFlightWarnLimit = 20; + static const nsecs_t kShutdownTimeout = 5000000000; // 5 sec + struct RequestTrigger; + + Mutex mLock; + + /**** Scope for mLock ****/ + + const int mId; + camera3_device_t *mHal3Device; + + CameraMetadata mDeviceInfo; + vendor_tag_query_ops_t mVendorTagOps; + + enum { + STATUS_ERROR, + STATUS_UNINITIALIZED, + STATUS_IDLE, + STATUS_ACTIVE + } mStatus; + + // Tracking cause of fatal errors when in STATUS_ERROR + String8 mErrorCause; + + // Mapping of stream IDs to stream instances + typedef KeyedVector<int, sp<camera3::Camera3OutputStreamInterface> > + StreamSet; + + StreamSet mOutputStreams; + sp<camera3::Camera3Stream> mInputStream; + int mNextStreamId; + + // Need to hold on to stream references until configure completes. + Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams; + + /**** End scope for mLock ****/ + + class CaptureRequest : public LightRefBase<CaptureRequest> { + public: + CameraMetadata mSettings; + sp<camera3::Camera3Stream> mInputStream; + Vector<sp<camera3::Camera3OutputStreamInterface> > + mOutputStreams; + }; + typedef List<sp<CaptureRequest> > RequestList; + + /** + * Lock-held version of waitUntilDrained. Will transition to IDLE on + * success. + */ + status_t waitUntilDrainedLocked(); + + /** + * Do common work for setting up a streaming or single capture request. + * On success, will transition to ACTIVE if in IDLE. + */ + sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request); + + /** + * Build a CaptureRequest request from the CameraDeviceBase request + * settings. + */ + sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request); + + /** + * Take the currently-defined set of streams and configure the HAL to use + * them. This is a long-running operation (may be several hundered ms). 
+ */ + status_t configureStreamsLocked(); + + /** + * Set device into an error state due to some fatal failure, and set an + * error message to indicate why. Only the first call's message will be + * used. The message is also sent to the log. + */ + void setErrorState(const char *fmt, ...); + void setErrorStateV(const char *fmt, va_list args); + void setErrorStateLocked(const char *fmt, ...); + void setErrorStateLockedV(const char *fmt, va_list args); + + struct RequestTrigger { + // Metadata tag number, e.g. android.control.aePrecaptureTrigger + uint32_t metadataTag; + // Metadata value, e.g. 'START' or the trigger ID + int32_t entryValue; + + // The last part of the fully qualified path, e.g. afTrigger + const char *getTagName() const { + return get_camera_metadata_tag_name(metadataTag) ?: "NULL"; + } + + // e.g. TYPE_BYTE, TYPE_INT32, etc. + int getTagType() const { + return get_camera_metadata_tag_type(metadataTag); + } + }; + + /** + * Thread for managing capture request submission to HAL device. + */ + class RequestThread : public Thread { + + public: + + RequestThread(wp<Camera3Device> parent, + camera3_device_t *hal3Device); + + /** + * Call after stream (re)-configuration is completed. + */ + void configurationComplete(); + + /** + * Set or clear the list of repeating requests. Does not block + * on either. Use waitUntilPaused to wait until request queue + * has emptied out. + */ + status_t setRepeatingRequests(const RequestList& requests); + status_t clearRepeatingRequests(); + + status_t queueRequest(sp<CaptureRequest> request); + + /** + * Queue a trigger to be dispatched with the next outgoing + * process_capture_request. The settings for that request only + * will be temporarily rewritten to add the trigger tag/value. + * Subsequent requests will not be rewritten (for this tag). + */ + status_t queueTrigger(RequestTrigger trigger[], size_t count); + + /** + * Pause/unpause the capture thread. 
Doesn't block, so use + * waitUntilPaused to wait until the thread is paused. + */ + void setPaused(bool paused); + + /** + * Wait until thread is paused, either due to setPaused(true) + * or due to lack of input requests. Returns TIMED_OUT in case + * the thread does not pause within the timeout. + */ + status_t waitUntilPaused(nsecs_t timeout); + + /** + * Wait until thread processes the capture request with settings' + * android.request.id == requestId. + * + * Returns TIMED_OUT in case the thread does not process the request + * within the timeout. + */ + status_t waitUntilRequestProcessed(int32_t requestId, nsecs_t timeout); + + protected: + + virtual bool threadLoop(); + + private: + static int getId(const wp<Camera3Device> &device); + + status_t queueTriggerLocked(RequestTrigger trigger); + // Mix-in queued triggers into this request + int32_t insertTriggers(const sp<CaptureRequest> &request); + // Purge the queued triggers from this request, + // restoring the old field values for those tags. + status_t removeTriggers(const sp<CaptureRequest> &request); + + static const nsecs_t kRequestTimeout = 50e6; // 50 ms + + // Waits for a request, or returns NULL if times out. + sp<CaptureRequest> waitForNextRequest(); + + // Return buffers, etc, for a request that couldn't be fully + // constructed. The buffers will be returned in the ERROR state + // to mark them as not having valid data. + // All arguments will be modified. 
+ void cleanUpFailedRequest(camera3_capture_request_t &request, + sp<CaptureRequest> &nextRequest, + Vector<camera3_stream_buffer_t> &outputBuffers); + + // Pause handling + bool waitIfPaused(); + + // Relay error to parent device object setErrorState + void setErrorState(const char *fmt, ...); + + wp<Camera3Device> mParent; + camera3_device_t *mHal3Device; + + const int mId; + + Mutex mRequestLock; + Condition mRequestSignal; + RequestList mRequestQueue; + RequestList mRepeatingRequests; + + bool mReconfigured; + + // Used by waitIfPaused, waitForNextRequest, and waitUntilPaused + Mutex mPauseLock; + bool mDoPause; + Condition mDoPauseSignal; + bool mPaused; + Condition mPausedSignal; + + sp<CaptureRequest> mPrevRequest; + int32_t mPrevTriggers; + + uint32_t mFrameNumber; + + Mutex mLatestRequestMutex; + Condition mLatestRequestSignal; + // android.request.id for latest process_capture_request + int32_t mLatestRequestId; + + typedef KeyedVector<uint32_t/*tag*/, RequestTrigger> TriggerMap; + Mutex mTriggerMutex; + TriggerMap mTriggerMap; + TriggerMap mTriggerRemovedMap; + TriggerMap mTriggerReplacedMap; + }; + sp<RequestThread> mRequestThread; + + /** + * In-flight queue for tracking completion of capture requests. + */ + + struct InFlightRequest { + // Set by notify() SHUTTER call. 
+ nsecs_t captureTimestamp; + // Set by process_capture_result call with valid metadata + bool haveResultMetadata; + // Decremented by calls to process_capture_result with valid output + // buffers + int numBuffersLeft; + + InFlightRequest() : + captureTimestamp(0), + haveResultMetadata(false), + numBuffersLeft(0) { + } + + explicit InFlightRequest(int numBuffers) : + captureTimestamp(0), + haveResultMetadata(false), + numBuffersLeft(numBuffers) { + } + }; + // Map from frame number to the in-flight request state + typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap; + + Mutex mInFlightLock; // Protects mInFlightMap + InFlightMap mInFlightMap; + + status_t registerInFlight(int32_t frameNumber, int32_t numBuffers); + + /** + * Output result queue and current HAL device 3A state + */ + + // Lock for output side of device + Mutex mOutputLock; + + /**** Scope for mOutputLock ****/ + + uint32_t mNextResultFrameNumber; + uint32_t mNextShutterFrameNumber; + List<CameraMetadata> mResultQueue; + Condition mResultSignal; + NotificationListener *mListener; + + struct AlgState { + camera_metadata_enum_android_control_ae_state aeState; + camera_metadata_enum_android_control_af_state afState; + camera_metadata_enum_android_control_awb_state awbState; + + AlgState() : + aeState(ANDROID_CONTROL_AE_STATE_INACTIVE), + afState(ANDROID_CONTROL_AF_STATE_INACTIVE), + awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) { + } + } m3AState; + + /**** End scope for mOutputLock ****/ + + /** + * Callback functions from HAL device + */ + void processCaptureResult(const camera3_capture_result *result); + + void notify(const camera3_notify_msg *msg); + + /** + * Static callback forwarding methods from HAL to instance + */ + static callbacks_process_capture_result_t sProcessCaptureResult; + + static callbacks_notify_t sNotify; + +}; // class Camera3Device + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/CameraClient.cpp 
b/services/camera/libcameraservice/CameraClient.cpp index b930c02..e577fa3 100644 --- a/services/camera/libcameraservice/CameraClient.cpp +++ b/services/camera/libcameraservice/CameraClient.cpp @@ -18,7 +18,6 @@ //#define LOG_NDEBUG 0 #include <cutils/properties.h> -#include <gui/SurfaceTextureClient.h> #include <gui/Surface.h> #include "CameraClient.h" @@ -34,15 +33,14 @@ static int getCallingPid() { return IPCThreadState::self()->getCallingPid(); } -static int getCallingUid() { - return IPCThreadState::self()->getCallingUid(); -} - CameraClient::CameraClient(const sp<CameraService>& cameraService, const sp<ICameraClient>& cameraClient, - int cameraId, int cameraFacing, int clientPid, int servicePid): - Client(cameraService, cameraClient, - cameraId, cameraFacing, clientPid, servicePid) + const String16& clientPackageName, + int cameraId, int cameraFacing, + int clientPid, int clientUid, + int servicePid): + Client(cameraService, cameraClient, clientPackageName, + cameraId, cameraFacing, clientPid, clientUid, servicePid) { int callingPid = getCallingPid(); LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId); @@ -62,10 +60,17 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService, status_t CameraClient::initialize(camera_module_t *module) { int callingPid = getCallingPid(); + status_t res; + LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId); + // Verify ops permissions + res = startCameraOps(); + if (res != OK) { + return res; + } + char camera_device_name[10]; - status_t res; snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId); mHardware = new CameraHardwareInterface(camera_device_name); @@ -112,7 +117,7 @@ status_t CameraClient::dump(int fd, const Vector<String16>& args) { size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) PID: %d\n", mCameraId, - getCameraClient()->asBinder().get(), + getRemoteCallback()->asBinder().get(), mClientPid); len = (len > SIZE - 1) ? 
SIZE - 1 : len; write(fd, buffer, len); @@ -168,10 +173,10 @@ status_t CameraClient::unlock() { return INVALID_OPERATION; } mClientPid = 0; - LOG1("clear mCameraClient (pid %d)", callingPid); + LOG1("clear mRemoteCallback (pid %d)", callingPid); // we need to remove the reference to ICameraClient so that when the app // goes away, the reference count goes to 0. - mCameraClient.clear(); + mRemoteCallback.clear(); } return result; } @@ -188,14 +193,15 @@ status_t CameraClient::connect(const sp<ICameraClient>& client) { return EBUSY; } - if (mCameraClient != 0 && (client->asBinder() == mCameraClient->asBinder())) { + if (mRemoteCallback != 0 && + (client->asBinder() == mRemoteCallback->asBinder())) { LOG1("Connect to the same client"); return NO_ERROR; } mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP; mClientPid = callingPid; - mCameraClient = client; + mRemoteCallback = client; LOG1("connect X (pid %d)", callingPid); return NO_ERROR; @@ -306,22 +312,22 @@ status_t CameraClient::setPreviewWindow(const sp<IBinder>& binder, status_t CameraClient::setPreviewDisplay(const sp<Surface>& surface) { LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid()); - sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0); + sp<IBinder> binder(surface != 0 ? 
surface->getIGraphicBufferProducer()->asBinder() : 0); sp<ANativeWindow> window(surface); return setPreviewWindow(binder, window); } -// set the SurfaceTexture that the preview will use +// set the SurfaceTextureClient that the preview will use status_t CameraClient::setPreviewTexture( - const sp<ISurfaceTexture>& surfaceTexture) { - LOG1("setPreviewTexture(%p) (pid %d)", surfaceTexture.get(), + const sp<IGraphicBufferProducer>& bufferProducer) { + LOG1("setPreviewTexture(%p) (pid %d)", bufferProducer.get(), getCallingPid()); sp<IBinder> binder; sp<ANativeWindow> window; - if (surfaceTexture != 0) { - binder = surfaceTexture->asBinder(); - window = new SurfaceTextureClient(surfaceTexture); + if (bufferProducer != 0) { + binder = bufferProducer->asBinder(); + window = new Surface(bufferProducer); } return setPreviewWindow(binder, window); } @@ -775,7 +781,7 @@ void CameraClient::handleShutter(void) { mCameraService->playSound(CameraService::SOUND_SHUTTER); } - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; if (c != 0) { mLock.unlock(); c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0); @@ -806,7 +812,7 @@ void CameraClient::handlePreviewData(int32_t msgType, } // hold a strong pointer to the client - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; // clear callback flags if no client or one-shot mode if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) { @@ -836,7 +842,7 @@ void CameraClient::handlePreviewData(int32_t msgType, void CameraClient::handlePostview(const sp<IMemory>& mem) { disableMsgType(CAMERA_MSG_POSTVIEW_FRAME); - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; mLock.unlock(); if (c != 0) { c->dataCallback(CAMERA_MSG_POSTVIEW_FRAME, mem, NULL); @@ -851,7 +857,7 @@ void CameraClient::handleRawPicture(const sp<IMemory>& mem) { size_t size; sp<IMemoryHeap> heap = mem->getMemory(&offset, &size); - sp<ICameraClient> c = mCameraClient; + 
sp<ICameraClient> c = mRemoteCallback; mLock.unlock(); if (c != 0) { c->dataCallback(CAMERA_MSG_RAW_IMAGE, mem, NULL); @@ -862,7 +868,7 @@ void CameraClient::handleRawPicture(const sp<IMemory>& mem) { void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) { disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE); - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; mLock.unlock(); if (c != 0) { c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL); @@ -872,7 +878,7 @@ void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) { void CameraClient::handleGenericNotify(int32_t msgType, int32_t ext1, int32_t ext2) { - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; mLock.unlock(); if (c != 0) { c->notifyCallback(msgType, ext1, ext2); @@ -881,7 +887,7 @@ void CameraClient::handleGenericNotify(int32_t msgType, void CameraClient::handleGenericData(int32_t msgType, const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) { - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; mLock.unlock(); if (c != 0) { c->dataCallback(msgType, dataPtr, metadata); @@ -890,7 +896,7 @@ void CameraClient::handleGenericData(int32_t msgType, void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) { - sp<ICameraClient> c = mCameraClient; + sp<ICameraClient> c = mRemoteCallback; mLock.unlock(); if (c != 0) { c->dataCallbackTimestamp(timestamp, msgType, dataPtr); diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/CameraClient.h index 2f31c4e..7f0cb29 100644 --- a/services/camera/libcameraservice/CameraClient.h +++ b/services/camera/libcameraservice/CameraClient.h @@ -24,6 +24,11 @@ namespace android { class MemoryHeapBase; class CameraHardwareInterface; +/** + * Interface between android.hardware.Camera API and Camera HAL device for version + * CAMERA_DEVICE_API_VERSION_1_0. 
+ */ + class CameraClient : public CameraService::Client { public: @@ -33,7 +38,7 @@ public: virtual status_t lock(); virtual status_t unlock(); virtual status_t setPreviewDisplay(const sp<Surface>& surface); - virtual status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture); + virtual status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer); virtual void setPreviewCallbackFlag(int flag); virtual status_t startPreview(); virtual void stopPreview(); @@ -53,9 +58,11 @@ public: // Interface used by CameraService CameraClient(const sp<CameraService>& cameraService, const sp<ICameraClient>& cameraClient, + const String16& clientPackageName, int cameraId, int cameraFacing, int clientPid, + int clientUid, int servicePid); ~CameraClient(); @@ -124,7 +131,7 @@ private: // Ensures atomicity among the public methods mutable Mutex mLock; - // This is a binder of Surface or SurfaceTexture. + // This is a binder of Surface or Surface. sp<IBinder> mSurface; sp<ANativeWindow> mPreviewWindow; diff --git a/services/camera/libcameraservice/CameraDeviceBase.cpp b/services/camera/libcameraservice/CameraDeviceBase.cpp new file mode 100644 index 0000000..6c4e87f --- /dev/null +++ b/services/camera/libcameraservice/CameraDeviceBase.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "CameraDeviceBase.h" + +namespace android { + +/** + * Base class destructors + */ +CameraDeviceBase::~CameraDeviceBase() { +} + +CameraDeviceBase::NotificationListener::~NotificationListener() { +} + +} // namespace android diff --git a/services/camera/libcameraservice/CameraDeviceBase.h b/services/camera/libcameraservice/CameraDeviceBase.h new file mode 100644 index 0000000..8c457d9 --- /dev/null +++ b/services/camera/libcameraservice/CameraDeviceBase.h @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H +#define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H + +#include <utils/RefBase.h> +#include <utils/String8.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Timers.h> + +#include "hardware/camera2.h" +#include "camera/CameraMetadata.h" + +namespace android { + +/** + * Base interface for version >= 2 camera device classes, which interface to + * camera HAL device versions >= 2. 
+ */ +class CameraDeviceBase : public virtual RefBase { + public: + virtual ~CameraDeviceBase(); + + /** + * The device's camera ID + */ + virtual int getId() const = 0; + + virtual status_t initialize(camera_module_t *module) = 0; + virtual status_t disconnect() = 0; + + virtual status_t dump(int fd, const Vector<String16>& args) = 0; + + /** + * The device's static characteristics metadata buffer + */ + virtual const CameraMetadata& info() const = 0; + + /** + * Submit request for capture. The CameraDevice takes ownership of the + * passed-in buffer. + */ + virtual status_t capture(CameraMetadata &request) = 0; + + /** + * Submit request for streaming. The CameraDevice makes a copy of the + * passed-in buffer and the caller retains ownership. + */ + virtual status_t setStreamingRequest(const CameraMetadata &request) = 0; + + /** + * Clear the streaming request slot. + */ + virtual status_t clearStreamingRequest() = 0; + + /** + * Wait until a request with the given ID has been dequeued by the + * HAL. Returns TIMED_OUT if the timeout duration is reached. Returns + * immediately if the latest request received by the HAL has this id. + */ + virtual status_t waitUntilRequestReceived(int32_t requestId, + nsecs_t timeout) = 0; + + /** + * Create an output stream of the requested size and format. + * + * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device selects + * an appropriate format; it can be queried with getStreamInfo. + * + * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must be + * equal to the size in bytes of the buffers to allocate for the stream. For + * other formats, the size parameter is ignored. + */ + virtual status_t createStream(sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, int format, size_t size, + int *id) = 0; + + /** + * Create an input reprocess stream that uses buffers from an existing + * output stream. 
+ */ + virtual status_t createReprocessStreamFromStream(int outputId, int *id) = 0; + + /** + * Get information about a given stream. + */ + virtual status_t getStreamInfo(int id, + uint32_t *width, uint32_t *height, uint32_t *format) = 0; + + /** + * Set stream gralloc buffer transform + */ + virtual status_t setStreamTransform(int id, int transform) = 0; + + /** + * Delete stream. Must not be called if there are requests in flight which + * reference that stream. + */ + virtual status_t deleteStream(int id) = 0; + + /** + * Delete reprocess stream. Must not be called if there are requests in + * flight which reference that stream. + */ + virtual status_t deleteReprocessStream(int id) = 0; + + /** + * Create a metadata buffer with fields that the HAL device believes are + * best for the given use case + */ + virtual status_t createDefaultRequest(int templateId, + CameraMetadata *request) = 0; + + /** + * Wait until all requests have been processed. Returns INVALID_OPERATION if + * the streaming slot is not empty, or TIMED_OUT if the requests haven't + * finished processing in 10 seconds. + */ + virtual status_t waitUntilDrained() = 0; + + /** + * Abstract class for HAL notification listeners + */ + class NotificationListener { + public: + // Refer to the Camera2 HAL definition for notification definitions + virtual void notifyError(int errorCode, int arg1, int arg2) = 0; + virtual void notifyShutter(int frameNumber, nsecs_t timestamp) = 0; + virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0; + virtual void notifyAutoExposure(uint8_t newState, int triggerId) = 0; + virtual void notifyAutoWhitebalance(uint8_t newState, + int triggerId) = 0; + protected: + virtual ~NotificationListener(); + }; + + /** + * Connect HAL notifications to a listener. Overwrites previous + * listener. Set to NULL to stop receiving notifications. 
+ */ + virtual status_t setNotifyCallback(NotificationListener *listener) = 0; + + /** + * Wait for a new frame to be produced, with timeout in nanoseconds. + * Returns TIMED_OUT when no frame produced within the specified duration + */ + virtual status_t waitForNextFrame(nsecs_t timeout) = 0; + + /** + * Get next metadata frame from the frame queue. Returns NULL if the queue + * is empty; caller takes ownership of the metadata buffer. + */ + virtual status_t getNextFrame(CameraMetadata *frame) = 0; + + /** + * Trigger auto-focus. The latest ID used in a trigger autofocus or cancel + * autofocus call will be returned by the HAL in all subsequent AF + * notifications. + */ + virtual status_t triggerAutofocus(uint32_t id) = 0; + + /** + * Cancel auto-focus. The latest ID used in a trigger autofocus/cancel + * autofocus call will be returned by the HAL in all subsequent AF + * notifications. + */ + virtual status_t triggerCancelAutofocus(uint32_t id) = 0; + + /** + * Trigger pre-capture metering. The latest ID used in a trigger pre-capture + * call will be returned by the HAL in all subsequent AE and AWB + * notifications. 
+ */ + virtual status_t triggerPrecaptureMetering(uint32_t id) = 0; + + /** + * Abstract interface for clients that want to listen to reprocess buffer + * release events + */ + struct BufferReleasedListener : public virtual RefBase { + virtual void onBufferReleased(buffer_handle_t *handle) = 0; + }; + + /** + * Push a buffer to be reprocessed into a reprocessing stream, and + * provide a listener to call once the buffer is returned by the HAL + */ + virtual status_t pushReprocessBuffer(int reprocessStreamId, + buffer_handle_t *buffer, wp<BufferReleasedListener> listener) = 0; +}; + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/CameraHardwareInterface.h b/services/camera/libcameraservice/CameraHardwareInterface.h index 05ac9fa..87b2807 100644 --- a/services/camera/libcameraservice/CameraHardwareInterface.h +++ b/services/camera/libcameraservice/CameraHardwareInterface.h @@ -47,7 +47,8 @@ typedef void (*data_callback_timestamp)(nsecs_t timestamp, /** * CameraHardwareInterface.h defines the interface to the * camera hardware abstraction layer, used for setting and getting - * parameters, live previewing, and taking pictures. + * parameters, live previewing, and taking pictures. It is used for + * HAL devices with version CAMERA_DEVICE_API_VERSION_1_0 only. * * It is a referenced counted interface with RefBase as its base class. * CameraService calls openCameraHardware() to retrieve a strong pointer to the @@ -56,24 +57,18 @@ typedef void (*data_callback_timestamp)(nsecs_t timestamp, * * -# After CameraService calls openCameraHardware(), getParameters() and * setParameters() are used to initialize the camera instance. - * CameraService calls getPreviewHeap() to establish access to the - * preview heap so it can be registered with SurfaceFlinger for - * efficient display updating while in preview mode. - * -# startPreview() is called. 
The camera instance then periodically - * sends the message CAMERA_MSG_PREVIEW_FRAME (if enabled) each time - * a new preview frame is available. If data callback code needs to use - * this memory after returning, it must copy the data. + * -# startPreview() is called. * - * Prior to taking a picture, CameraService calls autofocus(). When auto + * Prior to taking a picture, CameraService often calls autofocus(). When auto * focusing has completed, the camera instance sends a CAMERA_MSG_FOCUS notification, * which informs the application whether focusing was successful. The camera instance * only sends this message once and it is up to the application to call autoFocus() * again if refocusing is desired. * * CameraService calls takePicture() to request the camera instance take a - * picture. At this point, if a shutter, postview, raw, and/or compressed callback - * is desired, the corresponding message must be enabled. As with CAMERA_MSG_PREVIEW_FRAME, - * any memory provided in a data callback must be copied if it's needed after returning. + * picture. At this point, if a shutter, postview, raw, and/or compressed + * callback is desired, the corresponding message must be enabled. Any memory + * provided in a data callback must be copied if it's needed after returning. 
*/ class CameraHardwareInterface : public virtual RefBase { @@ -427,7 +422,7 @@ public: /** * Dump state of the camera hardware */ - status_t dump(int fd, const Vector<String16>& args) const + status_t dump(int fd, const Vector<String16>& /*args*/) const { ALOGV("%s(%s)", __FUNCTION__, mName.string()); if (mDevice->ops->dump) @@ -584,9 +579,10 @@ private: #endif static int __lock_buffer(struct preview_stream_ops* w, - buffer_handle_t* buffer) + buffer_handle_t* /*buffer*/) { ANativeWindow *a = anw(w); + (void)a; return 0; } diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp index 124d24d..757a781 100644 --- a/services/camera/libcameraservice/CameraService.cpp +++ b/services/camera/libcameraservice/CameraService.cpp @@ -22,13 +22,13 @@ #include <sys/types.h> #include <pthread.h> +#include <binder/AppOpsManager.h> #include <binder/IPCThreadState.h> #include <binder/IServiceManager.h> #include <binder/MemoryBase.h> #include <binder/MemoryHeapBase.h> #include <cutils/atomic.h> #include <cutils/properties.h> -#include <gui/SurfaceTextureClient.h> #include <gui/Surface.h> #include <hardware/hardware.h> #include <media/AudioSystem.h> @@ -40,6 +40,7 @@ #include "CameraService.h" #include "CameraClient.h" #include "Camera2Client.h" +#include "ProCamera2Client.h" namespace android { @@ -65,6 +66,20 @@ static int getCallingUid() { return IPCThreadState::self()->getCallingUid(); } +extern "C" { +static void camera_device_status_change( + const struct camera_module_callbacks* callbacks, + int camera_id, + int new_status) { + sp<CameraService> cs = const_cast<CameraService*>( + static_cast<const CameraService*>(callbacks)); + + cs->onDeviceStatusChanged( + camera_id, + new_status); +} +} // extern "C" + // ---------------------------------------------------------------------------- // This is ugly and only safe if we never re-create the CameraService, but @@ -72,14 +87,22 @@ static int getCallingUid() { static 
CameraService *gCameraService; CameraService::CameraService() -:mSoundRef(0), mModule(0) + :mSoundRef(0), mModule(0) { ALOGI("CameraService started (pid=%d)", getpid()); gCameraService = this; + + for (size_t i = 0; i < MAX_CAMERAS; ++i) { + mStatusList[i] = ICameraServiceListener::STATUS_PRESENT; + } + + this->camera_device_status_change = android::camera_device_status_change; } void CameraService::onFirstRef() { + LOG1("CameraService::onFirstRef"); + BnCameraService::onFirstRef(); if (hw_get_module(CAMERA_HARDWARE_MODULE_ID, @@ -88,6 +111,7 @@ void CameraService::onFirstRef() mNumberOfCameras = 0; } else { + ALOGI("Loaded \"%s\" camera module", mModule->common.name); mNumberOfCameras = mModule->get_number_of_cameras(); if (mNumberOfCameras > MAX_CAMERAS) { ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).", @@ -97,6 +121,11 @@ void CameraService::onFirstRef() for (int i = 0; i < mNumberOfCameras; i++) { setCameraFree(i); } + + if (mModule->common.module_api_version >= + CAMERA_MODULE_API_VERSION_2_1) { + mModule->set_callbacks(this); + } } } @@ -110,6 +139,67 @@ CameraService::~CameraService() { gCameraService = NULL; } +void CameraService::onDeviceStatusChanged(int cameraId, + int newStatus) +{ + ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__, + cameraId, newStatus); + + if (cameraId < 0 || cameraId >= MAX_CAMERAS) { + ALOGE("%s: Bad camera ID %d", __FUNCTION__, cameraId); + return; + } + + if ((int)getStatus(cameraId) == newStatus) { + ALOGE("%s: State transition to the same status 0x%x not allowed", + __FUNCTION__, (uint32_t)newStatus); + return; + } + + /* don't do this in updateStatus + since it is also called from connect and we could get into a deadlock */ + if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) { + Vector<sp<BasicClient> > clientsToDisconnect; + { + Mutex::Autolock al(mServiceLock); + + /* Find all clients that we need to disconnect */ + sp<Client> client = mClient[cameraId].promote(); + if (client.get() != NULL) { + 
clientsToDisconnect.push_back(client); + } + + int i = cameraId; + for (size_t j = 0; j < mProClientList[i].size(); ++j) { + sp<ProClient> cl = mProClientList[i][j].promote(); + if (cl != NULL) { + clientsToDisconnect.push_back(cl); + } + } + } + + /* now disconnect them. don't hold the lock + or we can get into a deadlock */ + + for (size_t i = 0; i < clientsToDisconnect.size(); ++i) { + sp<BasicClient> client = clientsToDisconnect[i]; + + client->disconnect(); + /** + * The remote app will no longer be able to call methods on the + * client since the client PID will be reset to 0 + */ + } + + ALOGV("%s: After unplug, disconnected %d clients", + __FUNCTION__, clientsToDisconnect.size()); + } + + updateStatus( + static_cast<ICameraServiceListener::Status>(newStatus), cameraId); + +} + int32_t CameraService::getNumberOfCameras() { return mNumberOfCameras; } @@ -131,22 +221,69 @@ status_t CameraService::getCameraInfo(int cameraId, return rc; } -sp<ICamera> CameraService::connect( - const sp<ICameraClient>& cameraClient, int cameraId) { +int CameraService::getDeviceVersion(int cameraId, int* facing) { + struct camera_info info; + if (mModule->get_camera_info(cameraId, &info) != OK) { + return -1; + } + + int deviceVersion; + if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_0) { + deviceVersion = info.device_version; + } else { + deviceVersion = CAMERA_DEVICE_API_VERSION_1_0; + } + + if (facing) { + *facing = info.facing; + } + + return deviceVersion; +} + +bool CameraService::isValidCameraId(int cameraId) { + int facing; + int deviceVersion = getDeviceVersion(cameraId, &facing); + + switch(deviceVersion) { + case CAMERA_DEVICE_API_VERSION_1_0: + case CAMERA_DEVICE_API_VERSION_2_0: + case CAMERA_DEVICE_API_VERSION_2_1: + case CAMERA_DEVICE_API_VERSION_3_0: + return true; + default: + return false; + } + + return false; +} + +bool CameraService::validateConnect(int cameraId, + /*inout*/ + int& clientUid) const { + int callingPid = getCallingPid(); - 
LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId); + if (clientUid == USE_CALLING_UID) { + clientUid = getCallingUid(); + } else { + // We only trust our own process to forward client UIDs + if (callingPid != getpid()) { + ALOGE("CameraService::connect X (pid %d) rejected (don't trust clientUid)", + callingPid); + return false; + } + } if (!mModule) { ALOGE("Camera HAL module not loaded"); - return NULL; + return false; } - sp<Client> client; if (cameraId < 0 || cameraId >= mNumberOfCameras) { ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).", callingPid, cameraId); - return NULL; + return false; } char value[PROPERTY_VALUE_MAX]; @@ -154,80 +291,282 @@ sp<ICamera> CameraService::connect( if (strcmp(value, "1") == 0) { // Camera is disabled by DevicePolicyManager. ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid); - return NULL; + return false; } - Mutex::Autolock lock(mServiceLock); + ICameraServiceListener::Status currentStatus = getStatus(cameraId); + if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) { + ALOGI("Camera is not plugged in," + " connect X (pid %d) rejected", callingPid); + return false; + } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) { + ALOGI("Camera is enumerating," + " connect X (pid %d) rejected", callingPid); + return false; + } + // Else don't check for STATUS_NOT_AVAILABLE. 
+ // -- It's done implicitly in canConnectUnsafe /w the mBusy array + + return true; +} + +bool CameraService::canConnectUnsafe(int cameraId, + const String16& clientPackageName, + const sp<IBinder>& remoteCallback, + sp<Client> &client) { + String8 clientName8(clientPackageName); + int callingPid = getCallingPid(); + if (mClient[cameraId] != 0) { client = mClient[cameraId].promote(); if (client != 0) { - if (cameraClient->asBinder() == client->getCameraClient()->asBinder()) { + if (remoteCallback == client->getRemoteCallback()->asBinder()) { LOG1("CameraService::connect X (pid %d) (the same client)", callingPid); - return client; + return true; } else { - ALOGW("CameraService::connect X (pid %d) rejected (existing client).", - callingPid); - return NULL; + // TODOSC: need to support 1 regular client, + // multiple shared clients here + ALOGW("CameraService::connect X (pid %d) rejected" + " (existing client).", callingPid); + return false; } } mClient[cameraId].clear(); } + /* + mBusy is set to false as the last step of the Client destructor, + after which it is guaranteed that the Client destructor has finished ( + including any inherited destructors) + + We only need this for a Client subclasses since we don't allow + multiple Clents to be opened concurrently, but multiple BasicClient + would be fine + */ if (mBusy[cameraId]) { - ALOGW("CameraService::connect X (pid %d) rejected" - " (camera %d is still busy).", callingPid, cameraId); - return NULL; + ALOGW("CameraService::connect X (pid %d, \"%s\") rejected" + " (camera %d is still busy).", callingPid, + clientName8.string(), cameraId); + return false; } - struct camera_info info; - if (mModule->get_camera_info(cameraId, &info) != OK) { - ALOGE("Invalid camera id %d", cameraId); + return true; +} + +sp<ICamera> CameraService::connect( + const sp<ICameraClient>& cameraClient, + int cameraId, + const String16& clientPackageName, + int clientUid) { + + String8 clientName8(clientPackageName); + int callingPid = 
getCallingPid(); + + LOG1("CameraService::connect E (pid %d \"%s\", id %d)", callingPid, + clientName8.string(), cameraId); + + if (!validateConnect(cameraId, /*inout*/clientUid)) { return NULL; } - int deviceVersion; - if (mModule->common.module_api_version == CAMERA_MODULE_API_VERSION_2_0) { - deviceVersion = info.device_version; - } else { - deviceVersion = CAMERA_DEVICE_API_VERSION_1_0; - } + sp<Client> client; - switch(deviceVersion) { - case CAMERA_DEVICE_API_VERSION_1_0: - client = new CameraClient(this, cameraClient, cameraId, - info.facing, callingPid, getpid()); - break; - case CAMERA_DEVICE_API_VERSION_2_0: - client = new Camera2Client(this, cameraClient, cameraId, - info.facing, callingPid, getpid()); - break; - default: - ALOGE("Unknown camera device HAL version: %d", deviceVersion); - return NULL; + { + Mutex::Autolock lock(mServiceLock); + if (!canConnectUnsafe(cameraId, clientPackageName, + cameraClient->asBinder(), + /*out*/client)) { + return NULL; + } else if (client.get() != NULL) { + return client; + } + + int facing = -1; + int deviceVersion = getDeviceVersion(cameraId, &facing); + + // If there are other non-exclusive users of the camera, + // this will tear them down before we can reuse the camera + if (isValidCameraId(cameraId)) { + // transition from PRESENT -> NOT_AVAILABLE + updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE, + cameraId); + } + + switch(deviceVersion) { + case CAMERA_DEVICE_API_VERSION_1_0: + client = new CameraClient(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + break; + case CAMERA_DEVICE_API_VERSION_2_0: + case CAMERA_DEVICE_API_VERSION_2_1: + case CAMERA_DEVICE_API_VERSION_3_0: + client = new Camera2Client(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid(), + deviceVersion); + break; + case -1: + ALOGE("Invalid camera id %d", cameraId); + return NULL; + default: + ALOGE("Unknown camera device HAL version: %d", 
deviceVersion); + return NULL; + } + + if (!connectFinishUnsafe(client, client->asBinder())) { + // this is probably not recoverable.. maybe the client can try again + // OK: we can only get here if we were originally in PRESENT state + updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId); + + return NULL; + } + + mClient[cameraId] = client; + LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, + getpid()); } + // important: release the mutex here so the client can call back + // into the service from its destructor (can be at the end of the call) + return client; +} + +bool CameraService::connectFinishUnsafe(const sp<BasicClient>& client, + const sp<IBinder>& clientBinder) { if (client->initialize(mModule) != OK) { + return false; + } + + clientBinder->linkToDeath(this); + + return true; +} + +sp<IProCameraUser> CameraService::connect( + const sp<IProCameraCallbacks>& cameraCb, + int cameraId, + const String16& clientPackageName, + int clientUid) +{ + String8 clientName8(clientPackageName); + int callingPid = getCallingPid(); + + LOG1("CameraService::connectPro E (pid %d \"%s\", id %d)", callingPid, + clientName8.string(), cameraId); + + if (!validateConnect(cameraId, /*inout*/clientUid)) { return NULL; } - cameraClient->asBinder()->linkToDeath(this); + sp<ProClient> client; + { + Mutex::Autolock lock(mServiceLock); + { + sp<Client> client; + if (!canConnectUnsafe(cameraId, clientPackageName, + cameraCb->asBinder(), + /*out*/client)) { + return NULL; + } + } + + int facing = -1; + int deviceVersion = getDeviceVersion(cameraId, &facing); + + switch(deviceVersion) { + case CAMERA_DEVICE_API_VERSION_1_0: + ALOGE("Camera id %d uses HALv1, doesn't support ProCamera", + cameraId); + return NULL; + break; + case CAMERA_DEVICE_API_VERSION_2_0: + case CAMERA_DEVICE_API_VERSION_2_1: + client = new ProCamera2Client(this, cameraCb, String16(), + cameraId, facing, callingPid, USE_CALLING_UID, getpid()); + break; + case -1: + ALOGE("Invalid camera id 
%d", cameraId); + return NULL; + default: + ALOGE("Unknown camera device HAL version: %d", deviceVersion); + return NULL; + } + + if (!connectFinishUnsafe(client, client->asBinder())) { + return NULL; + } + + mProClientList[cameraId].push(client); + + LOG1("CameraService::connectPro X (id %d, this pid is %d)", cameraId, + getpid()); + } + // important: release the mutex here so the client can call back + // into the service from its destructor (can be at the end of the call) - mClient[cameraId] = client; - LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid()); return client; } -void CameraService::removeClient(const sp<ICameraClient>& cameraClient) { +status_t CameraService::addListener( + const sp<ICameraServiceListener>& listener) { + ALOGV("%s: Add listener %p", __FUNCTION__, listener.get()); + + Mutex::Autolock lock(mServiceLock); + + Vector<sp<ICameraServiceListener> >::iterator it, end; + for (it = mListenerList.begin(); it != mListenerList.end(); ++it) { + if ((*it)->asBinder() == listener->asBinder()) { + ALOGW("%s: Tried to add listener %p which was already subscribed", + __FUNCTION__, listener.get()); + return ALREADY_EXISTS; + } + } + + mListenerList.push_back(listener); + + /* Immediately signal current status to this listener only */ + { + Mutex::Autolock m(mStatusMutex) ; + int numCams = getNumberOfCameras(); + for (int i = 0; i < numCams; ++i) { + listener->onStatusChanged(mStatusList[i], i); + } + } + + return OK; +} +status_t CameraService::removeListener( + const sp<ICameraServiceListener>& listener) { + ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get()); + + Mutex::Autolock lock(mServiceLock); + + Vector<sp<ICameraServiceListener> >::iterator it; + for (it = mListenerList.begin(); it != mListenerList.end(); ++it) { + if ((*it)->asBinder() == listener->asBinder()) { + mListenerList.erase(it); + return OK; + } + } + + ALOGW("%s: Tried to remove a listener %p which was not subscribed", + __FUNCTION__, 
listener.get()); + + return BAD_VALUE; +} + +void CameraService::removeClientByRemote(const wp<IBinder>& remoteBinder) { int callingPid = getCallingPid(); - LOG1("CameraService::removeClient E (pid %d)", callingPid); + LOG1("CameraService::removeClientByRemote E (pid %d)", callingPid); // Declare this before the lock to make absolutely sure the // destructor won't be called with the lock held. Mutex::Autolock lock(mServiceLock); int outIndex; - sp<Client> client = findClientUnsafe(cameraClient->asBinder(), outIndex); + sp<Client> client = findClientUnsafe(remoteBinder, outIndex); if (client != 0) { // Found our camera, clear and leave. @@ -235,9 +574,50 @@ void CameraService::removeClient(const sp<ICameraClient>& cameraClient) { mClient[outIndex].clear(); client->unlinkToDeath(this); + } else { + + sp<ProClient> clientPro = findProClientUnsafe(remoteBinder); + + if (clientPro != NULL) { + // Found our camera, clear and leave. + LOG1("removeClient: clear pro %p", clientPro.get()); + + clientPro->getRemoteCallback()->asBinder()->unlinkToDeath(this); + } } - LOG1("CameraService::removeClient X (pid %d)", callingPid); + LOG1("CameraService::removeClientByRemote X (pid %d)", callingPid); +} + +sp<CameraService::ProClient> CameraService::findProClientUnsafe( + const wp<IBinder>& cameraCallbacksRemote) +{ + sp<ProClient> clientPro; + + for (int i = 0; i < mNumberOfCameras; ++i) { + Vector<size_t> removeIdx; + + for (size_t j = 0; j < mProClientList[i].size(); ++j) { + wp<ProClient> cl = mProClientList[i][j]; + + sp<ProClient> clStrong = cl.promote(); + if (clStrong != NULL && clStrong->getRemote() == cameraCallbacksRemote) { + clientPro = clStrong; + break; + } else if (clStrong == NULL) { + // mark to clean up dead ptr + removeIdx.push(j); + } + } + + // remove stale ptrs (in reverse so the indices dont change) + for (ssize_t j = (ssize_t)removeIdx.size() - 1; j >= 0; --j) { + mProClientList[i].removeAt(removeIdx[j]); + } + + } + + return clientPro; } 
sp<CameraService::Client> CameraService::findClientUnsafe( @@ -251,7 +631,7 @@ sp<CameraService::Client> CameraService::findClientUnsafe( if (mClient[i] == 0) continue; // Promote mClient. It can fail if we are called from this path: - // Client::~Client() -> disconnect() -> removeClient(). + // Client::~Client() -> disconnect() -> removeClientByRemote(). client = mClient[i].promote(); // Clean up stale client entry @@ -260,7 +640,7 @@ sp<CameraService::Client> CameraService::findClientUnsafe( continue; } - if (cameraClient == client->getCameraClient()->asBinder()) { + if (cameraClient == client->getRemoteCallback()->asBinder()) { // Found our camera outIndex = i; return client; @@ -281,12 +661,12 @@ Mutex* CameraService::getClientLockById(int cameraId) { return &mClientLock[cameraId]; } -sp<CameraService::Client> CameraService::getClientByRemote( +sp<CameraService::BasicClient> CameraService::getClientByRemote( const wp<IBinder>& cameraClient) { // Declare this before the lock to make absolutely sure the // destructor won't be called with the lock held. 
- sp<Client> client; + sp<BasicClient> client; Mutex::Autolock lock(mServiceLock); @@ -301,6 +681,7 @@ status_t CameraService::onTransact( // Permission checks switch (code) { case BnCameraService::CONNECT: + case BnCameraService::CONNECT_PRO: const int pid = getCallingPid(); const int self_pid = getpid(); if (pid != self_pid) { @@ -389,31 +770,133 @@ void CameraService::playSound(sound_kind kind) { CameraService::Client::Client(const sp<CameraService>& cameraService, const sp<ICameraClient>& cameraClient, - int cameraId, int cameraFacing, int clientPid, int servicePid) { + const String16& clientPackageName, + int cameraId, int cameraFacing, + int clientPid, uid_t clientUid, + int servicePid) : + CameraService::BasicClient(cameraService, cameraClient->asBinder(), + clientPackageName, + cameraId, cameraFacing, + clientPid, clientUid, + servicePid) +{ int callingPid = getCallingPid(); LOG1("Client::Client E (pid %d, id %d)", callingPid, cameraId); - mCameraService = cameraService; - mCameraClient = cameraClient; - mCameraId = cameraId; - mCameraFacing = cameraFacing; - mClientPid = clientPid; - mServicePid = servicePid; - mDestructionStarted = false; + mRemoteCallback = cameraClient; cameraService->setCameraBusy(cameraId); cameraService->loadSound(); + LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId); } // tear down the client CameraService::Client::~Client() { - mCameraService->releaseSound(); + ALOGV("~Client"); + mDestructionStarted = true; + mCameraService->releaseSound(); // unconditionally disconnect. 
function is idempotent Client::disconnect(); } +CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService, + const sp<IBinder>& remoteCallback, + const String16& clientPackageName, + int cameraId, int cameraFacing, + int clientPid, uid_t clientUid, + int servicePid): + mClientPackageName(clientPackageName) +{ + mCameraService = cameraService; + mRemoteBinder = remoteCallback; + mCameraId = cameraId; + mCameraFacing = cameraFacing; + mClientPid = clientPid; + mClientUid = clientUid; + mServicePid = servicePid; + mOpsActive = false; + mDestructionStarted = false; +} + +CameraService::BasicClient::~BasicClient() { + ALOGV("~BasicClient"); + mDestructionStarted = true; +} + +void CameraService::BasicClient::disconnect() { + ALOGV("BasicClient::disconnect"); + mCameraService->removeClientByRemote(mRemoteBinder); + // client shouldn't be able to call into us anymore + mClientPid = 0; +} + +status_t CameraService::BasicClient::startCameraOps() { + int32_t res; + + mOpsCallback = new OpsCallback(this); + + { + ALOGV("%s: Start camera ops, package name = %s, client UID = %d", + __FUNCTION__, String8(mClientPackageName).string(), mClientUid); + } + + mAppOpsManager.startWatchingMode(AppOpsManager::OP_CAMERA, + mClientPackageName, mOpsCallback); + res = mAppOpsManager.startOp(AppOpsManager::OP_CAMERA, + mClientUid, mClientPackageName); + + if (res != AppOpsManager::MODE_ALLOWED) { + ALOGI("Camera %d: Access for \"%s\" has been revoked", + mCameraId, String8(mClientPackageName).string()); + return PERMISSION_DENIED; + } + mOpsActive = true; + return OK; +} + +status_t CameraService::BasicClient::finishCameraOps() { + if (mOpsActive) { + mAppOpsManager.finishOp(AppOpsManager::OP_CAMERA, mClientUid, + mClientPackageName); + mOpsActive = false; + } + mAppOpsManager.stopWatchingMode(mOpsCallback); + mOpsCallback.clear(); + + return OK; +} + +void CameraService::BasicClient::opChanged(int32_t op, const String16& packageName) { + String8 name(packageName); + 
String8 myName(mClientPackageName); + + if (op != AppOpsManager::OP_CAMERA) { + ALOGW("Unexpected app ops notification received: %d", op); + return; + } + + int32_t res; + res = mAppOpsManager.checkOp(AppOpsManager::OP_CAMERA, + mClientUid, mClientPackageName); + ALOGV("checkOp returns: %d, %s ", res, + res == AppOpsManager::MODE_ALLOWED ? "ALLOWED" : + res == AppOpsManager::MODE_IGNORED ? "IGNORED" : + res == AppOpsManager::MODE_ERRORED ? "ERRORED" : + "UNKNOWN"); + + if (res != AppOpsManager::MODE_ALLOWED) { + ALOGI("Camera %d: Access for \"%s\" revoked", mCameraId, + myName.string()); + // Reset the client PID to allow server-initiated disconnect, + // and to prevent further calls by client. + mClientPid = getCallingPid(); + notifyError(); + disconnect(); + } +} + // ---------------------------------------------------------------------------- Mutex* CameraService::Client::getClientLockFromCookie(void* user) { @@ -436,10 +919,62 @@ CameraService::Client* CameraService::Client::getClientFromCookie(void* user) { return client; } +void CameraService::Client::notifyError() { + mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0); +} + // NOTE: function is idempotent void CameraService::Client::disconnect() { - mCameraService->removeClient(mCameraClient); + ALOGV("Client::disconnect"); + BasicClient::disconnect(); mCameraService->setCameraFree(mCameraId); + + StatusVector rejectSourceStates; + rejectSourceStates.push_back(ICameraServiceListener::STATUS_NOT_PRESENT); + rejectSourceStates.push_back(ICameraServiceListener::STATUS_ENUMERATING); + + // Transition to PRESENT if the camera is not in either of above 2 states + mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT, + mCameraId, + &rejectSourceStates); +} + +CameraService::Client::OpsCallback::OpsCallback(wp<BasicClient> client): + mClient(client) { +} + +void CameraService::Client::OpsCallback::opChanged(int32_t op, + const String16& packageName) { + sp<BasicClient> client 
= mClient.promote(); + if (client != NULL) { + client->opChanged(op, packageName); + } +} + +// ---------------------------------------------------------------------------- +// IProCamera +// ---------------------------------------------------------------------------- + +CameraService::ProClient::ProClient(const sp<CameraService>& cameraService, + const sp<IProCameraCallbacks>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid) + : CameraService::BasicClient(cameraService, remoteCallback->asBinder(), + clientPackageName, cameraId, cameraFacing, + clientPid, clientUid, servicePid) +{ + mRemoteCallback = remoteCallback; +} + +CameraService::ProClient::~ProClient() { +} + +void CameraService::ProClient::notifyError() { + mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0); } // ---------------------------------------------------------------------------- @@ -568,7 +1103,7 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) { ALOGV("java clients' binder died"); - sp<Client> cameraClient = getClientByRemote(who); + sp<BasicClient> cameraClient = getClientByRemote(who); if (cameraClient == 0) { ALOGV("java clients' binder death already cleaned up (normal case)"); @@ -582,4 +1117,83 @@ status_t CameraService::dump(int fd, const Vector<String16>& args) { } +void CameraService::updateStatus(ICameraServiceListener::Status status, + int32_t cameraId, + const StatusVector *rejectSourceStates) { + // do not lock mServiceLock here or can get into a deadlock from + // connect() -> ProClient::disconnect -> updateStatus + Mutex::Autolock lock(mStatusMutex); + + ICameraServiceListener::Status oldStatus = mStatusList[cameraId]; + + mStatusList[cameraId] = status; + + if (oldStatus != status) { + ALOGV("%s: Status has changed for camera ID %d from 0x%x to 0x%x", + __FUNCTION__, cameraId, (uint32_t)oldStatus, (uint32_t)status); + + if (oldStatus == 
ICameraServiceListener::STATUS_NOT_PRESENT && + (status != ICameraServiceListener::STATUS_PRESENT && + status != ICameraServiceListener::STATUS_ENUMERATING)) { + + ALOGW("%s: From NOT_PRESENT can only transition into PRESENT" + " or ENUMERATING", __FUNCTION__); + mStatusList[cameraId] = oldStatus; + return; + } + + if (rejectSourceStates != NULL) { + const StatusVector &rejectList = *rejectSourceStates; + StatusVector::const_iterator it = rejectList.begin(); + + /** + * Sometimes we want to conditionally do a transition. + * For example if a client disconnects, we want to go to PRESENT + * only if we weren't already in NOT_PRESENT or ENUMERATING. + */ + for (; it != rejectList.end(); ++it) { + if (oldStatus == *it) { + ALOGV("%s: Rejecting status transition for Camera ID %d, " + " since the source state was was in one of the bad " + " states.", __FUNCTION__, cameraId); + mStatusList[cameraId] = oldStatus; + return; + } + } + } + + /** + * ProClients lose their exclusive lock. + * - Done before the CameraClient can initialize the HAL device, + * since we want to be able to close it before they get to initialize + */ + if (status == ICameraServiceListener::STATUS_NOT_AVAILABLE) { + Vector<wp<ProClient> > proClients(mProClientList[cameraId]); + Vector<wp<ProClient> >::const_iterator it; + + for (it = proClients.begin(); it != proClients.end(); ++it) { + sp<ProClient> proCl = it->promote(); + if (proCl.get() != NULL) { + proCl->onExclusiveLockStolen(); + } + } + } + + Vector<sp<ICameraServiceListener> >::const_iterator it; + for (it = mListenerList.begin(); it != mListenerList.end(); ++it) { + (*it)->onStatusChanged(status, cameraId); + } + } +} + +ICameraServiceListener::Status CameraService::getStatus(int cameraId) const { + if (cameraId < 0 || cameraId >= MAX_CAMERAS) { + ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId); + return ICameraServiceListener::STATUS_UNKNOWN; + } + + Mutex::Autolock al(mStatusMutex); + return mStatusList[cameraId]; +} + }; // 
namespace android diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h index 4dab340..710f164 100644 --- a/services/camera/libcameraservice/CameraService.h +++ b/services/camera/libcameraservice/CameraService.h @@ -18,10 +18,20 @@ #ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H #define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H +#include <utils/Vector.h> +#include <binder/AppOpsManager.h> #include <binder/BinderService.h> +#include <binder/IAppOpsCallback.h> #include <camera/ICameraService.h> #include <hardware/camera.h> +#include <camera/ICamera.h> +#include <camera/ICameraClient.h> +#include <camera/IProCameraUser.h> +#include <camera/IProCameraCallbacks.h> + +#include <camera/ICameraServiceListener.h> + /* This needs to be increased if we can have more cameras */ #define MAX_CAMERAS 2 @@ -35,32 +45,49 @@ class MediaPlayer; class CameraService : public BinderService<CameraService>, public BnCameraService, - public IBinder::DeathRecipient + public IBinder::DeathRecipient, + public camera_module_callbacks_t { friend class BinderService<CameraService>; public: class Client; + class BasicClient; + + // Implementation of BinderService<T> static char const* getServiceName() { return "media.camera"; } CameraService(); virtual ~CameraService(); + ///////////////////////////////////////////////////////////////////// + // HAL Callbacks + virtual void onDeviceStatusChanged(int cameraId, + int newStatus); + + ///////////////////////////////////////////////////////////////////// + // ICameraService virtual int32_t getNumberOfCameras(); virtual status_t getCameraInfo(int cameraId, struct CameraInfo* cameraInfo); - virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId); - virtual void removeClient(const sp<ICameraClient>& cameraClient); - // returns plain pointer of client. Note that mClientLock should be acquired to - // prevent the client from destruction. The result can be NULL. 
- virtual Client* getClientByIdUnsafe(int cameraId); - virtual Mutex* getClientLockById(int cameraId); - virtual sp<Client> getClientByRemote(const wp<IBinder>& cameraClient); + virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId, + const String16& clientPackageName, int clientUid); + virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb, + int cameraId, const String16& clientPackageName, int clientUid); - virtual status_t dump(int fd, const Vector<String16>& args); + virtual status_t addListener(const sp<ICameraServiceListener>& listener); + virtual status_t removeListener( + const sp<ICameraServiceListener>& listener); + + // Extra permissions checks virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags); - virtual void onFirstRef(); + + virtual status_t dump(int fd, const Vector<String16>& args); + + ///////////////////////////////////////////////////////////////////// + // Client functionality + virtual void removeClientByRemote(const wp<IBinder>& remoteBinder); enum sound_kind { SOUND_SHUTTER = 0, @@ -72,16 +99,96 @@ public: void playSound(sound_kind kind); void releaseSound(); - class Client : public BnCamera + + ///////////////////////////////////////////////////////////////////// + // CameraClient functionality + + // returns plain pointer of client. Note that mClientLock should be acquired to + // prevent the client from destruction. The result can be NULL. + virtual Client* getClientByIdUnsafe(int cameraId); + virtual Mutex* getClientLockById(int cameraId); + + class BasicClient : public virtual RefBase { + public: + virtual status_t initialize(camera_module_t *module) = 0; + + virtual void disconnect() = 0; + + // Return the remote callback binder object (e.g. 
IProCameraCallbacks) + wp<IBinder> getRemote() { + return mRemoteBinder; + } + + protected: + BasicClient(const sp<CameraService>& cameraService, + const sp<IBinder>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid); + + virtual ~BasicClient(); + + // the instance is in the middle of destruction. When this is set, + // the instance should not be accessed from callback. + // CameraService's mClientLock should be acquired to access this. + // - subclasses should set this to true in their destructors. + bool mDestructionStarted; + + // these are initialized in the constructor. + sp<CameraService> mCameraService; // immutable after constructor + int mCameraId; // immutable after constructor + int mCameraFacing; // immutable after constructor + const String16 mClientPackageName; + pid_t mClientPid; + uid_t mClientUid; // immutable after constructor + pid_t mServicePid; // immutable after constructor + + // - The app-side Binder interface to receive callbacks from us + wp<IBinder> mRemoteBinder; // immutable after constructor + + // permissions management + status_t startCameraOps(); + status_t finishCameraOps(); + + // Notify client about a fatal error + virtual void notifyError() = 0; + private: + AppOpsManager mAppOpsManager; + + class OpsCallback : public BnAppOpsCallback { + public: + OpsCallback(wp<BasicClient> client); + virtual void opChanged(int32_t op, const String16& packageName); + + private: + wp<BasicClient> mClient; + + }; // class OpsCallback + + sp<OpsCallback> mOpsCallback; + // Track whether startCameraOps was called successfully, to avoid + // finishing what we didn't start. 
+ bool mOpsActive; + + // IAppOpsCallback interface, indirected through opListener + virtual void opChanged(int32_t op, const String16& packageName); + }; // class BasicClient + + class Client : public BnCamera, public BasicClient { public: + typedef ICameraClient TCamCallbacks; + // ICamera interface (see ICamera for details) virtual void disconnect(); virtual status_t connect(const sp<ICameraClient>& client) = 0; virtual status_t lock() = 0; virtual status_t unlock() = 0; virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0; - virtual status_t setPreviewTexture(const sp<ISurfaceTexture>& surfaceTexture) = 0; + virtual status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer)=0; virtual void setPreviewCallbackFlag(int flag) = 0; virtual status_t startPreview() = 0; virtual void stopPreview() = 0; @@ -101,49 +208,112 @@ public: // Interface used by CameraService Client(const sp<CameraService>& cameraService, const sp<ICameraClient>& cameraClient, + const String16& clientPackageName, int cameraId, int cameraFacing, int clientPid, + uid_t clientUid, int servicePid); ~Client(); // return our camera client - const sp<ICameraClient>& getCameraClient() { - return mCameraClient; + const sp<ICameraClient>& getRemoteCallback() { + return mRemoteCallback; } - virtual status_t initialize(camera_module_t *module) = 0; - - virtual status_t dump(int fd, const Vector<String16>& args) = 0; - protected: static Mutex* getClientLockFromCookie(void* user); // convert client from cookie. Client lock should be acquired before getting Client. static Client* getClientFromCookie(void* user); - // the instance is in the middle of destruction. When this is set, - // the instance should not be accessed from callback. - // CameraService's mClientLock should be acquired to access this. - bool mDestructionStarted; + virtual void notifyError(); - // these are initialized in the constructor. 
- sp<CameraService> mCameraService; // immutable after constructor - sp<ICameraClient> mCameraClient; - int mCameraId; // immutable after constructor - int mCameraFacing; // immutable after constructor - pid_t mClientPid; - pid_t mServicePid; // immutable after constructor + // Initialized in constructor - }; + // - The app-side Binder interface to receive callbacks from us + sp<ICameraClient> mRemoteCallback; + + }; // class Client + + class ProClient : public BnProCameraUser, public BasicClient { + public: + typedef IProCameraCallbacks TCamCallbacks; + + ProClient(const sp<CameraService>& cameraService, + const sp<IProCameraCallbacks>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid); + + virtual ~ProClient(); + + const sp<IProCameraCallbacks>& getRemoteCallback() { + return mRemoteCallback; + } + + /*** + IProCamera implementation + ***/ + virtual status_t connect(const sp<IProCameraCallbacks>& callbacks) + = 0; + virtual status_t exclusiveTryLock() = 0; + virtual status_t exclusiveLock() = 0; + virtual status_t exclusiveUnlock() = 0; + + virtual bool hasExclusiveLock() = 0; + + // Note that the callee gets a copy of the metadata. + virtual int submitRequest(camera_metadata_t* metadata, + bool streaming = false) = 0; + virtual status_t cancelRequest(int requestId) = 0; + + // Callbacks from camera service + virtual void onExclusiveLockStolen() = 0; + + protected: + virtual void notifyError(); + + sp<IProCameraCallbacks> mRemoteCallback; + }; // class ProClient private: + + // Delay-load the Camera HAL module + virtual void onFirstRef(); + + // Step 1. Check if we can connect, before we acquire the service lock. + bool validateConnect(int cameraId, + /*inout*/ + int& clientUid) const; + + // Step 2. Check if we can connect, after we acquire the service lock. 
+ bool canConnectUnsafe(int cameraId, + const String16& clientPackageName, + const sp<IBinder>& remoteCallback, + /*out*/ + sp<Client> &client); + + // When connection is successful, initialize client and track its death + bool connectFinishUnsafe(const sp<BasicClient>& client, + const sp<IBinder>& clientBinder); + + virtual sp<BasicClient> getClientByRemote(const wp<IBinder>& cameraClient); + Mutex mServiceLock; wp<Client> mClient[MAX_CAMERAS]; // protected by mServiceLock Mutex mClientLock[MAX_CAMERAS]; // prevent Client destruction inside callbacks int mNumberOfCameras; + typedef wp<ProClient> weak_pro_client_ptr; + Vector<weak_pro_client_ptr> mProClientList[MAX_CAMERAS]; + // needs to be called with mServiceLock held sp<Client> findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex); + sp<ProClient> findProClientUnsafe( + const wp<IBinder>& cameraCallbacksRemote); // atomics to record whether the hardware is allocated to some client. volatile int32_t mBusy[MAX_CAMERAS]; @@ -159,8 +329,32 @@ private: camera_module_t *mModule; + Vector<sp<ICameraServiceListener> > + mListenerList; + + // guard only mStatusList and the broadcasting of ICameraServiceListener + mutable Mutex mStatusMutex; + ICameraServiceListener::Status + mStatusList[MAX_CAMERAS]; + + // Read the current status (locks mStatusMutex) + ICameraServiceListener::Status + getStatus(int cameraId) const; + + typedef Vector<ICameraServiceListener::Status> StatusVector; + // Broadcast the new status if it changed (locks the service mutex) + void updateStatus( + ICameraServiceListener::Status status, + int32_t cameraId, + const StatusVector *rejectSourceStates = NULL); + // IBinder::DeathRecipient implementation - virtual void binderDied(const wp<IBinder> &who); + virtual void binderDied(const wp<IBinder> &who); + + // Helpers + int getDeviceVersion(int cameraId, int* facing); + + bool isValidCameraId(int cameraId); }; } // namespace android diff --git 
a/services/camera/libcameraservice/ProCamera2Client.cpp b/services/camera/libcameraservice/ProCamera2Client.cpp new file mode 100644 index 0000000..251fdab --- /dev/null +++ b/services/camera/libcameraservice/ProCamera2Client.cpp @@ -0,0 +1,446 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "ProCamera2Client" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> + +#include <cutils/properties.h> +#include <gui/Surface.h> +#include <gui/Surface.h> +#include "camera2/Parameters.h" +#include "ProCamera2Client.h" +#include "camera2/ProFrameProcessor.h" +#include "CameraDeviceBase.h" + +namespace android { +using namespace camera2; + +// Interface used by CameraService + +ProCamera2Client::ProCamera2Client(const sp<CameraService>& cameraService, + const sp<IProCameraCallbacks>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid) : + Camera2ClientBase(cameraService, remoteCallback, clientPackageName, + cameraId, cameraFacing, clientPid, clientUid, servicePid) +{ + ATRACE_CALL(); + ALOGI("ProCamera %d: Opened", cameraId); + + mExclusiveLock = false; +} + +status_t ProCamera2Client::initialize(camera_module_t *module) +{ + ATRACE_CALL(); + status_t res; + + res = Camera2ClientBase::initialize(module); + if (res != OK) { + return 
res; + } + + String8 threadName; + mFrameProcessor = new ProFrameProcessor(mDevice); + threadName = String8::format("PC2-%d-FrameProc", mCameraId); + mFrameProcessor->run(threadName.string()); + + mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID, + FRAME_PROCESSOR_LISTENER_MAX_ID, + /*listener*/this); + + return OK; +} + +ProCamera2Client::~ProCamera2Client() { +} + +status_t ProCamera2Client::exclusiveTryLock() { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + Mutex::Autolock icl(mBinderSerializationLock); + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + + if (!mDevice.get()) return PERMISSION_DENIED; + + if (!mExclusiveLock) { + mExclusiveLock = true; + + if (mRemoteCallback != NULL) { + mRemoteCallback->onLockStatusChanged( + IProCameraCallbacks::LOCK_ACQUIRED); + } + + ALOGV("%s: exclusive lock acquired", __FUNCTION__); + + return OK; + } + + // TODO: have a PERMISSION_DENIED case for when someone else owns the lock + + // don't allow recursive locking + ALOGW("%s: exclusive lock already exists - recursive locking is not" + "allowed", __FUNCTION__); + + return ALREADY_EXISTS; +} + +status_t ProCamera2Client::exclusiveLock() { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + Mutex::Autolock icl(mBinderSerializationLock); + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + + if (!mDevice.get()) return PERMISSION_DENIED; + + /** + * TODO: this should asynchronously 'wait' until the lock becomes available + * if another client already has an exclusive lock. 
+ * + * once we have proper sharing support this will need to do + * more than just return immediately + */ + if (!mExclusiveLock) { + mExclusiveLock = true; + + if (mRemoteCallback != NULL) { + mRemoteCallback->onLockStatusChanged(IProCameraCallbacks::LOCK_ACQUIRED); + } + + ALOGV("%s: exclusive lock acquired", __FUNCTION__); + + return OK; + } + + // don't allow recursive locking + ALOGW("%s: exclusive lock already exists - recursive locking is not allowed" + , __FUNCTION__); + return ALREADY_EXISTS; +} + +status_t ProCamera2Client::exclusiveUnlock() { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + Mutex::Autolock icl(mBinderSerializationLock); + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + + // don't allow unlocking if we have no lock + if (!mExclusiveLock) { + ALOGW("%s: cannot unlock, no lock was held in the first place", + __FUNCTION__); + return BAD_VALUE; + } + + mExclusiveLock = false; + if (mRemoteCallback != NULL ) { + mRemoteCallback->onLockStatusChanged( + IProCameraCallbacks::LOCK_RELEASED); + } + ALOGV("%s: exclusive lock released", __FUNCTION__); + + return OK; +} + +bool ProCamera2Client::hasExclusiveLock() { + Mutex::Autolock icl(mBinderSerializationLock); + return mExclusiveLock; +} + +void ProCamera2Client::onExclusiveLockStolen() { + ALOGV("%s: ProClient lost exclusivity (id %d)", + __FUNCTION__, mCameraId); + + Mutex::Autolock icl(mBinderSerializationLock); + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + + if (mExclusiveLock && mRemoteCallback.get() != NULL) { + mRemoteCallback->onLockStatusChanged( + IProCameraCallbacks::LOCK_STOLEN); + } + + mExclusiveLock = false; + + //TODO: we should not need to detach the device, merely reset it. 
+ detachDevice(); +} + +status_t ProCamera2Client::submitRequest(camera_metadata_t* request, + bool streaming) { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + Mutex::Autolock icl(mBinderSerializationLock); + + if (!mDevice.get()) return DEAD_OBJECT; + + if (!mExclusiveLock) { + return PERMISSION_DENIED; + } + + CameraMetadata metadata(request); + + if (!enforceRequestPermissions(metadata)) { + return PERMISSION_DENIED; + } + + if (streaming) { + return mDevice->setStreamingRequest(metadata); + } else { + return mDevice->capture(metadata); + } + + // unreachable. thx gcc for a useless warning + return OK; +} + +status_t ProCamera2Client::cancelRequest(int requestId) { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + Mutex::Autolock icl(mBinderSerializationLock); + + if (!mDevice.get()) return DEAD_OBJECT; + + if (!mExclusiveLock) { + return PERMISSION_DENIED; + } + + // TODO: implement + ALOGE("%s: not fully implemented yet", __FUNCTION__); + return INVALID_OPERATION; +} + +status_t ProCamera2Client::deleteStream(int streamId) { + ATRACE_CALL(); + ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId); + + status_t res; + if ( (res = checkPid(__FUNCTION__) ) != OK) return res; + + Mutex::Autolock icl(mBinderSerializationLock); + + if (!mDevice.get()) return DEAD_OBJECT; + mDevice->clearStreamingRequest(); + + status_t code; + if ((code = mDevice->waitUntilDrained()) != OK) { + ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__, code); + } + + return mDevice->deleteStream(streamId); +} + +status_t ProCamera2Client::createStream(int width, int height, int format, + const sp<IGraphicBufferProducer>& bufferProducer, + /*out*/ + int* streamId) +{ + if (streamId) { + *streamId = -1; + } + + ATRACE_CALL(); + ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format); + + status_t res; + if ( (res = checkPid(__FUNCTION__) ) != OK) return res; + + Mutex::Autolock icl(mBinderSerializationLock); + + if (!mDevice.get()) return DEAD_OBJECT; + + 
sp<IBinder> binder; + sp<ANativeWindow> window; + if (bufferProducer != 0) { + binder = bufferProducer->asBinder(); + window = new Surface(bufferProducer); + } + + return mDevice->createStream(window, width, height, format, /*size*/1, + streamId); +} + +// Create a request object from a template. +// -- Caller owns the newly allocated metadata +status_t ProCamera2Client::createDefaultRequest(int templateId, + /*out*/ + camera_metadata** request) +{ + ATRACE_CALL(); + ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId); + + if (request) { + *request = NULL; + } + + status_t res; + if ( (res = checkPid(__FUNCTION__) ) != OK) return res; + + Mutex::Autolock icl(mBinderSerializationLock); + + if (!mDevice.get()) return DEAD_OBJECT; + + CameraMetadata metadata; + if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK) { + *request = metadata.release(); + } + + return res; +} + +status_t ProCamera2Client::getCameraInfo(int cameraId, + /*out*/ + camera_metadata** info) +{ + if (cameraId != mCameraId) { + return INVALID_OPERATION; + } + + Mutex::Autolock icl(mBinderSerializationLock); + + if (!mDevice.get()) return DEAD_OBJECT; + + CameraMetadata deviceInfo = mDevice->info(); + *info = deviceInfo.release(); + + return OK; +} + +status_t ProCamera2Client::dump(int fd, const Vector<String16>& args) { + String8 result; + result.appendFormat("ProCamera2Client[%d] (%p) PID: %d, dump:\n", + mCameraId, + getRemoteCallback()->asBinder().get(), + mClientPid); + result.append(" State: "); + + // TODO: print dynamic/request section from most recent requests + mFrameProcessor->dump(fd, args); + + return dumpDevice(fd, args); +} + +// IProCameraUser interface + +void ProCamera2Client::detachDevice() { + if (mDevice == 0) return; + + ALOGV("Camera %d: Stopping processors", mCameraId); + + mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID, + FRAME_PROCESSOR_LISTENER_MAX_ID, + /*listener*/this); + mFrameProcessor->requestExit(); + ALOGV("Camera %d: 
Waiting for threads", mCameraId); + mFrameProcessor->join(); + ALOGV("Camera %d: Disconnecting device", mCameraId); + + // WORKAROUND: HAL refuses to disconnect while there's streams in flight + { + mDevice->clearStreamingRequest(); + + status_t code; + if ((code = mDevice->waitUntilDrained()) != OK) { + ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__, + code); + } + } + + Camera2ClientBase::detachDevice(); +} + +/** Device-related methods */ +void ProCamera2Client::onFrameAvailable(int32_t frameId, + const CameraMetadata& frame) { + ATRACE_CALL(); + ALOGV("%s", __FUNCTION__); + + Mutex::Autolock icl(mBinderSerializationLock); + SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); + + if (mRemoteCallback != NULL) { + CameraMetadata tmp(frame); + camera_metadata_t* meta = tmp.release(); + ALOGV("%s: meta = %p ", __FUNCTION__, meta); + mRemoteCallback->onResultReceived(frameId, meta); + tmp.acquire(meta); + } + +} + +bool ProCamera2Client::enforceRequestPermissions(CameraMetadata& metadata) { + + const int pid = IPCThreadState::self()->getCallingPid(); + const int selfPid = getpid(); + camera_metadata_entry_t entry; + + /** + * Mixin default important security values + * - android.led.transmit = defaulted ON + */ + CameraMetadata staticInfo = mDevice->info(); + entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS); + for(size_t i = 0; i < entry.count; ++i) { + uint8_t led = entry.data.u8[i]; + + switch(led) { + case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: { + uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON; + if (!metadata.exists(ANDROID_LED_TRANSMIT)) { + metadata.update(ANDROID_LED_TRANSMIT, + &transmitDefault, 1); + } + break; + } + } + } + + // We can do anything! 
+ if (pid == selfPid) { + return true; + } + + /** + * Permission check special fields in the request + * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT + */ + entry = metadata.find(ANDROID_LED_TRANSMIT); + if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) { + String16 permissionString = + String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED"); + if (!checkCallingPermission(permissionString)) { + const int uid = IPCThreadState::self()->getCallingUid(); + ALOGE("Permission Denial: " + "can't disable transmit LED pid=%d, uid=%d", pid, uid); + return false; + } + } + + return true; +} + +} // namespace android diff --git a/services/camera/libcameraservice/ProCamera2Client.h b/services/camera/libcameraservice/ProCamera2Client.h new file mode 100644 index 0000000..faee9f9 --- /dev/null +++ b/services/camera/libcameraservice/ProCamera2Client.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H +#define ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H + +#include "Camera2Device.h" +#include "CameraService.h" +#include "camera2/ProFrameProcessor.h" +#include "Camera2ClientBase.h" + +namespace android { + +class IMemory; +/** + * Implements the binder IProCameraUser API, + * meant for HAL2-level private API access. 
+ */ +class ProCamera2Client : + public Camera2ClientBase<CameraService::ProClient>, + public camera2::ProFrameProcessor::FilteredListener +{ +public: + /** + * IProCameraUser interface (see IProCameraUser for details) + */ + virtual status_t exclusiveTryLock(); + virtual status_t exclusiveLock(); + virtual status_t exclusiveUnlock(); + + virtual bool hasExclusiveLock(); + + // Note that the callee gets a copy of the metadata. + virtual int submitRequest(camera_metadata_t* metadata, + bool streaming = false); + virtual status_t cancelRequest(int requestId); + + virtual status_t deleteStream(int streamId); + + virtual status_t createStream( + int width, + int height, + int format, + const sp<IGraphicBufferProducer>& bufferProducer, + /*out*/ + int* streamId); + + // Create a request object from a template. + // -- Caller owns the newly allocated metadata + virtual status_t createDefaultRequest(int templateId, + /*out*/ + camera_metadata** request); + + // Get the static metadata for the camera + // -- Caller owns the newly allocated metadata + virtual status_t getCameraInfo(int cameraId, + /*out*/ + camera_metadata** info); + + /** + * Interface used by CameraService + */ + + ProCamera2Client(const sp<CameraService>& cameraService, + const sp<IProCameraCallbacks>& remoteCallback, + const String16& clientPackageName, + int cameraId, + int cameraFacing, + int clientPid, + uid_t clientUid, + int servicePid); + virtual ~ProCamera2Client(); + + virtual status_t initialize(camera_module_t *module); + + virtual status_t dump(int fd, const Vector<String16>& args); + + // Callbacks from camera service + virtual void onExclusiveLockStolen(); + + /** + * Interface used by independent components of ProCamera2Client. 
+ */ + +protected: + /** FilteredListener implementation **/ + virtual void onFrameAvailable(int32_t frameId, + const CameraMetadata& frame); + virtual void detachDevice(); + +private: + /** IProCameraUser interface-related private members */ + + /** Preview callback related members */ + sp<camera2::ProFrameProcessor> mFrameProcessor; + static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0; + static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL; + + /** Utility members */ + bool enforceRequestPermissions(CameraMetadata& metadata); + + // Whether or not we have an exclusive lock on the device + // - if no we can't modify the request queue. + // note that creating/deleting streams we own is still OK + bool mExclusiveLock; +}; + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera2/BurstCapture.cpp b/services/camera/libcameraservice/camera2/BurstCapture.cpp index f56c50c..192d419 100644 --- a/services/camera/libcameraservice/camera2/BurstCapture.cpp +++ b/services/camera/libcameraservice/camera2/BurstCapture.cpp @@ -38,7 +38,8 @@ BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequen BurstCapture::~BurstCapture() { } -status_t BurstCapture::start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId) { +status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/, + int32_t /*firstCaptureId*/) { ALOGE("Not completely implemented"); return INVALID_OPERATION; } @@ -75,7 +76,7 @@ bool BurstCapture::threadLoop() { CpuConsumer::LockedBuffer* BurstCapture::jpegEncode( CpuConsumer::LockedBuffer *imgBuffer, - int quality) + int /*quality*/) { ALOGV("%s", __FUNCTION__); @@ -91,7 +92,7 @@ CpuConsumer::LockedBuffer* BurstCapture::jpegEncode( buffers.push_back(imgEncoded); sp<JpegCompressor> jpeg = new JpegCompressor(); - status_t res = jpeg->start(buffers, 1); + jpeg->start(buffers, 1); bool success = jpeg->waitForDone(10 * 1e9); if(success) { @@ -103,7 +104,7 @@ CpuConsumer::LockedBuffer* 
BurstCapture::jpegEncode( } } -status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &client) { +status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) { ALOGE("Not implemented"); return INVALID_OPERATION; } diff --git a/services/camera/libcameraservice/camera2/BurstCapture.h b/services/camera/libcameraservice/camera2/BurstCapture.h index dfc45eb..a2cc893 100644 --- a/services/camera/libcameraservice/camera2/BurstCapture.h +++ b/services/camera/libcameraservice/camera2/BurstCapture.h @@ -17,7 +17,7 @@ #ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H #define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H -#include "camera2/CameraMetadata.h" +#include "camera/CameraMetadata.h" #include <binder/MemoryBase.h> #include <binder/MemoryHeapBase.h> #include <gui/CpuConsumer.h> diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp index 3e9c255..5fa84e0 100644 --- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp +++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp @@ -22,17 +22,20 @@ #include <utils/Trace.h> #include "CallbackProcessor.h" -#include <gui/SurfaceTextureClient.h> -#include "../Camera2Device.h" +#include <gui/Surface.h> +#include "../CameraDeviceBase.h" #include "../Camera2Client.h" +#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) ) namespace android { namespace camera2 { -CallbackProcessor::CallbackProcessor(wp<Camera2Client> client): +CallbackProcessor::CallbackProcessor(sp<Camera2Client> client): Thread(false), mClient(client), + mDevice(client->getCameraDevice()), + mId(client->getCameraId()), mCallbackAvailable(false), mCallbackStreamId(NO_STREAM) { } @@ -56,16 +59,26 @@ status_t CallbackProcessor::updateStream(const Parameters ¶ms) { Mutex::Autolock l(mInputMutex); - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return OK; - sp<Camera2Device> device = client->getCameraDevice(); + 
sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + // If possible, use the flexible YUV format + int32_t callbackFormat = params.previewFormat; + if (params.fastInfo.useFlexibleYuv && + (params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP || + params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) { + callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888; + } if (mCallbackConsumer == 0) { // Create CPU buffer queue endpoint mCallbackConsumer = new CpuConsumer(kCallbackHeapCount); mCallbackConsumer->setFrameAvailableListener(this); mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer")); - mCallbackWindow = new SurfaceTextureClient( + mCallbackWindow = new Surface( mCallbackConsumer->getProducerInterface()); } @@ -76,22 +89,22 @@ status_t CallbackProcessor::updateStream(const Parameters ¶ms) { ¤tWidth, ¤tHeight, ¤tFormat); if (res != OK) { ALOGE("%s: Camera %d: Error querying callback output stream info: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } if (currentWidth != (uint32_t)params.previewWidth || currentHeight != (uint32_t)params.previewHeight || - currentFormat != (uint32_t)params.previewFormat) { + currentFormat != (uint32_t)callbackFormat) { // Since size should only change while preview is not running, // assuming that all existing use of old callback stream is // completed. 
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed", - __FUNCTION__, client->getCameraId(), mCallbackStreamId); + ALOGV("%s: Camera %d: Deleting stream %d since the buffer " + "parameters changed", __FUNCTION__, mId, mCallbackStreamId); res = device->deleteStream(mCallbackStreamId); if (res != OK) { ALOGE("%s: Camera %d: Unable to delete old output stream " - "for callbacks: %s (%d)", __FUNCTION__, client->getCameraId(), + "for callbacks: %s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -100,15 +113,15 @@ status_t CallbackProcessor::updateStream(const Parameters ¶ms) { } if (mCallbackStreamId == NO_STREAM) { - ALOGV("Creating callback stream: %d %d format 0x%x", + ALOGV("Creating callback stream: %d x %d, format 0x%x, API format 0x%x", params.previewWidth, params.previewHeight, - params.previewFormat); + callbackFormat, params.previewFormat); res = device->createStream(mCallbackWindow, params.previewWidth, params.previewHeight, - params.previewFormat, 0, &mCallbackStreamId); + callbackFormat, 0, &mCallbackStreamId); if (res != OK) { ALOGE("%s: Camera %d: Can't create output stream for callbacks: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -119,16 +132,24 @@ status_t CallbackProcessor::updateStream(const Parameters ¶ms) { status_t CallbackProcessor::deleteStream() { ATRACE_CALL(); - status_t res; + sp<CameraDeviceBase> device; - Mutex::Autolock l(mInputMutex); + { + Mutex::Autolock l(mInputMutex); - if (mCallbackStreamId != NO_STREAM) { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return OK; - sp<Camera2Device> device = client->getCameraDevice(); + if (mCallbackStreamId == NO_STREAM) { + return OK; + } + device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + } + device->deleteStream(mCallbackStreamId); - 
device->deleteStream(mCallbackStreamId); + { + Mutex::Autolock l(mInputMutex); mCallbackHeap.clear(); mCallbackWindow.clear(); @@ -144,7 +165,7 @@ int CallbackProcessor::getStreamId() const { return mCallbackStreamId; } -void CallbackProcessor::dump(int fd, const Vector<String16>& args) const { +void CallbackProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const { } bool CallbackProcessor::threadLoop() { @@ -162,18 +183,36 @@ bool CallbackProcessor::threadLoop() { do { sp<Camera2Client> client = mClient.promote(); - if (client == 0) return false; - res = processNewCallback(client); + if (client == 0) { + res = discardNewCallback(); + } else { + res = processNewCallback(client); + } } while (res == OK); return true; } +status_t CallbackProcessor::discardNewCallback() { + ATRACE_CALL(); + status_t res; + CpuConsumer::LockedBuffer imgBuffer; + res = mCallbackConsumer->lockNextBuffer(&imgBuffer); + if (res != OK) { + if (res != BAD_VALUE) { + ALOGE("%s: Camera %d: Error receiving next callback buffer: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + } + return res; + } + mCallbackConsumer->unlockBuffer(imgBuffer); + return OK; +} + status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { ATRACE_CALL(); status_t res; - int callbackHeapId; sp<Camera2Heap> callbackHeap; size_t heapIdx; @@ -183,13 +222,15 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { if (res != OK) { if (res != BAD_VALUE) { ALOGE("%s: Camera %d: Error receiving next callback buffer: " - "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res); + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); } return res; } ALOGV("%s: Camera %d: Preview callback available", __FUNCTION__, - client->getCameraId()); + mId); + bool useFlexibleYuv = false; + int32_t previewFormat = 0; { SharedParameters::Lock l(client->getParameters()); @@ -197,7 +238,7 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { && 
l.mParameters.state != Parameters::RECORD && l.mParameters.state != Parameters::VIDEO_SNAPSHOT) { ALOGV("%s: Camera %d: No longer streaming", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); mCallbackConsumer->unlockBuffer(imgBuffer); return OK; } @@ -216,10 +257,18 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { return OK; } - if (imgBuffer.format != l.mParameters.previewFormat) { + previewFormat = l.mParameters.previewFormat; + useFlexibleYuv = l.mParameters.fastInfo.useFlexibleYuv && + (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP || + previewFormat == HAL_PIXEL_FORMAT_YV12); + + int32_t expectedFormat = useFlexibleYuv ? + HAL_PIXEL_FORMAT_YCbCr_420_888 : previewFormat; + + if (imgBuffer.format != expectedFormat) { ALOGE("%s: Camera %d: Unexpected format for callback: " - "%x, expected %x", __FUNCTION__, client->getCameraId(), - imgBuffer.format, l.mParameters.previewFormat); + "0x%x, expected 0x%x", __FUNCTION__, mId, + imgBuffer.format, expectedFormat); mCallbackConsumer->unlockBuffer(imgBuffer); return INVALID_OPERATION; } @@ -232,9 +281,28 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { } } + uint32_t destYStride = 0; + uint32_t destCStride = 0; + if (useFlexibleYuv) { + if (previewFormat == HAL_PIXEL_FORMAT_YV12) { + // Strides must align to 16 for YV12 + destYStride = ALIGN(imgBuffer.width, 16); + destCStride = ALIGN(destYStride / 2, 16); + } else { + // No padding for NV21 + ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP, + "Unexpected preview format 0x%x", previewFormat); + destYStride = imgBuffer.width; + destCStride = destYStride / 2; + } + } else { + destYStride = imgBuffer.stride; + // don't care about cStride + } + size_t bufferSize = Camera2Client::calculateBufferSize( imgBuffer.width, imgBuffer.height, - imgBuffer.format, imgBuffer.stride); + previewFormat, destYStride); size_t currentBufferSize = (mCallbackHeap == 0) ? 
0 : (mCallbackHeap->mHeap->getSize() / kCallbackHeapCount); if (bufferSize != currentBufferSize) { @@ -243,7 +311,7 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { "Camera2Client::CallbackHeap"); if (mCallbackHeap->mHeap->getSize() == 0) { ALOGE("%s: Camera %d: Unable to allocate memory for callbacks", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); mCallbackConsumer->unlockBuffer(imgBuffer); return INVALID_OPERATION; } @@ -254,7 +322,7 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { if (mCallbackHeapFree == 0) { ALOGE("%s: Camera %d: No free callback buffers, dropping frame", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); mCallbackConsumer->unlockBuffer(imgBuffer); return OK; } @@ -264,7 +332,7 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount; mCallbackHeapFree--; - // TODO: Get rid of this memcpy by passing the gralloc queue all the way + // TODO: Get rid of this copy by passing the gralloc queue all the way // to app ssize_t offset; @@ -273,18 +341,32 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { mCallbackHeap->mBuffers[heapIdx]->getMemory(&offset, &size); uint8_t *data = (uint8_t*)heap->getBase() + offset; - memcpy(data, imgBuffer.data, bufferSize); + + if (!useFlexibleYuv) { + // Can just memcpy when HAL format matches API format + memcpy(data, imgBuffer.data, bufferSize); + } else { + res = convertFromFlexibleYuv(previewFormat, data, imgBuffer, + destYStride, destCStride); + if (res != OK) { + ALOGE("%s: Camera %d: Can't convert between 0x%x and 0x%x formats!", + __FUNCTION__, mId, imgBuffer.format, previewFormat); + mCallbackConsumer->unlockBuffer(imgBuffer); + return BAD_VALUE; + } + } ALOGV("%s: Freeing buffer", __FUNCTION__); mCallbackConsumer->unlockBuffer(imgBuffer); // Call outside parameter lock to allow re-entrancy from notification { 
- Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient); - if (l.mCameraClient != 0) { + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { ALOGV("%s: Camera %d: Invoking client data callback", - __FUNCTION__, client->getCameraId()); - l.mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_FRAME, + __FUNCTION__, mId); + l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_FRAME, mCallbackHeap->mBuffers[heapIdx], NULL); } } @@ -297,5 +379,102 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) { return OK; } +status_t CallbackProcessor::convertFromFlexibleYuv(int32_t previewFormat, + uint8_t *dst, + const CpuConsumer::LockedBuffer &src, + uint32_t dstYStride, + uint32_t dstCStride) const { + + if (previewFormat != HAL_PIXEL_FORMAT_YCrCb_420_SP && + previewFormat != HAL_PIXEL_FORMAT_YV12) { + ALOGE("%s: Camera %d: Unexpected preview format when using " + "flexible YUV: 0x%x", __FUNCTION__, mId, previewFormat); + return INVALID_OPERATION; + } + + // Copy Y plane, adjusting for stride + const uint8_t *ySrc = src.data; + uint8_t *yDst = dst; + for (size_t row = 0; row < src.height; row++) { + memcpy(yDst, ySrc, src.width); + ySrc += src.stride; + yDst += dstYStride; + } + + // Copy/swizzle chroma planes, 4:2:0 subsampling + const uint8_t *cbSrc = src.dataCb; + const uint8_t *crSrc = src.dataCr; + size_t chromaHeight = src.height / 2; + size_t chromaWidth = src.width / 2; + ssize_t chromaGap = src.chromaStride - + (chromaWidth * src.chromaStep); + size_t dstChromaGap = dstCStride - chromaWidth; + + if (previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) { + // Flexible YUV chroma to NV21 chroma + uint8_t *crcbDst = yDst; + // Check for shortcuts + if (cbSrc == crSrc + 1 && src.chromaStep == 2) { + ALOGV("%s: Fast NV21->NV21", __FUNCTION__); + // Source has semiplanar CrCb chroma layout, can copy by rows + for (size_t row = 0; row < chromaHeight; row++) { + memcpy(crcbDst, crSrc, 
src.width); + crcbDst += src.width; + crSrc += src.chromaStride; + } + } else { + ALOGV("%s: Generic->NV21", __FUNCTION__); + // Generic copy, always works but not very efficient + for (size_t row = 0; row < chromaHeight; row++) { + for (size_t col = 0; col < chromaWidth; col++) { + *(crcbDst++) = *crSrc; + *(crcbDst++) = *cbSrc; + crSrc += src.chromaStep; + cbSrc += src.chromaStep; + } + crSrc += chromaGap; + cbSrc += chromaGap; + } + } + } else { + // flexible YUV chroma to YV12 chroma + ALOG_ASSERT(previewFormat == HAL_PIXEL_FORMAT_YV12, + "Unexpected preview format 0x%x", previewFormat); + uint8_t *crDst = yDst; + uint8_t *cbDst = yDst + chromaHeight * dstCStride; + if (src.chromaStep == 1) { + ALOGV("%s: Fast YV12->YV12", __FUNCTION__); + // Source has planar chroma layout, can copy by row + for (size_t row = 0; row < chromaHeight; row++) { + memcpy(crDst, crSrc, chromaWidth); + crDst += dstCStride; + crSrc += src.chromaStride; + } + for (size_t row = 0; row < chromaHeight; row++) { + memcpy(cbDst, cbSrc, chromaWidth); + cbDst += dstCStride; + cbSrc += src.chromaStride; + } + } else { + ALOGV("%s: Generic->YV12", __FUNCTION__); + // Generic copy, always works but not very efficient + for (size_t row = 0; row < chromaHeight; row++) { + for (size_t col = 0; col < chromaWidth; col++) { + *(crDst++) = *crSrc; + *(cbDst++) = *cbSrc; + crSrc += src.chromaStep; + cbSrc += src.chromaStep; + } + crSrc += chromaGap; + cbSrc += chromaGap; + crDst += dstChromaGap; + cbDst += dstChromaGap; + } + } + } + + return OK; +} + }; // namespace camera2 }; // namespace android diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/camera2/CallbackProcessor.h index c2a1372..d851a84 100644 --- a/services/camera/libcameraservice/camera2/CallbackProcessor.h +++ b/services/camera/libcameraservice/camera2/CallbackProcessor.h @@ -24,12 +24,13 @@ #include <utils/Condition.h> #include <gui/CpuConsumer.h> #include "Parameters.h" -#include 
"CameraMetadata.h" +#include "camera/CameraMetadata.h" #include "Camera2Heap.h" namespace android { class Camera2Client; +class CameraDeviceBase; namespace camera2 { @@ -39,7 +40,7 @@ namespace camera2 { class CallbackProcessor: public Thread, public CpuConsumer::FrameAvailableListener { public: - CallbackProcessor(wp<Camera2Client> client); + CallbackProcessor(sp<Camera2Client> client); ~CallbackProcessor(); void onFrameAvailable(); @@ -52,6 +53,8 @@ class CallbackProcessor: private: static const nsecs_t kWaitDuration = 10000000; // 10 ms wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; + int mId; mutable Mutex mInputMutex; bool mCallbackAvailable; @@ -72,7 +75,15 @@ class CallbackProcessor: virtual bool threadLoop(); status_t processNewCallback(sp<Camera2Client> &client); - + // Used when shutting down + status_t discardNewCallback(); + + // Convert from flexible YUV to NV21 or YV12 + status_t convertFromFlexibleYuv(int32_t previewFormat, + uint8_t *dst, + const CpuConsumer::LockedBuffer &src, + uint32_t dstYStride, + uint32_t dstCStride) const; }; diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp index 072453b..266e516 100644 --- a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp +++ b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp @@ -27,6 +27,7 @@ #include "../Camera2Device.h" #include "../Camera2Client.h" #include "Parameters.h" +#include "ZslProcessorInterface.h" namespace android { namespace camera2 { @@ -54,7 +55,7 @@ CaptureSequencer::~CaptureSequencer() { ALOGV("%s: Exit", __FUNCTION__); } -void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) { +void CaptureSequencer::setZslProcessor(wp<ZslProcessorInterface> processor) { Mutex::Autolock l(mInputMutex); mZslProcessor = processor; } @@ -130,7 +131,7 @@ void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp, } -void CaptureSequencer::dump(int fd, const 
Vector<String16>& args) { +void CaptureSequencer::dump(int fd, const Vector<String16>& /*args*/) { String8 result; if (mCaptureRequest.entryCount() != 0) { result = " Capture request:\n"; @@ -184,7 +185,6 @@ const CaptureSequencer::StateManager }; bool CaptureSequencer::threadLoop() { - status_t res; sp<Camera2Client> client = mClient.promote(); if (client == 0) return false; @@ -215,7 +215,8 @@ bool CaptureSequencer::threadLoop() { return true; } -CaptureSequencer::CaptureState CaptureSequencer::manageIdle(sp<Camera2Client> &client) { +CaptureSequencer::CaptureState CaptureSequencer::manageIdle( + sp<Camera2Client> &/*client*/) { status_t res; Mutex::Autolock l(mInputMutex); while (!mStartCapture) { @@ -265,16 +266,22 @@ CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &c res = INVALID_OPERATION; } } - sp<ZslProcessor> processor = mZslProcessor.promote(); + sp<ZslProcessorInterface> processor = mZslProcessor.promote(); if (processor != 0) { + ALOGV("%s: Memory optimization, clearing ZSL queue", + __FUNCTION__); processor->clearZslQueue(); } + /** + * Fire the jpegCallback in Camera#takePicture(..., jpegCallback) + */ if (mCaptureBuffer != 0 && res == OK) { - Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient); + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); ALOGV("%s: Sending still image to client", __FUNCTION__); - if (l.mCameraClient != 0) { - l.mCameraClient->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mCaptureBuffer, NULL); } else { ALOGV("%s: No client!", __FUNCTION__); @@ -320,7 +327,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart( sp<Camera2Client> &client) { ALOGV("%s", __FUNCTION__); status_t res; - sp<ZslProcessor> processor = mZslProcessor.promote(); + sp<ZslProcessorInterface> processor = mZslProcessor.promote(); if (processor == 0) { ALOGE("%s: No ZSL queue 
to use!", __FUNCTION__); return DONE; @@ -344,7 +351,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart( } SharedParameters::Lock l(client->getParameters()); - /* warning: this also locks a SharedCameraClient */ + /* warning: this also locks a SharedCameraCallbacks */ shutterNotifyLocked(l.mParameters, client, mMsgType); mShutterNotified = true; mTimeoutCount = kMaxTimeoutsForCaptureEnd; @@ -352,13 +359,13 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart( } CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting( - sp<Camera2Client> &client) { + sp<Camera2Client> &/*client*/) { ALOGV("%s", __FUNCTION__); return DONE; } CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing( - sp<Camera2Client> &client) { + sp<Camera2Client> &/*client*/) { ALOGV("%s", __FUNCTION__); return START; } @@ -366,6 +373,8 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing( CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart( sp<Camera2Client> &client) { ATRACE_CALL(); + + // Get the onFrameAvailable callback when the requestID == mCaptureId client->registerFrameListener(mCaptureId, mCaptureId + 1, this); { @@ -380,7 +389,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart( } CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait( - sp<Camera2Client> &client) { + sp<Camera2Client> &/*client*/) { status_t res; ATRACE_CALL(); Mutex::Autolock l(mInputMutex); @@ -425,6 +434,13 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture( SharedParameters::Lock l(client->getParameters()); Vector<uint8_t> outputStreams; + /** + * Set up output streams in the request + * - preview + * - capture/jpeg + * - callback (if preview callbacks enabled) + * - recording (if recording enabled) + */ outputStreams.push(client->getPreviewStreamId()); outputStreams.push(client->getCaptureStreamId()); @@ -453,6 +469,7 @@ CaptureSequencer::CaptureState 
CaptureSequencer::manageStandardCapture( return DONE; } + // Create a capture copy since CameraDeviceBase#capture takes ownership CameraMetadata captureCopy = mCaptureRequest; if (captureCopy.entryCount() == 0) { ALOGE("%s: Camera %d: Unable to copy capture request for HAL device", @@ -460,7 +477,12 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture( return DONE; } + /** + * Clear the streaming request for still-capture pictures + * (as opposed to i.e. video snapshots) + */ if (l.mParameters.state == Parameters::STILL_CAPTURE) { + // API definition of takePicture() - stop preview before taking pic res = client->stopStream(); if (res != OK) { ALOGE("%s: Camera %d: Unable to stop preview for still capture: " @@ -487,6 +509,8 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait( status_t res; ATRACE_CALL(); Mutex::Autolock l(mInputMutex); + + // Wait for new metadata result (mNewFrame) while (!mNewFrameReceived) { res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration); if (res == TIMED_OUT) { @@ -494,12 +518,17 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait( break; } } + + // Approximation of the shutter being closed + // - TODO: use the hal3 exposure callback in Camera3Device instead if (mNewFrameReceived && !mShutterNotified) { SharedParameters::Lock l(client->getParameters()); - /* warning: this also locks a SharedCameraClient */ + /* warning: this also locks a SharedCameraCallbacks */ shutterNotifyLocked(l.mParameters, client, mMsgType); mShutterNotified = true; } + + // Wait until jpeg was captured by JpegProcessor while (mNewFrameReceived && !mNewCaptureReceived) { res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration); if (res == TIMED_OUT) { @@ -523,7 +552,9 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait( } if (entry.data.i64[0] != mCaptureTimestamp) { ALOGW("Mismatched capture timestamps: Metadata frame %lld," - " captured buffer 
%lld", entry.data.i64[0], mCaptureTimestamp); + " captured buffer %lld", + entry.data.i64[0], + mCaptureTimestamp); } client->removeFrameListener(mCaptureId, mCaptureId + 1, this); @@ -580,7 +611,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart( } CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait( - sp<Camera2Client> &client) { + sp<Camera2Client> &/*client*/) { status_t res; ATRACE_CALL(); @@ -651,16 +682,17 @@ status_t CaptureSequencer::updateCaptureRequest(const Parameters ¶ms, } { - Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient); + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); ALOGV("%s: Notifying of shutter close to client", __FUNCTION__); - if (l.mCameraClient != 0) { + if (l.mRemoteCallback != 0) { // ShutterCallback - l.mCameraClient->notifyCallback(CAMERA_MSG_SHUTTER, + l.mRemoteCallback->notifyCallback(CAMERA_MSG_SHUTTER, /*ext1*/0, /*ext2*/0); // RawCallback with null buffer - l.mCameraClient->notifyCallback(CAMERA_MSG_RAW_IMAGE_NOTIFY, + l.mRemoteCallback->notifyCallback(CAMERA_MSG_RAW_IMAGE_NOTIFY, /*ext1*/0, /*ext2*/0); } else { ALOGV("%s: No client!", __FUNCTION__); diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.h b/services/camera/libcameraservice/camera2/CaptureSequencer.h index c42df05..76750aa 100644 --- a/services/camera/libcameraservice/camera2/CaptureSequencer.h +++ b/services/camera/libcameraservice/camera2/CaptureSequencer.h @@ -23,7 +23,7 @@ #include <utils/Vector.h> #include <utils/Mutex.h> #include <utils/Condition.h> -#include "CameraMetadata.h" +#include "camera/CameraMetadata.h" #include "Parameters.h" #include "FrameProcessor.h" @@ -33,7 +33,7 @@ class Camera2Client; namespace camera2 { -class ZslProcessor; +class ZslProcessorInterface; class BurstCapture; /** @@ -48,7 +48,7 @@ class CaptureSequencer: ~CaptureSequencer(); // Get reference to the ZslProcessor, which holds the ZSL buffers and frames - void 
setZslProcessor(wp<ZslProcessor> processor); + void setZslProcessor(wp<ZslProcessorInterface> processor); // Begin still image capture status_t startCapture(int msgType); @@ -105,7 +105,7 @@ class CaptureSequencer: static const int kMaxTimeoutsForCaptureEnd = 40; // 4 sec wp<Camera2Client> mClient; - wp<ZslProcessor> mZslProcessor; + wp<ZslProcessorInterface> mZslProcessor; sp<BurstCapture> mBurstCapture; enum CaptureState { diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp index 76e84cc..d13d398 100644 --- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp +++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp @@ -22,158 +22,48 @@ #include <utils/Trace.h> #include "FrameProcessor.h" -#include "../Camera2Device.h" +#include "../CameraDeviceBase.h" #include "../Camera2Client.h" namespace android { namespace camera2 { -FrameProcessor::FrameProcessor(wp<Camera2Client> client): - Thread(false), mClient(client), mLastFrameNumberOfFaces(0) { +FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device, + wp<Camera2Client> client) : + ProFrameProcessor(device), + mClient(client), + mLastFrameNumberOfFaces(0) { } FrameProcessor::~FrameProcessor() { - ALOGV("%s: Exit", __FUNCTION__); } -status_t FrameProcessor::registerListener(int32_t minId, - int32_t maxId, wp<FilteredListener> listener) { - Mutex::Autolock l(mInputMutex); - ALOGV("%s: Registering listener for frame id range %d - %d", - __FUNCTION__, minId, maxId); - RangeListener rListener = { minId, maxId, listener }; - mRangeListeners.push_back(rListener); - return OK; -} +bool FrameProcessor::processSingleFrame(CameraMetadata &frame, + const sp<CameraDeviceBase> &device) { -status_t FrameProcessor::removeListener(int32_t minId, - int32_t maxId, wp<FilteredListener> listener) { - Mutex::Autolock l(mInputMutex); - List<RangeListener>::iterator item = mRangeListeners.begin(); - while (item != mRangeListeners.end()) { 
- if (item->minId == minId && - item->maxId == maxId && - item->listener == listener) { - item = mRangeListeners.erase(item); - } else { - item++; - } + sp<Camera2Client> client = mClient.promote(); + if (!client.get()) { + return false; } - return OK; -} -void FrameProcessor::dump(int fd, const Vector<String16>& args) { - String8 result(" Latest received frame:\n"); - write(fd, result.string(), result.size()); - mLastFrame.dump(fd, 2, 6); -} - -bool FrameProcessor::threadLoop() { - status_t res; - - sp<Camera2Device> device; - { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return false; - device = client->getCameraDevice(); - if (device == 0) return false; + if (processFaceDetect(frame, client) != OK) { + return false; } - res = device->waitForNextFrame(kWaitDuration); - if (res == OK) { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return false; - processNewFrames(client); - } else if (res != TIMED_OUT) { - ALOGE("Camera2Client::FrameProcessor: Error waiting for new " - "frames: %s (%d)", strerror(-res), res); + if (!ProFrameProcessor::processSingleFrame(frame, device)) { + return false; } return true; } -void FrameProcessor::processNewFrames(sp<Camera2Client> &client) { - status_t res; - ATRACE_CALL(); - CameraMetadata frame; - while ( (res = client->getCameraDevice()->getNextFrame(&frame)) == OK) { - camera_metadata_entry_t entry; - - entry = frame.find(ANDROID_REQUEST_FRAME_COUNT); - if (entry.count == 0) { - ALOGE("%s: Camera %d: Error reading frame number", - __FUNCTION__, client->getCameraId()); - break; - } - ATRACE_INT("cam2_frame", entry.data.i32[0]); - - res = processFaceDetect(frame, client); - if (res != OK) break; - - res = processListeners(frame, client); - if (res != OK) break; - - if (!frame.isEmpty()) { - mLastFrame.acquire(frame); - } - } - if (res != NOT_ENOUGH_DATA) { - ALOGE("%s: Camera %d: Error getting next frame: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); - return; - } 
- - return; -} - -status_t FrameProcessor::processListeners(const CameraMetadata &frame, - sp<Camera2Client> &client) { - status_t res; - ATRACE_CALL(); - camera_metadata_ro_entry_t entry; - - entry = frame.find(ANDROID_REQUEST_ID); - if (entry.count == 0) { - ALOGE("%s: Camera %d: Error reading frame id", - __FUNCTION__, client->getCameraId()); - return BAD_VALUE; - } - int32_t frameId = entry.data.i32[0]; - - List<sp<FilteredListener> > listeners; - { - Mutex::Autolock l(mInputMutex); - - List<RangeListener>::iterator item = mRangeListeners.begin(); - while (item != mRangeListeners.end()) { - if (frameId >= item->minId && - frameId < item->maxId) { - sp<FilteredListener> listener = item->listener.promote(); - if (listener == 0) { - item = mRangeListeners.erase(item); - continue; - } else { - listeners.push_back(listener); - } - } - item++; - } - } - ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size()); - List<sp<FilteredListener> >::iterator item = listeners.begin(); - for (; item != listeners.end(); item++) { - (*item)->onFrameAvailable(frameId, frame); - } - return OK; -} - status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, - sp<Camera2Client> &client) { + const sp<Camera2Client> &client) { status_t res = BAD_VALUE; ATRACE_CALL(); camera_metadata_ro_entry_t entry; bool enableFaceDetect; - int maxFaces; + { SharedParameters::Lock l(client->getParameters()); enableFaceDetect = l.mParameters.enableFaceDetect; @@ -191,12 +81,14 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, Vector<camera_face_t> faces; metadata.number_of_faces = 0; - if (enableFaceDetect && faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) { + if (enableFaceDetect && + faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) { + SharedParameters::Lock l(client->getParameters()); entry = frame.find(ANDROID_STATISTICS_FACE_RECTANGLES); if (entry.count == 0) { // No faces this frame - /* warning: locks 
SharedCameraClient */ + /* warning: locks SharedCameraCallbacks */ callbackFaceDetection(client, metadata); return OK; } @@ -248,6 +140,10 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, metadata.number_of_faces--; continue; } + if (faceScores[i] > 100) { + ALOGW("%s: Face index %d with out of range score %d", + __FUNCTION__, i, faceScores[i]); + } camera_face_t face; @@ -260,17 +156,17 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) { face.id = faceIds[i]; face.left_eye[0] = - l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]); + l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]); face.left_eye[1] = - l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]); + l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]); face.right_eye[0] = - l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]); + l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]); face.right_eye[1] = - l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]); + l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]); face.mouth[0] = - l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]); + l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]); face.mouth[1] = - l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]); + l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]); } else { face.id = 0; face.left_eye[0] = face.left_eye[1] = -2000; @@ -283,21 +179,31 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame, metadata.faces = faces.editArray(); } - /* warning: locks SharedCameraClient */ + /* warning: locks SharedCameraCallbacks */ callbackFaceDetection(client, metadata); return OK; } void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client, - /*in*/camera_frame_metadata &metadata) { - - /* Filter out repeated 0-face callbacks, but not when the last frame was >0 */ - if (metadata.number_of_faces != 0 || 
mLastFrameNumberOfFaces != metadata.number_of_faces) { - Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient); - if (l.mCameraClient != NULL) { - l.mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_METADATA, - NULL, &metadata); + const camera_frame_metadata &metadata) { + + camera_frame_metadata *metadata_ptr = + const_cast<camera_frame_metadata*>(&metadata); + + /** + * Filter out repeated 0-face callbacks, + * but not when the last frame was >0 + */ + if (metadata.number_of_faces != 0 || + mLastFrameNumberOfFaces != metadata.number_of_faces) { + + Camera2Client::SharedCameraCallbacks::Lock + l(client->mSharedCameraCallbacks); + if (l.mRemoteCallback != NULL) { + l.mRemoteCallback->dataCallback(CAMERA_MSG_PREVIEW_METADATA, + NULL, + metadata_ptr); } } diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/camera2/FrameProcessor.h index 3bd4e25..27ed8f6 100644 --- a/services/camera/libcameraservice/camera2/FrameProcessor.h +++ b/services/camera/libcameraservice/camera2/FrameProcessor.h @@ -22,7 +22,9 @@ #include <utils/Vector.h> #include <utils/KeyedVector.h> #include <utils/List.h> -#include "CameraMetadata.h" +#include <camera/CameraMetadata.h> + +#include "ProFrameProcessor.h" struct camera_frame_metadata; @@ -35,51 +37,26 @@ namespace camera2 { /* Output frame metadata processing thread. This thread waits for new * frames from the device, and analyzes them as necessary. */ -class FrameProcessor: public Thread { +class FrameProcessor : public ProFrameProcessor { public: - FrameProcessor(wp<Camera2Client> client); + FrameProcessor(wp<CameraDeviceBase> device, wp<Camera2Client> client); ~FrameProcessor(); - struct FilteredListener: virtual public RefBase { - virtual void onFrameAvailable(int32_t frameId, - const CameraMetadata &frame) = 0; - }; - - // Register a listener for a range of IDs [minId, maxId). 
Multiple listeners - // can be listening to the same range - status_t registerListener(int32_t minId, int32_t maxId, wp<FilteredListener> listener); - status_t removeListener(int32_t minId, int32_t maxId, wp<FilteredListener> listener); - - void dump(int fd, const Vector<String16>& args); private: - static const nsecs_t kWaitDuration = 10000000; // 10 ms wp<Camera2Client> mClient; + int mLastFrameNumberOfFaces; - virtual bool threadLoop(); - - Mutex mInputMutex; - - struct RangeListener { - int32_t minId; - int32_t maxId; - wp<FilteredListener> listener; - }; - List<RangeListener> mRangeListeners; + void processNewFrames(const sp<Camera2Client> &client); - void processNewFrames(sp<Camera2Client> &client); + virtual bool processSingleFrame(CameraMetadata &frame, + const sp<CameraDeviceBase> &device); status_t processFaceDetect(const CameraMetadata &frame, - sp<Camera2Client> &client); - - status_t processListeners(const CameraMetadata &frame, - sp<Camera2Client> &client); - - CameraMetadata mLastFrame; - int mLastFrameNumberOfFaces; + const sp<Camera2Client> &client); // Emit FaceDetection event to java if faces changed void callbackFaceDetection(sp<Camera2Client> client, - camera_frame_metadata &metadata); + const camera_frame_metadata &metadata); }; diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/camera2/JpegCompressor.cpp index 702ef58..c9af71e 100644 --- a/services/camera/libcameraservice/camera2/JpegCompressor.cpp +++ b/services/camera/libcameraservice/camera2/JpegCompressor.cpp @@ -144,7 +144,7 @@ bool JpegCompressor::isBusy() { } // old function -- TODO: update for new buffer type -bool JpegCompressor::isStreamInUse(uint32_t id) { +bool JpegCompressor::isStreamInUse(uint32_t /*id*/) { ALOGV("%s", __FUNCTION__); Mutex::Autolock lock(mBusyMutex); @@ -203,14 +203,14 @@ void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) { dest->free_in_buffer = kMaxJpegSize; } -boolean 
JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) { +boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr /*cinfo*/) { ALOGV("%s", __FUNCTION__); ALOGE("%s: JPEG destination buffer overflow!", __FUNCTION__); return true; } -void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) { +void JpegCompressor::jpegTermDestination(j_compress_ptr /*cinfo*/) { ALOGV("%s", __FUNCTION__); ALOGV("%s: Done writing JPEG data. %d bytes left in buffer", __FUNCTION__, cinfo->dest->free_in_buffer); diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp index ffc072b..f0a13ca 100644 --- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp +++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp @@ -26,8 +26,8 @@ #include <utils/Trace.h> #include "JpegProcessor.h" -#include <gui/SurfaceTextureClient.h> -#include "../Camera2Device.h" +#include <gui/Surface.h> +#include "../CameraDeviceBase.h" #include "../Camera2Client.h" @@ -35,11 +35,12 @@ namespace android { namespace camera2 { JpegProcessor::JpegProcessor( - wp<Camera2Client> client, + sp<Camera2Client> client, wp<CaptureSequencer> sequencer): Thread(false), - mClient(client), + mDevice(client->getCameraDevice()), mSequencer(sequencer), + mId(client->getCameraId()), mCaptureAvailable(false), mCaptureStreamId(NO_STREAM) { } @@ -64,16 +65,18 @@ status_t JpegProcessor::updateStream(const Parameters ¶ms) { Mutex::Autolock l(mInputMutex); - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return OK; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } // Find out buffer size for JPEG camera_metadata_ro_entry_t maxJpegSize = params.staticInfo(ANDROID_JPEG_MAX_SIZE); if (maxJpegSize.count == 0) { ALOGE("%s: Camera %d: Can't find 
ANDROID_JPEG_MAX_SIZE!", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); return INVALID_OPERATION; } @@ -82,14 +85,14 @@ status_t JpegProcessor::updateStream(const Parameters ¶ms) { mCaptureConsumer = new CpuConsumer(1); mCaptureConsumer->setFrameAvailableListener(this); mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer")); - mCaptureWindow = new SurfaceTextureClient( + mCaptureWindow = new Surface( mCaptureConsumer->getProducerInterface()); // Create memory for API consumption mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0, "Camera2Client::CaptureHeap"); if (mCaptureHeap->getSize() == 0) { ALOGE("%s: Camera %d: Unable to allocate memory for capture", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); return NO_MEMORY; } } @@ -102,18 +105,22 @@ status_t JpegProcessor::updateStream(const Parameters ¶ms) { if (res != OK) { ALOGE("%s: Camera %d: Error querying capture output stream info: " "%s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); return res; } if (currentWidth != (uint32_t)params.pictureWidth || currentHeight != (uint32_t)params.pictureHeight) { ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed", - __FUNCTION__, client->getCameraId(), mCaptureStreamId); + __FUNCTION__, mId, mCaptureStreamId); res = device->deleteStream(mCaptureStreamId); - if (res != OK) { + if (res == -EBUSY) { + ALOGV("%s: Camera %d: Device is busy, call updateStream again " + " after it becomes idle", __FUNCTION__, mId); + return res; + } else if (res != OK) { ALOGE("%s: Camera %d: Unable to delete old output stream " "for capture: %s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); return res; } mCaptureStreamId = NO_STREAM; @@ -128,7 +135,7 @@ status_t JpegProcessor::updateStream(const Parameters ¶ms) { &mCaptureStreamId); if (res != OK) { ALOGE("%s: Camera %d: Can't create output stream for capture: " - "%s 
(%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -139,14 +146,15 @@ status_t JpegProcessor::updateStream(const Parameters ¶ms) { status_t JpegProcessor::deleteStream() { ATRACE_CALL(); - status_t res; Mutex::Autolock l(mInputMutex); if (mCaptureStreamId != NO_STREAM) { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return OK; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } device->deleteStream(mCaptureStreamId); @@ -164,7 +172,7 @@ int JpegProcessor::getStreamId() const { return mCaptureStreamId; } -void JpegProcessor::dump(int fd, const Vector<String16>& args) const { +void JpegProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const { } bool JpegProcessor::threadLoop() { @@ -181,15 +189,13 @@ bool JpegProcessor::threadLoop() { } do { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return false; - res = processNewCapture(client); + res = processNewCapture(); } while (res == OK); return true; } -status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) { +status_t JpegProcessor::processNewCapture() { ATRACE_CALL(); status_t res; sp<Camera2Heap> captureHeap; @@ -201,17 +207,17 @@ status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) { if (res != BAD_VALUE) { ALOGE("%s: Camera %d: Error receiving still image buffer: " "%s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); } return res; } ALOGV("%s: Camera %d: Still capture available", __FUNCTION__, - client->getCameraId()); + mId); if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) { ALOGE("%s: Camera %d: Unexpected format for still image: " - "%x, expected %x", __FUNCTION__, client->getCameraId(), + "%x, expected %x", __FUNCTION__, mId, imgBuffer.format, 
HAL_PIXEL_FORMAT_BLOB); mCaptureConsumer->unlockBuffer(imgBuffer); @@ -356,7 +362,7 @@ size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) { // Find End of Image // Scan JPEG buffer until End of Image (EOI) bool foundEnd = false; - for (size; size <= maxSize - MARKER_LENGTH; size++) { + for ( ; size <= maxSize - MARKER_LENGTH; size++) { if ( checkJpegEnd(jpegBuffer + size) ) { foundEnd = true; size += MARKER_LENGTH; diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.h b/services/camera/libcameraservice/camera2/JpegProcessor.h index 836bd02..a38611c 100644 --- a/services/camera/libcameraservice/camera2/JpegProcessor.h +++ b/services/camera/libcameraservice/camera2/JpegProcessor.h @@ -24,11 +24,12 @@ #include <utils/Condition.h> #include <gui/CpuConsumer.h> #include "Parameters.h" -#include "CameraMetadata.h" +#include "camera/CameraMetadata.h" namespace android { class Camera2Client; +class CameraDeviceBase; class MemoryHeapBase; namespace camera2 { @@ -41,9 +42,10 @@ class CaptureSequencer; class JpegProcessor: public Thread, public CpuConsumer::FrameAvailableListener { public: - JpegProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer); + JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); ~JpegProcessor(); + // CpuConsumer listener implementation void onFrameAvailable(); status_t updateStream(const Parameters ¶ms); @@ -53,8 +55,9 @@ class JpegProcessor: void dump(int fd, const Vector<String16>& args) const; private: static const nsecs_t kWaitDuration = 10000000; // 10 ms - wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; wp<CaptureSequencer> mSequencer; + int mId; mutable Mutex mInputMutex; bool mCaptureAvailable; @@ -71,7 +74,7 @@ class JpegProcessor: virtual bool threadLoop(); - status_t processNewCapture(sp<Camera2Client> &client); + status_t processNewCapture(); size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize); }; diff --git 
a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp index 278a19c..a248b76 100644 --- a/services/camera/libcameraservice/camera2/Parameters.cpp +++ b/services/camera/libcameraservice/camera2/Parameters.cpp @@ -152,7 +152,16 @@ status_t Parameters::initialize(const CameraMetadata *info) { supportedPreviewFormats += CameraParameters::PIXEL_FORMAT_RGBA8888; break; + case HAL_PIXEL_FORMAT_YCbCr_420_888: + // Flexible YUV allows both YV12 and NV21 + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV420P; + supportedPreviewFormats += ","; + supportedPreviewFormats += + CameraParameters::PIXEL_FORMAT_YUV420SP; + break; // Not advertizing JPEG, RAW_SENSOR, etc, for preview formats + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: case HAL_PIXEL_FORMAT_RAW_SENSOR: case HAL_PIXEL_FORMAT_BLOB: addComma = false; @@ -534,6 +543,8 @@ status_t Parameters::initialize(const CameraMetadata *info) { if (!noSceneModes) { params.set(CameraParameters::KEY_SUPPORTED_SCENE_MODES, supportedSceneModes); + } else { + params.remove(CameraParameters::KEY_SCENE_MODE); } } @@ -657,15 +668,13 @@ status_t Parameters::initialize(const CameraMetadata *info) { float minFocalLength = availableFocalLengths.data.f[0]; params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength); - camera_metadata_ro_entry_t sensorSize = - staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2); - if (!sensorSize.count) return NO_INIT; + float horizFov, vertFov; + res = calculatePictureFovs(&horizFov, &vertFov); + if (res != OK) { + ALOGE("%s: Can't calculate field of views!", __FUNCTION__); + return res; + } - // The fields of view here assume infinity focus, maximum wide angle - float horizFov = 180 / M_PI * - 2 * atanf(sensorSize.data.f[0] / (2 * minFocalLength)); - float vertFov = 180 / M_PI * - 2 * atanf(sensorSize.data.f[1] / (2 * minFocalLength)); params.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, horizFov); 
params.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, vertFov); @@ -786,13 +795,21 @@ status_t Parameters::initialize(const CameraMetadata *info) { previewCallbackFlags = 0; previewCallbackOneShot = false; - char value[PROPERTY_VALUE_MAX]; - property_get("camera.disable_zsl_mode", value, "0"); - if (!strcmp(value,"1")) { - ALOGI("Camera %d: Disabling ZSL mode", cameraId); + camera_metadata_ro_entry_t supportedHardwareLevel = + staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL); + if (!supportedHardwareLevel.count || (supportedHardwareLevel.data.u8[0] == + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED)) { + ALOGI("Camera %d: ZSL mode disabled for limited mode HALs", cameraId); zslMode = false; } else { - zslMode = true; + char value[PROPERTY_VALUE_MAX]; + property_get("camera.disable_zsl_mode", value, "0"); + if (!strcmp(value,"1")) { + ALOGI("Camera %d: Disabling ZSL mode", cameraId); + zslMode = false; + } else { + zslMode = true; + } } lightFx = LIGHTFX_NONE; @@ -859,6 +876,15 @@ status_t Parameters::buildFastInfo() { staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE); bool fixedLens = (minFocusDistance.data.f[0] == 0); + camera_metadata_ro_entry_t availableFocalLengths = + staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS); + if (!availableFocalLengths.count) return NO_INIT; + + camera_metadata_ro_entry_t availableFormats = + staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + if (!availableFormats.count) return NO_INIT; + + if (sceneModeOverrides.count > 0) { // sceneModeOverrides is defined to have 3 entries for each scene mode, // which are AE, AWB, and AF override modes the HAL wants for that scene @@ -926,6 +952,27 @@ status_t Parameters::buildFastInfo() { fastInfo.arrayHeight = arrayHeight; fastInfo.bestFaceDetectMode = bestFaceDetectMode; fastInfo.maxFaces = maxFaces; + + // Find smallest (widest-angle) focal length to use as basis of still + // picture FOV reporting. 
+ fastInfo.minFocalLength = availableFocalLengths.data.f[0]; + for (size_t i = 1; i < availableFocalLengths.count; i++) { + if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) { + fastInfo.minFocalLength = availableFocalLengths.data.f[i]; + } + } + + // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888 + fastInfo.useFlexibleYuv = false; + for (size_t i = 0; i < availableFormats.count; i++) { + if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) { + fastInfo.useFlexibleYuv = true; + break; + } + } + ALOGV("Camera %d: Flexible YUV %s supported", + cameraId, fastInfo.useFlexibleYuv ? "is" : "is not"); + return OK; } @@ -950,11 +997,10 @@ status_t Parameters::buildQuirks() { } camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag, - size_t minCount, size_t maxCount) const { - status_t res; + size_t minCount, size_t maxCount, bool required) const { camera_metadata_ro_entry_t entry = info->find(tag); - if (CC_UNLIKELY( entry.count == 0 )) { + if (CC_UNLIKELY( entry.count == 0 ) && required) { const char* tagSection = get_camera_metadata_section_name(tag); if (tagSection == NULL) tagSection = "<unknown>"; const char* tagName = get_camera_metadata_tag_name(tag); @@ -1072,15 +1118,24 @@ status_t Parameters::set(const String8& paramString) { } camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); - for (i = 0; i < availableFormats.count; i++) { - if (availableFormats.data.i32[i] == validatedParams.previewFormat) - break; - } - if (i == availableFormats.count) { - ALOGE("%s: Requested preview format %s (0x%x) is not supported", - __FUNCTION__, newParams.getPreviewFormat(), - validatedParams.previewFormat); - return BAD_VALUE; + // If using flexible YUV, always support NV21/YV12. Otherwise, check + // HAL's list. + if (! 
(fastInfo.useFlexibleYuv && + (validatedParams.previewFormat == + HAL_PIXEL_FORMAT_YCrCb_420_SP || + validatedParams.previewFormat == + HAL_PIXEL_FORMAT_YV12) ) ) { + // Not using flexible YUV format, so check explicitly + for (i = 0; i < availableFormats.count; i++) { + if (availableFormats.data.i32[i] == + validatedParams.previewFormat) break; + } + if (i == availableFormats.count) { + ALOGE("%s: Requested preview format %s (0x%x) is not supported", + __FUNCTION__, newParams.getPreviewFormat(), + validatedParams.previewFormat); + return BAD_VALUE; + } } } @@ -1568,10 +1623,29 @@ status_t Parameters::set(const String8& paramString) { ALOGE("%s: Video stabilization not supported", __FUNCTION__); } + // LIGHTFX + validatedParams.lightFx = lightFxStringToEnum( + newParams.get(CameraParameters::KEY_LIGHTFX)); + /** Update internal parameters */ *this = validatedParams; + /** Update external parameters calculated from the internal ones */ + + // HORIZONTAL/VERTICAL FIELD OF VIEW + float horizFov, vertFov; + res = calculatePictureFovs(&horizFov, &vertFov); + if (res != OK) { + ALOGE("%s: Can't calculate FOVs", __FUNCTION__); + // continue so parameters are at least consistent + } + newParams.setFloat(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, + horizFov); + newParams.setFloat(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, + vertFov); + ALOGV("Current still picture FOV: %f x %f deg", horizFov, vertFov); + // Need to flatten again in case of overrides paramsFlattened = newParams.flatten(); params = newParams; @@ -1583,6 +1657,34 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { ATRACE_CALL(); status_t res; + /** + * Mixin default important security values + * - android.led.transmit = defaulted ON + */ + camera_metadata_ro_entry_t entry = staticInfo(ANDROID_LED_AVAILABLE_LEDS, + /*minimumCount*/0, + /*maximumCount*/0, + /*required*/false); + for(size_t i = 0; i < entry.count; ++i) { + uint8_t led = entry.data.u8[i]; + + switch(led) { + // Transmit LED is 
unconditionally on when using + // the android.hardware.Camera API + case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: { + uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON; + res = request->update(ANDROID_LED_TRANSMIT, + &transmitDefault, 1); + if (res != OK) return res; + break; + } + } + } + + /** + * Construct metadata from parameters + */ + uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL; res = request->update(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1); @@ -1750,13 +1852,14 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { CropRegion::OUTPUT_PREVIEW | CropRegion::OUTPUT_VIDEO | CropRegion::OUTPUT_PICTURE )); - int32_t reqCropRegion[3] = { + int32_t reqCropRegion[4] = { static_cast<int32_t>(crop.left), static_cast<int32_t>(crop.top), - static_cast<int32_t>(crop.width) + static_cast<int32_t>(crop.width), + static_cast<int32_t>(crop.height) }; res = request->update(ANDROID_SCALER_CROP_REGION, - reqCropRegion, 3); + reqCropRegion, 4); if (res != OK) return res; uint8_t reqVstabMode = videoStabilization ? @@ -2099,6 +2202,18 @@ const char *Parameters::focusModeEnumToString(focusMode_t focusMode) { } } +Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum( + const char *lightFxMode) { + return + !lightFxMode ? + Parameters::LIGHTFX_NONE : + !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ? + Parameters::LIGHTFX_LOWLIGHT : + !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ? 
+ Parameters::LIGHTFX_HDR : + Parameters::LIGHTFX_NONE; +} + status_t Parameters::parseAreas(const char *areasCStr, Vector<Parameters::Area> *areas) { static const size_t NUM_FIELDS = 5; @@ -2201,7 +2316,7 @@ int Parameters::cropXToArray(int x) const { CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); ALOG_ASSERT(x < previewCrop.width, "Crop-relative X coordinate = '%d' " - "is out of bounds (upper = %d)", x, previewCrop.width); + "is out of bounds (upper = %f)", x, previewCrop.width); int ret = x + previewCrop.left; @@ -2217,7 +2332,7 @@ int Parameters::cropYToArray(int y) const { CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW); ALOG_ASSERT(y < previewCrop.height, "Crop-relative Y coordinate = '%d' is " - "out of bounds (upper = %d)", y, previewCrop.height); + "out of bounds (upper = %f)", y, previewCrop.height); int ret = y + previewCrop.top; @@ -2423,7 +2538,88 @@ Parameters::CropRegion Parameters::calculateCropRegion( return crop; } -int32_t Parameters::fpsFromRange(int32_t min, int32_t max) const { +status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov) + const { + camera_metadata_ro_entry_t sensorSize = + staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2); + if (!sensorSize.count) return NO_INIT; + + camera_metadata_ro_entry_t availableFocalLengths = + staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS); + if (!availableFocalLengths.count) return NO_INIT; + + float arrayAspect = static_cast<float>(fastInfo.arrayWidth) / + fastInfo.arrayHeight; + float stillAspect = static_cast<float>(pictureWidth) / pictureHeight; + ALOGV("Array aspect: %f, still aspect: %f", arrayAspect, stillAspect); + + // The crop factors from the full sensor array to the still picture crop + // region + float horizCropFactor = 1.f; + float vertCropFactor = 1.f; + + /** + * Need to calculate the still image field of view based on the total pixel + * array field of view, and the relative aspect ratios of the pixel 
array + * and output streams. + * + * Special treatment for quirky definition of crop region and relative + * stream cropping. + */ + if (quirks.meteringCropRegion) { + // Use max of preview and video as first crop + float previewAspect = static_cast<float>(previewWidth) / previewHeight; + float videoAspect = static_cast<float>(videoWidth) / videoHeight; + if (videoAspect > previewAspect) { + previewAspect = videoAspect; + } + // First crop sensor to preview aspect ratio + if (arrayAspect < previewAspect) { + vertCropFactor = arrayAspect / previewAspect; + } else { + horizCropFactor = previewAspect / arrayAspect; + } + // Second crop to still aspect ratio + if (stillAspect < previewAspect) { + horizCropFactor *= stillAspect / previewAspect; + } else { + vertCropFactor *= previewAspect / stillAspect; + } + } else { + /** + * Crop are just a function of just the still/array relative aspect + * ratios. Since each stream will maximize its area within the crop + * region, and for FOV we assume a full-sensor crop region, we only ever + * crop the FOV either vertically or horizontally, never both. + */ + horizCropFactor = (arrayAspect > stillAspect) ? + (stillAspect / arrayAspect) : 1.f; + vertCropFactor = (arrayAspect < stillAspect) ? + (arrayAspect / stillAspect) : 1.f; + } + ALOGV("Horiz crop factor: %f, vert crop fact: %f", + horizCropFactor, vertCropFactor); + /** + * Basic field of view formula is: + * angle of view = 2 * arctangent ( d / 2f ) + * where d is the physical sensor dimension of interest, and f is + * the focal length. This only applies to rectilinear sensors, for focusing + * at distances >> f, etc. 
+ */ + if (horizFov != NULL) { + *horizFov = 180 / M_PI * 2 * + atanf(horizCropFactor * sensorSize.data.f[0] / + (2 * fastInfo.minFocalLength)); + } + if (vertFov != NULL) { + *vertFov = 180 / M_PI * 2 * + atanf(vertCropFactor * sensorSize.data.f[1] / + (2 * fastInfo.minFocalLength)); + } + return OK; +} + +int32_t Parameters::fpsFromRange(int32_t /*min*/, int32_t max) const { return max; } diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h index dc2cdcf..be05b54 100644 --- a/services/camera/libcameraservice/camera2/Parameters.h +++ b/services/camera/libcameraservice/camera2/Parameters.h @@ -25,8 +25,7 @@ #include <utils/Vector.h> #include <utils/KeyedVector.h> #include <camera/CameraParameters.h> - -#include "CameraMetadata.h" +#include <camera/CameraMetadata.h> namespace android { namespace camera2 { @@ -158,7 +157,7 @@ struct Parameters { } state; // Number of zoom steps to simulate - static const unsigned int NUM_ZOOM_STEPS = 30; + static const unsigned int NUM_ZOOM_STEPS = 100; // Full static camera info, object owned by someone else, such as // Camera2Device. @@ -184,6 +183,8 @@ struct Parameters { } }; DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides; + float minFocalLength; + bool useFlexibleYuv; } fastInfo; // Quirks information; these are short-lived flags to enable workarounds for @@ -214,7 +215,7 @@ struct Parameters { // max/minCount means to do no bounds check in that direction. In case of // error, the entry data pointer is null and the count is 0. 
camera_metadata_ro_entry_t staticInfo(uint32_t tag, - size_t minCount=0, size_t maxCount=0) const; + size_t minCount=0, size_t maxCount=0, bool required=true) const; // Validate and update camera parameters based on new settings status_t set(const String8 ¶mString); @@ -244,6 +245,9 @@ struct Parameters { }; CropRegion calculateCropRegion(CropRegion::Outputs outputs) const; + // Calculate the field of view of the high-resolution JPEG capture + status_t calculatePictureFovs(float *horizFov, float *vertFov) const; + // Static methods for debugging and converting between camera1 and camera2 // parameters @@ -261,6 +265,8 @@ struct Parameters { static const char* flashModeEnumToString(flashMode_t flashMode); static focusMode_t focusModeStringToEnum(const char *focusMode); static const char* focusModeEnumToString(focusMode_t focusMode); + static lightFxMode_t lightFxStringToEnum(const char *lightFxMode); + static status_t parseAreas(const char *areasCStr, Vector<Area> *areas); diff --git a/services/camera/libcameraservice/camera2/ProFrameProcessor.cpp b/services/camera/libcameraservice/camera2/ProFrameProcessor.cpp new file mode 100644 index 0000000..4012fc5 --- /dev/null +++ b/services/camera/libcameraservice/camera2/ProFrameProcessor.cpp @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera2-ProFrameProcessor" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> + +#include "ProFrameProcessor.h" +#include "../CameraDeviceBase.h" + +namespace android { +namespace camera2 { + +ProFrameProcessor::ProFrameProcessor(wp<CameraDeviceBase> device) : + Thread(/*canCallJava*/false), + mDevice(device) { +} + +ProFrameProcessor::~ProFrameProcessor() { + ALOGV("%s: Exit", __FUNCTION__); +} + +status_t ProFrameProcessor::registerListener(int32_t minId, + int32_t maxId, wp<FilteredListener> listener) { + Mutex::Autolock l(mInputMutex); + ALOGV("%s: Registering listener for frame id range %d - %d", + __FUNCTION__, minId, maxId); + RangeListener rListener = { minId, maxId, listener }; + mRangeListeners.push_back(rListener); + return OK; +} + +status_t ProFrameProcessor::removeListener(int32_t minId, + int32_t maxId, + wp<FilteredListener> listener) { + Mutex::Autolock l(mInputMutex); + List<RangeListener>::iterator item = mRangeListeners.begin(); + while (item != mRangeListeners.end()) { + if (item->minId == minId && + item->maxId == maxId && + item->listener == listener) { + item = mRangeListeners.erase(item); + } else { + item++; + } + } + return OK; +} + +void ProFrameProcessor::dump(int fd, const Vector<String16>& /*args*/) { + String8 result(" Latest received frame:\n"); + write(fd, result.string(), result.size()); + mLastFrame.dump(fd, 2, 6); +} + +bool ProFrameProcessor::threadLoop() { + status_t res; + + sp<CameraDeviceBase> device; + { + device = mDevice.promote(); + if (device == 0) return false; + } + + res = device->waitForNextFrame(kWaitDuration); + if (res == OK) { + processNewFrames(device); + } else if (res != TIMED_OUT) { + ALOGE("ProFrameProcessor: Error waiting for new " + "frames: %s (%d)", strerror(-res), res); + } + + return true; +} + +void ProFrameProcessor::processNewFrames(const sp<CameraDeviceBase> &device) { + status_t res; + ATRACE_CALL(); + 
CameraMetadata frame; + + ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId()); + + while ( (res = device->getNextFrame(&frame)) == OK) { + + camera_metadata_entry_t entry; + + entry = frame.find(ANDROID_REQUEST_FRAME_COUNT); + if (entry.count == 0) { + ALOGE("%s: Camera %d: Error reading frame number", + __FUNCTION__, device->getId()); + break; + } + ATRACE_INT("cam2_frame", entry.data.i32[0]); + + if (!processSingleFrame(frame, device)) { + break; + } + + if (!frame.isEmpty()) { + mLastFrame.acquire(frame); + } + } + if (res != NOT_ENOUGH_DATA) { + ALOGE("%s: Camera %d: Error getting next frame: %s (%d)", + __FUNCTION__, device->getId(), strerror(-res), res); + return; + } + + return; +} + +bool ProFrameProcessor::processSingleFrame(CameraMetadata &frame, + const sp<CameraDeviceBase> &device) { + ALOGV("%s: Camera %d: Process single frame (is empty? %d)", + __FUNCTION__, device->getId(), frame.isEmpty()); + return processListeners(frame, device) == OK; +} + +status_t ProFrameProcessor::processListeners(const CameraMetadata &frame, + const sp<CameraDeviceBase> &device) { + ATRACE_CALL(); + camera_metadata_ro_entry_t entry; + + entry = frame.find(ANDROID_REQUEST_ID); + if (entry.count == 0) { + ALOGE("%s: Camera %d: Error reading frame id", + __FUNCTION__, device->getId()); + return BAD_VALUE; + } + int32_t frameId = entry.data.i32[0]; + + List<sp<FilteredListener> > listeners; + { + Mutex::Autolock l(mInputMutex); + + List<RangeListener>::iterator item = mRangeListeners.begin(); + while (item != mRangeListeners.end()) { + if (frameId >= item->minId && + frameId < item->maxId) { + sp<FilteredListener> listener = item->listener.promote(); + if (listener == 0) { + item = mRangeListeners.erase(item); + continue; + } else { + listeners.push_back(listener); + } + } + item++; + } + } + ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size()); + List<sp<FilteredListener> >::iterator item = listeners.begin(); + for (; item != 
listeners.end(); item++) { + (*item)->onFrameAvailable(frameId, frame); + } + return OK; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/camera2/ProFrameProcessor.h b/services/camera/libcameraservice/camera2/ProFrameProcessor.h new file mode 100644 index 0000000..b82942c --- /dev/null +++ b/services/camera/libcameraservice/camera2/ProFrameProcessor.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_PROFRAMEPROCESSOR_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_PROFRAMEPROCESSOR_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/KeyedVector.h> +#include <utils/List.h> +#include <camera/CameraMetadata.h> + +namespace android { + +class CameraDeviceBase; + +namespace camera2 { + +/* Output frame metadata processing thread. This thread waits for new + * frames from the device, and analyzes them as necessary. + */ +class ProFrameProcessor: public Thread { + public: + ProFrameProcessor(wp<CameraDeviceBase> device); + virtual ~ProFrameProcessor(); + + struct FilteredListener: virtual public RefBase { + virtual void onFrameAvailable(int32_t frameId, + const CameraMetadata &frame) = 0; + }; + + // Register a listener for a range of IDs [minId, maxId). 
Multiple listeners + // can be listening to the same range + status_t registerListener(int32_t minId, int32_t maxId, + wp<FilteredListener> listener); + status_t removeListener(int32_t minId, int32_t maxId, + wp<FilteredListener> listener); + + void dump(int fd, const Vector<String16>& args); + protected: + static const nsecs_t kWaitDuration = 10000000; // 10 ms + wp<CameraDeviceBase> mDevice; + + virtual bool threadLoop(); + + Mutex mInputMutex; + + struct RangeListener { + int32_t minId; + int32_t maxId; + wp<FilteredListener> listener; + }; + List<RangeListener> mRangeListeners; + + void processNewFrames(const sp<CameraDeviceBase> &device); + + virtual bool processSingleFrame(CameraMetadata &frame, + const sp<CameraDeviceBase> &device); + + status_t processListeners(const CameraMetadata &frame, + const sp<CameraDeviceBase> &device); + + CameraMetadata mLastFrame; +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp index 207f780..f4a9217 100644 --- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp +++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp @@ -20,27 +20,29 @@ #include <utils/Log.h> #include <utils/Trace.h> -#include <gui/SurfaceTextureClient.h> +#include <gui/Surface.h> #include <media/hardware/MetadataBufferType.h> #include "StreamingProcessor.h" #include "Camera2Heap.h" #include "../Camera2Client.h" -#include "../Camera2Device.h" +#include "../CameraDeviceBase.h" namespace android { namespace camera2 { -StreamingProcessor::StreamingProcessor(wp<Camera2Client> client): +StreamingProcessor::StreamingProcessor(sp<Camera2Client> client): mClient(client), + mDevice(client->getCameraDevice()), + mId(client->getCameraId()), mActiveRequest(NONE), + mPaused(false), mPreviewRequestId(Camera2Client::kPreviewRequestIdStart), mPreviewStreamId(NO_STREAM), 
mRecordingRequestId(Camera2Client::kRecordingRequestIdStart), mRecordingStreamId(NO_STREAM), mRecordingHeapCount(kDefaultRecordingHeapCount) { - } StreamingProcessor::~StreamingProcessor() { @@ -70,16 +72,19 @@ bool StreamingProcessor::haveValidPreviewWindow() const { status_t StreamingProcessor::updatePreviewRequest(const Parameters ¶ms) { ATRACE_CALL(); status_t res; - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } Mutex::Autolock m(mMutex); if (mPreviewRequest.entryCount() == 0) { - res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, &mPreviewRequest); if (res != OK) { ALOGE("%s: Camera %d: Unable to create default preview request: " - "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res); + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } } @@ -87,7 +92,7 @@ status_t StreamingProcessor::updatePreviewRequest(const Parameters ¶ms) { res = params.updateRequest(&mPreviewRequest); if (res != OK) { ALOGE("%s: Camera %d: Unable to update common entries of preview " - "request: %s (%d)", __FUNCTION__, client->getCameraId(), + "request: %s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -96,7 +101,7 @@ status_t StreamingProcessor::updatePreviewRequest(const Parameters ¶ms) { &mPreviewRequestId, 1); if (res != OK) { ALOGE("%s: Camera %d: Unable to update request id for preview: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } @@ -108,9 +113,11 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters ¶ms) { Mutex::Autolock m(mMutex); status_t res; - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return 
INVALID_OPERATION; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } if (mPreviewStreamId != NO_STREAM) { // Check if stream parameters have to change @@ -119,24 +126,24 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters ¶ms) { ¤tWidth, ¤tHeight, 0); if (res != OK) { ALOGE("%s: Camera %d: Error querying preview stream info: " - "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res); + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } if (currentWidth != (uint32_t)params.previewWidth || currentHeight != (uint32_t)params.previewHeight) { ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d", - __FUNCTION__, client->getCameraId(), currentWidth, currentHeight, + __FUNCTION__, mId, currentWidth, currentHeight, params.previewWidth, params.previewHeight); res = device->waitUntilDrained(); if (res != OK) { ALOGE("%s: Camera %d: Error waiting for preview to drain: " - "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res); + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } res = device->deleteStream(mPreviewStreamId); if (res != OK) { ALOGE("%s: Camera %d: Unable to delete old output stream " - "for preview: %s (%d)", __FUNCTION__, client->getCameraId(), + "for preview: %s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -151,7 +158,7 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters ¶ms) { &mPreviewStreamId); if (res != OK) { ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } } @@ -160,7 +167,7 @@ status_t StreamingProcessor::updatePreviewStream(const Parameters ¶ms) { params.previewTransform); if (res != OK) { ALOGE("%s: Camera %d: Unable to set 
preview stream transform: " - "%s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res); + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -174,12 +181,14 @@ status_t StreamingProcessor::deletePreviewStream() { Mutex::Autolock m(mMutex); if (mPreviewStreamId != NO_STREAM) { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } ALOGV("%s: for cameraId %d on streamId %d", - __FUNCTION__, client->getCameraId(), mPreviewStreamId); + __FUNCTION__, mId, mPreviewStreamId); res = device->waitUntilDrained(); if (res != OK) { @@ -206,11 +215,9 @@ int StreamingProcessor::getPreviewStreamId() const { status_t StreamingProcessor::setRecordingBufferCount(size_t count) { ATRACE_CALL(); // 32 is the current upper limit on the video buffer count for BufferQueue - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; if (count > 32) { ALOGE("%s: Camera %d: Error setting %d as video buffer count value", - __FUNCTION__, client->getCameraId(), count); + __FUNCTION__, mId, count); return BAD_VALUE; } @@ -233,15 +240,18 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters ¶ms) { status_t res; Mutex::Autolock m(mMutex); - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } if (mRecordingRequest.entryCount() == 0) { - res = client->getCameraDevice()->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD, + res = device->createDefaultRequest(CAMERA2_TEMPLATE_VIDEO_RECORD, &mRecordingRequest); if (res != OK) { ALOGE("%s: Camera %d: Unable 
to create default recording request:" - " %s (%d)", __FUNCTION__, client->getCameraId(), strerror(-res), res); + " %s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } } @@ -249,7 +259,7 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters ¶ms) { res = params.updateRequest(&mRecordingRequest); if (res != OK) { ALOGE("%s: Camera %d: Unable to update common entries of recording " - "request: %s (%d)", __FUNCTION__, client->getCameraId(), + "request: %s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -258,7 +268,7 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters ¶ms) { &mRecordingRequestId, 1); if (res != OK) { ALOGE("%s: Camera %d: Unable to update request id for request: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } @@ -270,9 +280,11 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { status_t res; Mutex::Autolock m(mMutex); - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } if (mRecordingConsumer == 0) { // Create CPU buffer queue endpoint. 
We need one more buffer here so that we can @@ -284,7 +296,7 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { true); mRecordingConsumer->setFrameAvailableListener(this); mRecordingConsumer->setName(String8("Camera2-RecordingConsumer")); - mRecordingWindow = new SurfaceTextureClient( + mRecordingWindow = new Surface( mRecordingConsumer->getProducerInterface()); // Allocate memory later, since we don't know buffer size until receipt } @@ -296,7 +308,7 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { ¤tWidth, ¤tHeight, 0); if (res != OK) { ALOGE("%s: Camera %d: Error querying recording output stream info: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -304,10 +316,16 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { currentHeight != (uint32_t)params.videoHeight) { // TODO: Should wait to be sure previous recording has finished res = device->deleteStream(mRecordingStreamId); - if (res != OK) { + + if (res == -EBUSY) { + ALOGV("%s: Camera %d: Device is busy, call " + "updateRecordingStream after it becomes idle", + __FUNCTION__, mId); + return res; + } else if (res != OK) { ALOGE("%s: Camera %d: Unable to delete old output stream " "for recording: %s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); return res; } mRecordingStreamId = NO_STREAM; @@ -321,7 +339,7 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId); if (res != OK) { ALOGE("%s: Camera %d: Can't create output stream for recording: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -337,9 +355,11 @@ status_t StreamingProcessor::deleteRecordingStream() { Mutex::Autolock m(mMutex); if (mRecordingStreamId != NO_STREAM) { - sp<Camera2Client> client = 
mClient.promote(); - if (client == 0) return INVALID_OPERATION; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } res = device->waitUntilDrained(); if (res != OK) { @@ -369,10 +389,13 @@ status_t StreamingProcessor::startStream(StreamType type, if (type == NONE) return INVALID_OPERATION; - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } - ALOGV("%s: Camera %d: type = %d", __FUNCTION__, client->getCameraId(), type); + ALOGV("%s: Camera %d: type = %d", __FUNCTION__, mId, type); Mutex::Autolock m(mMutex); @@ -384,46 +407,99 @@ status_t StreamingProcessor::startStream(StreamType type, outputStreams); if (res != OK) { ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } res = request.sort(); if (res != OK) { ALOGE("%s: Camera %d: Error sorting preview request: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } - res = client->getCameraDevice()->setStreamingRequest(request); + res = device->setStreamingRequest(request); if (res != OK) { ALOGE("%s: Camera %d: Unable to set preview request to start preview: " "%s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } mActiveRequest = type; + mPaused = false; return OK; } +status_t StreamingProcessor::togglePauseStream(bool pause) { + ATRACE_CALL(); + status_t res; + + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: 
Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + ALOGV("%s: Camera %d: toggling pause to %d", __FUNCTION__, mId, pause); + + Mutex::Autolock m(mMutex); + + if (mActiveRequest == NONE) { + ALOGE("%s: Camera %d: Can't toggle pause, streaming was not started", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mPaused == pause) { + return OK; + } + + if (pause) { + res = device->clearStreamingRequest(); + if (res != OK) { + ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + } else { + CameraMetadata &request = + (mActiveRequest == PREVIEW) ? mPreviewRequest + : mRecordingRequest; + res = device->setStreamingRequest(request); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to set preview request to resume: " + "%s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + } + + mPaused = pause; + return OK; +} + status_t StreamingProcessor::stopStream() { ATRACE_CALL(); status_t res; Mutex::Autolock m(mMutex); - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } res = device->clearStreamingRequest(); if (res != OK) { ALOGE("%s: Camera %d: Can't clear stream request: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return res; } + mActiveRequest = NONE; + mPaused = false; return OK; } @@ -447,7 +523,6 @@ status_t StreamingProcessor::incrementStreamingIds() { ATRACE_CALL(); Mutex::Autolock m(mMutex); - status_t res; mPreviewRequestId++; if (mPreviewRequestId >= Camera2Client::kPreviewRequestIdEnd) { mPreviewRequestId = Camera2Client::kPreviewRequestIdStart; @@ -467,7 +542,18 @@ void 
StreamingProcessor::onFrameAvailable() { nsecs_t timestamp; sp<Camera2Client> client = mClient.promote(); - if (client == 0) return; + if (client == 0) { + // Discard frames during shutdown + BufferItemConsumer::BufferItem imgBuffer; + res = mRecordingConsumer->acquireBuffer(&imgBuffer); + if (res != OK) { + ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return; + } + mRecordingConsumer->releaseBuffer(imgBuffer); + return; + } { /* acquire SharedParameters before mMutex so we don't dead lock @@ -478,7 +564,7 @@ void StreamingProcessor::onFrameAvailable() { res = mRecordingConsumer->acquireBuffer(&imgBuffer); if (res != OK) { ALOGE("%s: Camera %d: Error receiving recording buffer: %s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return; } timestamp = imgBuffer.mTimestamp; @@ -491,7 +577,7 @@ void StreamingProcessor::onFrameAvailable() { l.mParameters.state != Parameters::VIDEO_SNAPSHOT) { ALOGV("%s: Camera %d: Discarding recording image buffers " "received after recording done", __FUNCTION__, - client->getCameraId()); + mId); mRecordingConsumer->releaseBuffer(imgBuffer); return; } @@ -499,14 +585,14 @@ void StreamingProcessor::onFrameAvailable() { if (mRecordingHeap == 0) { const size_t bufferSize = 4 + sizeof(buffer_handle_t); ALOGV("%s: Camera %d: Creating recording heap with %d buffers of " - "size %d bytes", __FUNCTION__, client->getCameraId(), + "size %d bytes", __FUNCTION__, mId, mRecordingHeapCount, bufferSize); mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount, "Camera2Client::RecordingHeap"); if (mRecordingHeap->mHeap->getSize() == 0) { ALOGE("%s: Camera %d: Unable to allocate memory for recording", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); mRecordingConsumer->releaseBuffer(imgBuffer); return; } @@ -514,7 +600,7 @@ void StreamingProcessor::onFrameAvailable() { if (mRecordingBuffers[i].mBuf != 
BufferItemConsumer::INVALID_BUFFER_SLOT) { ALOGE("%s: Camera %d: Non-empty recording buffers list!", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); } } mRecordingBuffers.clear(); @@ -527,7 +613,7 @@ void StreamingProcessor::onFrameAvailable() { if ( mRecordingHeapFree == 0) { ALOGE("%s: Camera %d: No free recording buffers, dropping frame", - __FUNCTION__, client->getCameraId()); + __FUNCTION__, mId); mRecordingConsumer->releaseBuffer(imgBuffer); return; } @@ -537,7 +623,7 @@ void StreamingProcessor::onFrameAvailable() { mRecordingHeapFree--; ALOGV("%s: Camera %d: Timestamp %lld", - __FUNCTION__, client->getCameraId(), timestamp); + __FUNCTION__, mId, timestamp); ssize_t offset; size_t size; @@ -550,16 +636,16 @@ void StreamingProcessor::onFrameAvailable() { *((uint32_t*)data) = type; *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle; ALOGV("%s: Camera %d: Sending out buffer_handle_t %p", - __FUNCTION__, client->getCameraId(), + __FUNCTION__, mId, imgBuffer.mGraphicBuffer->handle); mRecordingBuffers.replaceAt(imgBuffer, heapIdx); recordingHeap = mRecordingHeap; } // Call outside locked parameters to allow re-entrancy from notification - Camera2Client::SharedCameraClient::Lock l(client->mSharedCameraClient); - if (l.mCameraClient != 0) { - l.mCameraClient->dataCallbackTimestamp(timestamp, + Camera2Client::SharedCameraCallbacks::Lock l(client->mSharedCameraCallbacks); + if (l.mRemoteCallback != 0) { + l.mRemoteCallback->dataCallbackTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME, recordingHeap->mBuffers[heapIdx]); } @@ -569,9 +655,6 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) { ATRACE_CALL(); status_t res; - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return; - Mutex::Autolock m(mMutex); // Make sure this is for the current heap ssize_t offset; @@ -579,7 +662,7 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) { sp<IMemoryHeap> heap = mem->getMemory(&offset, &size); 
if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) { ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release " - "(got %x, expected %x)", __FUNCTION__, client->getCameraId(), + "(got %x, expected %x)", __FUNCTION__, mId, heap->getHeapID(), mRecordingHeap->mHeap->getHeapID()); return; } @@ -587,7 +670,7 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) { uint32_t type = *(uint32_t*)data; if (type != kMetadataBufferTypeGrallocSource) { ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)", - __FUNCTION__, client->getCameraId(), type, + __FUNCTION__, mId, type, kMetadataBufferTypeGrallocSource); return; } @@ -607,19 +690,19 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) { } if (itemIndex == mRecordingBuffers.size()) { ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of " - "outstanding buffers", __FUNCTION__, client->getCameraId(), + "outstanding buffers", __FUNCTION__, mId, imgHandle); return; } ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__, - client->getCameraId(), imgHandle); + mId, imgHandle); res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]); if (res != OK) { ALOGE("%s: Camera %d: Unable to free recording frame " "(buffer_handle_t: %p): %s (%d)", __FUNCTION__, - client->getCameraId(), imgHandle, strerror(-res), res); + mId, imgHandle, strerror(-res), res); return; } mRecordingBuffers.replaceAt(itemIndex); @@ -628,7 +711,7 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) { } -status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) { +status_t StreamingProcessor::dump(int fd, const Vector<String16>& /*args*/) { String8 result; result.append(" Current requests:\n"); @@ -636,20 +719,29 @@ status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) { result.append(" Preview request:\n"); write(fd, result.string(), result.size()); mPreviewRequest.dump(fd, 2, 6); + result.clear(); } else 
{ result.append(" Preview request: undefined\n"); - write(fd, result.string(), result.size()); } if (mRecordingRequest.entryCount() != 0) { result = " Recording request:\n"; write(fd, result.string(), result.size()); mRecordingRequest.dump(fd, 2, 6); + result.clear(); } else { result = " Recording request: undefined\n"; - write(fd, result.string(), result.size()); } + const char* streamTypeString[] = { + "none", "preview", "record" + }; + result.append(String8::format(" Active request: %s (paused: %s)\n", + streamTypeString[mActiveRequest], + mPaused ? "yes" : "no")); + + write(fd, result.string(), result.size()); + return OK; } diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/camera2/StreamingProcessor.h index 96b100f..281b344 100644 --- a/services/camera/libcameraservice/camera2/StreamingProcessor.h +++ b/services/camera/libcameraservice/camera2/StreamingProcessor.h @@ -22,11 +22,12 @@ #include <gui/BufferItemConsumer.h> #include "Parameters.h" -#include "CameraMetadata.h" +#include "camera/CameraMetadata.h" namespace android { class Camera2Client; +class CameraDeviceBase; class IMemory; namespace camera2 { @@ -38,7 +39,7 @@ class Camera2Heap; */ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener { public: - StreamingProcessor(wp<Camera2Client> client); + StreamingProcessor(sp<Camera2Client> client); ~StreamingProcessor(); status_t setPreviewWindow(sp<ANativeWindow> window); @@ -64,6 +65,9 @@ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener { status_t startStream(StreamType type, const Vector<uint8_t> &outputStreams); + // Toggle between paused and unpaused. Stream must be started first. 
+ status_t togglePauseStream(bool pause); + status_t stopStream(); // Returns the request ID for the currently streaming request @@ -86,8 +90,11 @@ class StreamingProcessor: public BufferItemConsumer::FrameAvailableListener { }; wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; + int mId; StreamType mActiveRequest; + bool mPaused; // Preview-related members int32_t mPreviewRequestId; diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp index 1937955..94059cd 100644 --- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp +++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp @@ -29,8 +29,8 @@ #include <utils/Trace.h> #include "ZslProcessor.h" -#include <gui/SurfaceTextureClient.h> -#include "../Camera2Device.h" +#include <gui/Surface.h> +#include "../CameraDeviceBase.h" #include "../Camera2Client.h" @@ -38,12 +38,14 @@ namespace android { namespace camera2 { ZslProcessor::ZslProcessor( - wp<Camera2Client> client, + sp<Camera2Client> client, wp<CaptureSequencer> sequencer): Thread(false), mState(RUNNING), mClient(client), + mDevice(client->getCameraDevice()), mSequencer(sequencer), + mId(client->getCameraId()), mZslBufferAvailable(false), mZslStreamId(NO_STREAM), mZslReprocessStreamId(NO_STREAM), @@ -69,11 +71,13 @@ void ZslProcessor::onFrameAvailable() { } } -void ZslProcessor::onFrameAvailable(int32_t frameId, const CameraMetadata &frame) { +void ZslProcessor::onFrameAvailable(int32_t /*frameId*/, + const CameraMetadata &frame) { Mutex::Autolock l(mInputMutex); camera_metadata_ro_entry_t entry; entry = frame.find(ANDROID_SENSOR_TIMESTAMP); nsecs_t timestamp = entry.data.i64[0]; + (void)timestamp; ALOGVV("Got preview frame for timestamp %lld", timestamp); if (mState != RUNNING) return; @@ -112,8 +116,15 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { Mutex::Autolock l(mInputMutex); sp<Camera2Client> client = mClient.promote(); - if (client == 0) 
return OK; - sp<Camera2Device> device = client->getCameraDevice(); + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } if (mZslConsumer == 0) { // Create CPU buffer queue endpoint @@ -123,7 +134,7 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { true); mZslConsumer->setFrameAvailableListener(this); mZslConsumer->setName(String8("Camera2Client::ZslConsumer")); - mZslWindow = new SurfaceTextureClient( + mZslWindow = new Surface( mZslConsumer->getProducerInterface()); } @@ -135,7 +146,7 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { if (res != OK) { ALOGE("%s: Camera %d: Error querying capture output stream info: " "%s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); return res; } if (currentWidth != (uint32_t)params.fastInfo.arrayWidth || @@ -144,16 +155,16 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { if (res != OK) { ALOGE("%s: Camera %d: Unable to delete old reprocess stream " "for ZSL: %s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); return res; } ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed", - __FUNCTION__, client->getCameraId(), mZslStreamId); + __FUNCTION__, mId, mZslStreamId); res = device->deleteStream(mZslStreamId); if (res != OK) { ALOGE("%s: Camera %d: Unable to delete old output stream " "for ZSL: %s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); return res; } mZslStreamId = NO_STREAM; @@ -172,7 +183,7 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { &mZslStreamId); if (res != OK) { ALOGE("%s: Camera %d: Can't create output stream for ZSL: " - "%s (%d)", __FUNCTION__, 
client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -180,7 +191,7 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { &mZslReprocessStreamId); if (res != OK) { ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); return res; } @@ -199,14 +210,18 @@ status_t ZslProcessor::deleteStream() { Mutex::Autolock l(mInputMutex); if (mZslStreamId != NO_STREAM) { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return OK; - sp<Camera2Device> device = client->getCameraDevice(); + sp<CameraDeviceBase> device = mDevice.promote(); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + clearZslQueueLocked(); res = device->deleteReprocessStream(mZslReprocessStreamId); if (res != OK) { ALOGE("%s: Camera %d: Cannot delete ZSL reprocessing stream %d: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, mZslReprocessStreamId, strerror(-res), res); return res; } @@ -215,7 +230,7 @@ status_t ZslProcessor::deleteStream() { res = device->deleteStream(mZslStreamId); if (res != OK) { ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: " - "%s (%d)", __FUNCTION__, client->getCameraId(), + "%s (%d)", __FUNCTION__, mId, mZslStreamId, strerror(-res), res); return res; } @@ -233,11 +248,6 @@ int ZslProcessor::getStreamId() const { return mZslStreamId; } -int ZslProcessor::getReprocessStreamId() const { - Mutex::Autolock l(mInputMutex); - return mZslReprocessStreamId; -} - status_t ZslProcessor::pushToReprocess(int32_t requestId) { ALOGV("%s: Send in reprocess request with id %d", __FUNCTION__, requestId); @@ -245,7 +255,10 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) { status_t res; sp<Camera2Client> client = mClient.promote(); - if (client == 0) return INVALID_OPERATION; + if (client == 0) { + 
ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } IF_ALOGV() { dumpZslQueue(-1); @@ -288,10 +301,12 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) { uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS; res = request.update(ANDROID_REQUEST_TYPE, &requestType, 1); - uint8_t inputStreams[1] = { mZslReprocessStreamId }; + uint8_t inputStreams[1] = + { static_cast<uint8_t>(mZslReprocessStreamId) }; if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS, inputStreams, 1); - uint8_t outputStreams[1] = { client->getCaptureStreamId() }; + uint8_t outputStreams[1] = + { static_cast<uint8_t>(client->getCaptureStreamId()) }; if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS, outputStreams, 1); res = request.update(ANDROID_REQUEST_ID, @@ -306,7 +321,7 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) { if (res != OK) { ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: " "%s (%d)", - __FUNCTION__, client->getCameraId(), strerror(-res), res); + __FUNCTION__, mId, strerror(-res), res); return INVALID_OPERATION; } // TODO: have push-and-clear be atomic @@ -325,7 +340,7 @@ status_t ZslProcessor::pushToReprocess(int32_t requestId) { if (res != OK) { ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL " "capture request: %s (%d)", __FUNCTION__, - client->getCameraId(), + mId, strerror(-res), res); return res; } @@ -367,7 +382,7 @@ status_t ZslProcessor::clearZslQueueLocked() { return OK; } -void ZslProcessor::dump(int fd, const Vector<String16>& args) const { +void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const { Mutex::Autolock l(mInputMutex); if (!mLatestCapturedRequest.isEmpty()) { String8 result(" Latest ZSL capture request:\n"); @@ -394,26 +409,29 @@ bool ZslProcessor::threadLoop() { } do { - sp<Camera2Client> client = mClient.promote(); - if (client == 0) return false; - res = processNewZslBuffer(client); + res = processNewZslBuffer(); } while (res == 
OK); return true; } -status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) { +status_t ZslProcessor::processNewZslBuffer() { ATRACE_CALL(); status_t res; - + sp<BufferItemConsumer> zslConsumer; + { + Mutex::Autolock l(mInputMutex); + if (mZslConsumer == 0) return OK; + zslConsumer = mZslConsumer; + } ALOGVV("Trying to get next buffer"); BufferItemConsumer::BufferItem item; - res = mZslConsumer->acquireBuffer(&item); + res = zslConsumer->acquireBuffer(&item); if (res != OK) { if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) { ALOGE("%s: Camera %d: Error receiving ZSL image buffer: " "%s (%d)", __FUNCTION__, - client->getCameraId(), strerror(-res), res); + mId, strerror(-res), res); } else { ALOGVV(" No buffer"); } @@ -424,7 +442,7 @@ status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) { if (mState == LOCKED) { ALOGVV("In capture, discarding new ZSL buffers"); - mZslConsumer->releaseBuffer(item); + zslConsumer->releaseBuffer(item); return OK; } @@ -432,7 +450,7 @@ status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) { if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) { ALOGVV("Releasing oldest buffer"); - mZslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer); + zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer); mZslQueue.replaceAt(mZslQueueTail); mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth; } diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.h b/services/camera/libcameraservice/camera2/ZslProcessor.h index c80e7f4..27b597e 100644 --- a/services/camera/libcameraservice/camera2/ZslProcessor.h +++ b/services/camera/libcameraservice/camera2/ZslProcessor.h @@ -25,9 +25,10 @@ #include <gui/BufferItemConsumer.h> #include "Parameters.h" #include "FrameProcessor.h" -#include "CameraMetadata.h" +#include "camera/CameraMetadata.h" #include "Camera2Heap.h" -#include "../Camera2Device.h" +#include "../CameraDeviceBase.h" +#include "ZslProcessorInterface.h" namespace 
android { @@ -44,9 +45,10 @@ class ZslProcessor: virtual public Thread, virtual public BufferItemConsumer::FrameAvailableListener, virtual public FrameProcessor::FilteredListener, - virtual public Camera2Device::BufferReleasedListener { + virtual public CameraDeviceBase::BufferReleasedListener, + public ZslProcessorInterface { public: - ZslProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer); + ZslProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); ~ZslProcessor(); // From mZslConsumer @@ -56,10 +58,15 @@ class ZslProcessor: virtual void onBufferReleased(buffer_handle_t *handle); + /** + **************************************** + * ZslProcessorInterface implementation * + **************************************** + */ + status_t updateStream(const Parameters ¶ms); status_t deleteStream(); int getStreamId() const; - int getReprocessStreamId() const; status_t pushToReprocess(int32_t requestId); status_t clearZslQueue(); @@ -74,7 +81,9 @@ class ZslProcessor: } mState; wp<Camera2Client> mClient; + wp<CameraDeviceBase> mDevice; wp<CaptureSequencer> mSequencer; + int mId; mutable Mutex mInputMutex; bool mZslBufferAvailable; @@ -109,7 +118,7 @@ class ZslProcessor: virtual bool threadLoop(); - status_t processNewZslBuffer(sp<Camera2Client> &client); + status_t processNewZslBuffer(); // Match up entries from frame list to buffers in ZSL queue void findMatchesLocked(); diff --git a/services/camera/libcameraservice/camera2/ZslProcessor3.cpp b/services/camera/libcameraservice/camera2/ZslProcessor3.cpp new file mode 100644 index 0000000..2e06691 --- /dev/null +++ b/services/camera/libcameraservice/camera2/ZslProcessor3.cpp @@ -0,0 +1,481 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Camera2-ZslProcessor3" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 +//#define LOG_NNDEBUG 0 + +#ifdef LOG_NNDEBUG +#define ALOGVV(...) ALOGV(__VA_ARGS__) +#else +#define ALOGVV(...) ((void)0) +#endif + +#include <utils/Log.h> +#include <utils/Trace.h> + +#include "ZslProcessor3.h" +#include <gui/Surface.h> +#include "../CameraDeviceBase.h" +#include "../Camera3Device.h" +#include "../Camera2Client.h" + + +namespace android { +namespace camera2 { + +ZslProcessor3::ZslProcessor3( + sp<Camera2Client> client, + wp<CaptureSequencer> sequencer): + Thread(false), + mState(RUNNING), + mClient(client), + mSequencer(sequencer), + mId(client->getCameraId()), + mZslStreamId(NO_STREAM), + mFrameListHead(0), + mZslQueueHead(0), + mZslQueueTail(0) { + mZslQueue.insertAt(0, kZslBufferDepth); + mFrameList.insertAt(0, kFrameListDepth); + sp<CaptureSequencer> captureSequencer = mSequencer.promote(); + if (captureSequencer != 0) captureSequencer->setZslProcessor(this); +} + +ZslProcessor3::~ZslProcessor3() { + ALOGV("%s: Exit", __FUNCTION__); + deleteStream(); +} + +void ZslProcessor3::onFrameAvailable(int32_t /*frameId*/, + const CameraMetadata &frame) { + Mutex::Autolock l(mInputMutex); + camera_metadata_ro_entry_t entry; + entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + nsecs_t timestamp = entry.data.i64[0]; + (void)timestamp; + ALOGVV("Got preview metadata for timestamp %lld", timestamp); + + if (mState != RUNNING) return; + + mFrameList.editItemAt(mFrameListHead) = frame; + mFrameListHead = (mFrameListHead + 1) % 
kFrameListDepth; +} + +status_t ZslProcessor3::updateStream(const Parameters ¶ms) { + ATRACE_CALL(); + ALOGV("%s: Configuring ZSL streams", __FUNCTION__); + status_t res; + + Mutex::Autolock l(mInputMutex); + + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + sp<Camera3Device> device = + static_cast<Camera3Device*>(client->getCameraDevice().get()); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (mZslStreamId != NO_STREAM) { + // Check if stream parameters have to change + uint32_t currentWidth, currentHeight; + res = device->getStreamInfo(mZslStreamId, + ¤tWidth, ¤tHeight, 0); + if (res != OK) { + ALOGE("%s: Camera %d: Error querying capture output stream info: " + "%s (%d)", __FUNCTION__, + client->getCameraId(), strerror(-res), res); + return res; + } + if (currentWidth != (uint32_t)params.fastInfo.arrayWidth || + currentHeight != (uint32_t)params.fastInfo.arrayHeight) { + ALOGV("%s: Camera %d: Deleting stream %d since the buffer " + "dimensions changed", + __FUNCTION__, client->getCameraId(), mZslStreamId); + res = device->deleteStream(mZslStreamId); + if (res == -EBUSY) { + ALOGV("%s: Camera %d: Device is busy, call updateStream again " + " after it becomes idle", __FUNCTION__, mId); + return res; + } else if(res != OK) { + ALOGE("%s: Camera %d: Unable to delete old output stream " + "for ZSL: %s (%d)", __FUNCTION__, + client->getCameraId(), strerror(-res), res); + return res; + } + mZslStreamId = NO_STREAM; + } + } + + if (mZslStreamId == NO_STREAM) { + // Create stream for HAL production + // TODO: Sort out better way to select resolution for ZSL + + // Note that format specified internally in Camera3ZslStream + res = device->createZslStream( + params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, + kZslBufferDepth, + &mZslStreamId, + &mZslStream); + if (res 
!= OK) { + ALOGE("%s: Camera %d: Can't create ZSL stream: " + "%s (%d)", __FUNCTION__, client->getCameraId(), + strerror(-res), res); + return res; + } + } + client->registerFrameListener(Camera2Client::kPreviewRequestIdStart, + Camera2Client::kPreviewRequestIdEnd, + this); + + return OK; +} + +status_t ZslProcessor3::deleteStream() { + ATRACE_CALL(); + status_t res; + + Mutex::Autolock l(mInputMutex); + + if (mZslStreamId != NO_STREAM) { + sp<Camera2Client> client = mClient.promote(); + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + sp<Camera3Device> device = + reinterpret_cast<Camera3Device*>(client->getCameraDevice().get()); + if (device == 0) { + ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + res = device->deleteStream(mZslStreamId); + if (res != OK) { + ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: " + "%s (%d)", __FUNCTION__, client->getCameraId(), + mZslStreamId, strerror(-res), res); + return res; + } + + mZslStreamId = NO_STREAM; + } + return OK; +} + +int ZslProcessor3::getStreamId() const { + Mutex::Autolock l(mInputMutex); + return mZslStreamId; +} + +status_t ZslProcessor3::pushToReprocess(int32_t requestId) { + ALOGV("%s: Send in reprocess request with id %d", + __FUNCTION__, requestId); + Mutex::Autolock l(mInputMutex); + status_t res; + sp<Camera2Client> client = mClient.promote(); + + if (client == 0) { + ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId); + return INVALID_OPERATION; + } + + IF_ALOGV() { + dumpZslQueue(-1); + } + + size_t metadataIdx; + nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx); + + if (candidateTimestamp == -1) { + ALOGE("%s: Could not find good candidate for ZSL reprocessing", + __FUNCTION__); + return NOT_ENOUGH_DATA; + } + + res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp, + /*actualTimestamp*/NULL); + + if (res == 
mZslStream->NO_BUFFER_AVAILABLE) { + ALOGV("%s: No ZSL buffers yet", __FUNCTION__); + return NOT_ENOUGH_DATA; + } else if (res != OK) { + ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + { + CameraMetadata request = mFrameList[metadataIdx]; + + // Verify that the frame is reasonable for reprocessing + + camera_metadata_entry_t entry; + entry = request.find(ANDROID_CONTROL_AE_STATE); + if (entry.count == 0) { + ALOGE("%s: ZSL queue frame has no AE state field!", + __FUNCTION__); + return BAD_VALUE; + } + if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED && + entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) { + ALOGV("%s: ZSL queue frame AE state is %d, need full capture", + __FUNCTION__, entry.data.u8[0]); + return NOT_ENOUGH_DATA; + } + + uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS; + res = request.update(ANDROID_REQUEST_TYPE, + &requestType, 1); + uint8_t inputStreams[1] = + { static_cast<uint8_t>(mZslStreamId) }; + if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS, + inputStreams, 1); + // TODO: Shouldn't we also update the latest preview frame? 
+ uint8_t outputStreams[1] = + { static_cast<uint8_t>(client->getCaptureStreamId()) }; + if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS, + outputStreams, 1); + res = request.update(ANDROID_REQUEST_ID, + &requestId, 1); + + if (res != OK ) { + ALOGE("%s: Unable to update frame to a reprocess request", + __FUNCTION__); + return INVALID_OPERATION; + } + + res = client->stopStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: " + "%s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return INVALID_OPERATION; + } + + // Update JPEG settings + { + SharedParameters::Lock l(client->getParameters()); + res = l.mParameters.updateRequestJpeg(&request); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL " + "capture request: %s (%d)", __FUNCTION__, + client->getCameraId(), + strerror(-res), res); + return res; + } + } + + mLatestCapturedRequest = request; + res = client->getCameraDevice()->capture(request); + if (res != OK ) { + ALOGE("%s: Unable to send ZSL reprocess request to capture: %s" + " (%d)", __FUNCTION__, strerror(-res), res); + return res; + } + + mState = LOCKED; + } + + return OK; +} + +status_t ZslProcessor3::clearZslQueue() { + Mutex::Autolock l(mInputMutex); + // If in middle of capture, can't clear out queue + if (mState == LOCKED) return OK; + + return clearZslQueueLocked(); +} + +status_t ZslProcessor3::clearZslQueueLocked() { + if (mZslStream != 0) { + return mZslStream->clearInputRingBuffer(); + } + return OK; +} + +void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const { + Mutex::Autolock l(mInputMutex); + if (!mLatestCapturedRequest.isEmpty()) { + String8 result(" Latest ZSL capture request:\n"); + write(fd, result.string(), result.size()); + mLatestCapturedRequest.dump(fd, 2, 6); + } else { + String8 result(" Latest ZSL capture request: none yet\n"); + write(fd, result.string(), result.size()); + } + dumpZslQueue(fd); +} + +bool 
ZslProcessor3::threadLoop() { + // TODO: remove dependency on thread + return true; +} + +void ZslProcessor3::dumpZslQueue(int fd) const { + String8 header("ZSL queue contents:"); + String8 indent(" "); + ALOGV("%s", header.string()); + if (fd != -1) { + header = indent + header + "\n"; + write(fd, header.string(), header.size()); + } + for (size_t i = 0; i < mZslQueue.size(); i++) { + const ZslPair &queueEntry = mZslQueue[i]; + nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp; + camera_metadata_ro_entry_t entry; + nsecs_t frameTimestamp = 0; + int frameAeState = -1; + if (!queueEntry.frame.isEmpty()) { + entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count > 0) frameTimestamp = entry.data.i64[0]; + entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE); + if (entry.count > 0) frameAeState = entry.data.u8[0]; + } + String8 result = + String8::format(" %d: b: %lld\tf: %lld, AE state: %d", i, + bufferTimestamp, frameTimestamp, frameAeState); + ALOGV("%s", result.string()); + if (fd != -1) { + result = indent + result + "\n"; + write(fd, result.string(), result.size()); + } + + } +} + +nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const { + /** + * Find the smallest timestamp we know about so far + * - ensure that aeState is either converged or locked + */ + + size_t idx = 0; + nsecs_t minTimestamp = -1; + + size_t emptyCount = mFrameList.size(); + + for (size_t j = 0; j < mFrameList.size(); j++) { + const CameraMetadata &frame = mFrameList[j]; + if (!frame.isEmpty()) { + + emptyCount--; + + camera_metadata_ro_entry_t entry; + entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + if (entry.count == 0) { + ALOGE("%s: Can't find timestamp in frame!", + __FUNCTION__); + continue; + } + nsecs_t frameTimestamp = entry.data.i64[0]; + if (minTimestamp > frameTimestamp || minTimestamp == -1) { + + entry = frame.find(ANDROID_CONTROL_AE_STATE); + + if (entry.count == 0) { + /** + * This is most likely a HAL bug. 
The aeState field is + * mandatory, so it should always be in a metadata packet. + */ + ALOGW("%s: ZSL queue frame has no AE state field!", + __FUNCTION__); + continue; + } + if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED && + entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) { + ALOGVV("%s: ZSL queue frame AE state is %d, need " + "full capture", __FUNCTION__, entry.data.u8[0]); + continue; + } + + minTimestamp = frameTimestamp; + idx = j; + } + + ALOGVV("%s: Saw timestamp %lld", __FUNCTION__, frameTimestamp); + } + } + + if (emptyCount == mFrameList.size()) { + /** + * This could be mildly bad and means our ZSL was triggered before + * there were any frames yet received by the camera framework. + * + * This is a fairly corner case which can happen under: + * + a user presses the shutter button real fast when the camera starts + * (startPreview followed immediately by takePicture). + * + burst capture case (hitting shutter button as fast possible) + * + * If this happens in steady case (preview running for a while, call + * a single takePicture) then this might be a fwk bug. 
+ */ + ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__); + } + + ALOGV("%s: Candidate timestamp %lld (idx %d), empty frames: %d", + __FUNCTION__, minTimestamp, idx, emptyCount); + + if (metadataIdx) { + *metadataIdx = idx; + } + + return minTimestamp; +} + +void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) { + // Intentionally left empty + // Although theoretically we could use this to get better dump info +} + +void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) { + Mutex::Autolock l(mInputMutex); + + // ignore output buffers + if (bufferInfo.mOutput) { + return; + } + + // TODO: Verify that the buffer is in our queue by looking at timestamp + // theoretically unnecessary unless we change the following assumptions: + // -- only 1 buffer reprocessed at a time (which is the case now) + + // Erase entire ZSL queue since we've now completed the capture and preview + // is stopped. + // + // We need to guarantee that if we do two back-to-back captures, + // the second won't use a buffer that's older/the same as the first, which + // is theoretically possible if we don't clear out the queue and the + // selection criteria is something like 'newest'. Clearing out the queue + // on a completed capture ensures we'll only use new data. + ALOGV("%s: Memory optimization, clearing ZSL queue", + __FUNCTION__); + clearZslQueueLocked(); + + // Required so we accept more ZSL requests + mState = RUNNING; +} + +}; // namespace camera2 +}; // namespace android diff --git a/services/camera/libcameraservice/camera2/ZslProcessor3.h b/services/camera/libcameraservice/camera2/ZslProcessor3.h new file mode 100644 index 0000000..cb98b99 --- /dev/null +++ b/services/camera/libcameraservice/camera2/ZslProcessor3.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H + +#include <utils/Thread.h> +#include <utils/String16.h> +#include <utils/Vector.h> +#include <utils/Mutex.h> +#include <utils/Condition.h> +#include <gui/BufferItemConsumer.h> +#include "Parameters.h" +#include "FrameProcessor.h" +#include "camera/CameraMetadata.h" +#include "Camera2Heap.h" +#include "../CameraDeviceBase.h" +#include "ZslProcessorInterface.h" +#include "../camera3/Camera3ZslStream.h" + +namespace android { + +class Camera2Client; + +namespace camera2 { + +class CaptureSequencer; + +/*** + * ZSL queue processing + */ +class ZslProcessor3 : + public ZslProcessorInterface, + public camera3::Camera3StreamBufferListener, + virtual public Thread, + virtual public FrameProcessor::FilteredListener { + public: + ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); + ~ZslProcessor3(); + + // From FrameProcessor + virtual void onFrameAvailable(int32_t frameId, const CameraMetadata &frame); + + /** + **************************************** + * ZslProcessorInterface implementation * + **************************************** + */ + + virtual status_t updateStream(const Parameters ¶ms); + virtual status_t deleteStream(); + virtual int getStreamId() const; + + virtual status_t pushToReprocess(int32_t requestId); + virtual status_t clearZslQueue(); + + void dump(int fd, const Vector<String16>& args) const; + + protected: + /** + ********************************************** + * Camera3StreamBufferListener 
implementation * + ********************************************** + */ + typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo; + // Buffer was acquired by the HAL + virtual void onBufferAcquired(const BufferInfo& bufferInfo); + // Buffer was released by the HAL + virtual void onBufferReleased(const BufferInfo& bufferInfo); + + private: + static const nsecs_t kWaitDuration = 10000000; // 10 ms + + enum { + RUNNING, + LOCKED + } mState; + + wp<Camera2Client> mClient; + wp<CaptureSequencer> mSequencer; + + const int mId; + + mutable Mutex mInputMutex; + + enum { + NO_STREAM = -1 + }; + + int mZslStreamId; + sp<camera3::Camera3ZslStream> mZslStream; + + struct ZslPair { + BufferItemConsumer::BufferItem buffer; + CameraMetadata frame; + }; + + static const size_t kZslBufferDepth = 4; + static const size_t kFrameListDepth = kZslBufferDepth * 2; + Vector<CameraMetadata> mFrameList; + size_t mFrameListHead; + + ZslPair mNextPair; + + Vector<ZslPair> mZslQueue; + size_t mZslQueueHead; + size_t mZslQueueTail; + + CameraMetadata mLatestCapturedRequest; + + virtual bool threadLoop(); + + status_t clearZslQueueLocked(); + + void dumpZslQueue(int id) const; + + nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const; +}; + + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/camera2/ZslProcessorInterface.h b/services/camera/libcameraservice/camera2/ZslProcessorInterface.h new file mode 100644 index 0000000..183c0c2 --- /dev/null +++ b/services/camera/libcameraservice/camera2/ZslProcessorInterface.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H +#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H + +#include <utils/Errors.h> +#include <utils/RefBase.h> + +namespace android { +namespace camera2 { + +class Parameters; + +class ZslProcessorInterface : virtual public RefBase { +public: + + // Get ID for use with android.request.outputStreams / inputStreams + virtual int getStreamId() const = 0; + + // Update the streams by recreating them if the size/format has changed + virtual status_t updateStream(const Parameters& params) = 0; + + // Delete the underlying CameraDevice streams + virtual status_t deleteStream() = 0; + + /** + * Submits a ZSL capture request (id = requestId) + * + * An appropriate ZSL buffer is selected by the closest timestamp, + * then we push that buffer to be reprocessed by the HAL. + * A capture request is created and submitted on behalf of the client. 
+ */ + virtual status_t pushToReprocess(int32_t requestId) = 0; + + // Flush the ZSL buffer queue, freeing up all the buffers + virtual status_t clearZslQueue() = 0; + + // (Debugging only) Dump the current state to the specified file descriptor + virtual void dump(int fd, const Vector<String16>& args) const = 0; +}; + +}; //namespace camera2 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/camera3/Camera3IOStreamBase.cpp new file mode 100644 index 0000000..0850566 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3IOStreamBase.cpp @@ -0,0 +1,275 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera3-IOStreamBase" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +// This is needed for stdint.h to define INT64_MAX in C++ +#define __STDC_LIMIT_MACROS + +#include <utils/Log.h> +#include <utils/Trace.h> +#include "Camera3IOStreamBase.h" + +namespace android { + +namespace camera3 { + +Camera3IOStreamBase::Camera3IOStreamBase(int id, camera3_stream_type_t type, + uint32_t width, uint32_t height, size_t maxSize, int format) : + Camera3Stream(id, type, + width, height, maxSize, format), + mTotalBufferCount(0), + mDequeuedBufferCount(0), + mFrameCount(0), + mLastTimestamp(0) { + + mCombinedFence = new Fence(); + + if (maxSize > 0 && format != HAL_PIXEL_FORMAT_BLOB) { + ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__, + format); + mState = STATE_ERROR; + } +} + +Camera3IOStreamBase::~Camera3IOStreamBase() { + disconnectLocked(); +} + +bool Camera3IOStreamBase::hasOutstandingBuffersLocked() const { + nsecs_t signalTime = mCombinedFence->getSignalTime(); + ALOGV("%s: Stream %d: Has %d outstanding buffers," + " buffer signal time is %lld", + __FUNCTION__, mId, mDequeuedBufferCount, signalTime); + if (mDequeuedBufferCount > 0 || signalTime == INT64_MAX) { + return true; + } + return false; +} + +status_t Camera3IOStreamBase::waitUntilIdle(nsecs_t timeout) { + status_t res; + { + Mutex::Autolock l(mLock); + while (mDequeuedBufferCount > 0) { + if (timeout != TIMEOUT_NEVER) { + nsecs_t startTime = systemTime(); + res = mBufferReturnedSignal.waitRelative(mLock, timeout); + if (res == TIMED_OUT) { + return res; + } else if (res != OK) { + ALOGE("%s: Error waiting for outstanding buffers: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + nsecs_t deltaTime = systemTime() - startTime; + if (timeout <= deltaTime) { + timeout = 0; + } else { + timeout -= deltaTime; + } + } else { + res = mBufferReturnedSignal.wait(mLock); + if (res != OK) { + ALOGE("%s: Error waiting for outstanding buffers: %s (%d)", + 
__FUNCTION__, strerror(-res), res); + return res; + } + } + } + } + + // No lock + + unsigned int timeoutMs; + if (timeout == TIMEOUT_NEVER) { + timeoutMs = Fence::TIMEOUT_NEVER; + } else if (timeout == 0) { + timeoutMs = 0; + } else { + // Round up to wait at least 1 ms + timeoutMs = (timeout + 999999) / 1000000; + } + + return mCombinedFence->wait(timeoutMs); +} + +void Camera3IOStreamBase::dump(int fd, const Vector<String16> &args) const { + (void) args; + String8 lines; + lines.appendFormat(" State: %d\n", mState); + lines.appendFormat(" Dims: %d x %d, format 0x%x\n", + camera3_stream::width, camera3_stream::height, + camera3_stream::format); + lines.appendFormat(" Max size: %d\n", mMaxSize); + lines.appendFormat(" Usage: %d, max HAL buffers: %d\n", + camera3_stream::usage, camera3_stream::max_buffers); + lines.appendFormat(" Frames produced: %d, last timestamp: %lld ns\n", + mFrameCount, mLastTimestamp); + lines.appendFormat(" Total buffers: %d, currently dequeued: %d\n", + mTotalBufferCount, mDequeuedBufferCount); + write(fd, lines.string(), lines.size()); +} + +status_t Camera3IOStreamBase::configureQueueLocked() { + status_t res; + + switch (mState) { + case STATE_IN_RECONFIG: + res = disconnectLocked(); + if (res != OK) { + return res; + } + break; + case STATE_IN_CONFIG: + // OK + break; + default: + ALOGE("%s: Bad state: %d", __FUNCTION__, mState); + return INVALID_OPERATION; + } + + return OK; +} + +size_t Camera3IOStreamBase::getBufferCountLocked() { + return mTotalBufferCount; +} + +status_t Camera3IOStreamBase::disconnectLocked() { + switch (mState) { + case STATE_IN_RECONFIG: + case STATE_CONFIGURED: + // OK + break; + default: + // No connection, nothing to do + ALOGV("%s: Stream %d: Already disconnected", + __FUNCTION__, mId); + return -ENOTCONN; + } + + if (mDequeuedBufferCount > 0) { + ALOGE("%s: Can't disconnect with %d buffers still dequeued!", + __FUNCTION__, mDequeuedBufferCount); + return INVALID_OPERATION; + } + + return OK; +} + +void 
Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer, + buffer_handle_t *handle, + int acquireFence, + int releaseFence, + camera3_buffer_status_t status) { + /** + * Note that all fences are now owned by HAL. + */ + + // Handing out a raw pointer to this object. Increment internal refcount. + incStrong(this); + buffer.stream = this; + buffer.buffer = handle; + buffer.acquire_fence = acquireFence; + buffer.release_fence = releaseFence; + buffer.status = status; + + mDequeuedBufferCount++; +} + +status_t Camera3IOStreamBase::getBufferPreconditionCheckLocked() const { + // Allow dequeue during IN_[RE]CONFIG for registration + if (mState != STATE_CONFIGURED && + mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) { + ALOGE("%s: Stream %d: Can't get buffers in unconfigured state %d", + __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + + // Only limit dequeue amount when fully configured + if (mState == STATE_CONFIGURED && + mDequeuedBufferCount == camera3_stream::max_buffers) { + ALOGE("%s: Stream %d: Already dequeued maximum number of simultaneous" + " buffers (%d)", __FUNCTION__, mId, + camera3_stream::max_buffers); + return INVALID_OPERATION; + } + + return OK; +} + +status_t Camera3IOStreamBase::returnBufferPreconditionCheckLocked() const { + // Allow buffers to be returned in the error state, to allow for disconnect + // and in the in-config states for registration + if (mState == STATE_CONSTRUCTED) { + ALOGE("%s: Stream %d: Can't return buffers in unconfigured state %d", + __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + if (mDequeuedBufferCount == 0) { + ALOGE("%s: Stream %d: No buffers outstanding to return", __FUNCTION__, + mId); + return INVALID_OPERATION; + } + + return OK; +} + +status_t Camera3IOStreamBase::returnAnyBufferLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output) { + status_t res; + + // returnBuffer may be called from a raw pointer, not a sp<>, and we'll be + // 
decrementing the internal refcount next. In case this is the last ref, we + // might get destructed on the decStrong(), so keep an sp around until the + // end of the call - otherwise have to sprinkle the decStrong on all exit + // points. + sp<Camera3IOStreamBase> keepAlive(this); + decStrong(this); + + if ((res = returnBufferPreconditionCheckLocked()) != OK) { + return res; + } + + sp<Fence> releaseFence; + res = returnBufferCheckedLocked(buffer, timestamp, output, + &releaseFence); + if (res != OK) { + return res; + } + + mCombinedFence = Fence::merge(mName, mCombinedFence, releaseFence); + + mDequeuedBufferCount--; + mBufferReturnedSignal.signal(); + + if (output) { + mLastTimestamp = timestamp; + } + + return OK; +} + + + +}; // namespace camera3 + +}; // namespace android diff --git a/services/camera/libcameraservice/camera3/Camera3IOStreamBase.h b/services/camera/libcameraservice/camera3/Camera3IOStreamBase.h new file mode 100644 index 0000000..74c4484 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3IOStreamBase.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_SERVERS_CAMERA3_IO_STREAM_BASE_H +#define ANDROID_SERVERS_CAMERA3_IO_STREAM_BASE_H + +#include <utils/RefBase.h> +#include <gui/Surface.h> + +#include "Camera3Stream.h" + +namespace android { + +namespace camera3 { + +/** + * A base class for managing a single stream of I/O data from the camera device. + */ +class Camera3IOStreamBase : + public Camera3Stream { + protected: + Camera3IOStreamBase(int id, camera3_stream_type_t type, + uint32_t width, uint32_t height, size_t maxSize, int format); + + public: + + virtual ~Camera3IOStreamBase(); + + /** + * Camera3Stream interface + */ + + virtual status_t waitUntilIdle(nsecs_t timeout); + virtual void dump(int fd, const Vector<String16> &args) const; + + protected: + size_t mTotalBufferCount; + // sum of input and output buffers that are currently acquired by HAL + size_t mDequeuedBufferCount; + Condition mBufferReturnedSignal; + uint32_t mFrameCount; + // Last received output buffer's timestamp + nsecs_t mLastTimestamp; + + // The merged release fence for all returned buffers + sp<Fence> mCombinedFence; + + status_t returnAnyBufferLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output); + + virtual status_t returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> *releaseFenceOut) = 0; + + /** + * Internal Camera3Stream interface + */ + virtual bool hasOutstandingBuffersLocked() const; + + virtual size_t getBufferCountLocked(); + + status_t getBufferPreconditionCheckLocked() const; + status_t returnBufferPreconditionCheckLocked() const; + + // State check only + virtual status_t configureQueueLocked(); + // State checks only + virtual status_t disconnectLocked(); + + // Hand out the buffer to a native location, + // incrementing the internal refcount and dequeued buffer count + void handoutBufferLocked(camera3_stream_buffer &buffer, + buffer_handle_t *handle, + int acquire_fence, + int release_fence, + 
camera3_buffer_status_t status); + +}; // class Camera3IOStreamBase + +} // namespace camera3 + +} // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp new file mode 100644 index 0000000..13e9c83 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera3-InputStream" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include "Camera3InputStream.h" + +namespace android { + +namespace camera3 { + +Camera3InputStream::Camera3InputStream(int id, + uint32_t width, uint32_t height, int format) : + Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height, + /*maxSize*/0, format) { + + if (format == HAL_PIXEL_FORMAT_BLOB) { + ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__); + mState = STATE_ERROR; + } +} + +Camera3InputStream::~Camera3InputStream() { + disconnectLocked(); +} + +status_t Camera3InputStream::getInputBufferLocked( + camera3_stream_buffer *buffer) { + ATRACE_CALL(); + status_t res; + + // FIXME: will not work in (re-)registration + if (mState == STATE_IN_CONFIG || mState == STATE_IN_RECONFIG) { + ALOGE("%s: Stream %d: Buffer registration for input streams" + " not implemented (state %d)", + __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + + if ((res = getBufferPreconditionCheckLocked()) != OK) { + return res; + } + + ANativeWindowBuffer* anb; + int fenceFd; + + assert(mConsumer != 0); + + BufferItem bufferItem; + res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false); + + if (res != OK) { + ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + anb = bufferItem.mGraphicBuffer->getNativeBuffer(); + assert(anb != NULL); + fenceFd = bufferItem.mFence->dup(); + + /** + * FenceFD now owned by HAL except in case of error, + * in which case we reassign it to acquire_fence + */ + handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd, + /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK); + mBuffersInFlight.push_back(bufferItem); + + return OK; +} + +status_t Camera3InputStream::returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> 
*releaseFenceOut) { + + (void)timestamp; + (void)output; + ALOG_ASSERT(!output, "Expected output to be false"); + + status_t res; + + bool bufferFound = false; + BufferItem bufferItem; + { + // Find the buffer we are returning + Vector<BufferItem>::iterator it, end; + for (it = mBuffersInFlight.begin(), end = mBuffersInFlight.end(); + it != end; + ++it) { + + const BufferItem& tmp = *it; + ANativeWindowBuffer *anb = tmp.mGraphicBuffer->getNativeBuffer(); + if (anb != NULL && &(anb->handle) == buffer.buffer) { + bufferFound = true; + bufferItem = tmp; + mBuffersInFlight.erase(it); + mDequeuedBufferCount--; + } + } + } + if (!bufferFound) { + ALOGE("%s: Stream %d: Can't return buffer that wasn't sent to HAL", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) { + if (buffer.release_fence != -1) { + ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when " + "there is an error", __FUNCTION__, mId, buffer.release_fence); + close(buffer.release_fence); + } + + /** + * Reassign release fence as the acquire fence incase of error + */ + const_cast<camera3_stream_buffer*>(&buffer)->release_fence = + buffer.acquire_fence; + } + + /** + * Unconditionally return buffer to the buffer queue. 
+ * - Fwk takes over the release_fence ownership + */ + sp<Fence> releaseFence = new Fence(buffer.release_fence); + res = mConsumer->releaseBuffer(bufferItem, releaseFence); + if (res != OK) { + ALOGE("%s: Stream %d: Error releasing buffer back to buffer queue:" + " %s (%d)", __FUNCTION__, mId, strerror(-res), res); + return res; + } + + *releaseFenceOut = releaseFence; + + return OK; +} + +status_t Camera3InputStream::returnInputBufferLocked( + const camera3_stream_buffer &buffer) { + ATRACE_CALL(); + + return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false); +} + +status_t Camera3InputStream::disconnectLocked() { + + status_t res; + + if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) { + return res; + } + + assert(mBuffersInFlight.size() == 0); + + /** + * no-op since we can't disconnect the producer from the consumer-side + */ + + mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG + : STATE_CONSTRUCTED; + return OK; +} + +sp<IGraphicBufferProducer> Camera3InputStream::getProducerInterface() const { + return mConsumer->getProducerInterface(); +} + +void Camera3InputStream::dump(int fd, const Vector<String16> &args) const { + (void) args; + String8 lines; + lines.appendFormat(" Stream[%d]: Input\n", mId); + write(fd, lines.string(), lines.size()); + + Camera3IOStreamBase::dump(fd, args); +} + +status_t Camera3InputStream::configureQueueLocked() { + status_t res; + + if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) { + return res; + } + + assert(mMaxSize == 0); + assert(camera3_stream::format != HAL_PIXEL_FORMAT_BLOB); + + mTotalBufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS + + camera3_stream::max_buffers; + mDequeuedBufferCount = 0; + mFrameCount = 0; + + if (mConsumer.get() == 0) { + mConsumer = new BufferItemConsumer(camera3_stream::usage, + mTotalBufferCount, + /*synchronousMode*/true); + mConsumer->setName(String8::format("Camera3-InputStream-%d", mId)); + } + + res = 
mConsumer->setDefaultBufferSize(camera3_stream::width, + camera3_stream::height); + if (res != OK) { + ALOGE("%s: Stream %d: Could not set buffer dimensions %dx%d", + __FUNCTION__, mId, camera3_stream::width, camera3_stream::height); + return res; + } + res = mConsumer->setDefaultBufferFormat(camera3_stream::format); + if (res != OK) { + ALOGE("%s: Stream %d: Could not set buffer format %d", + __FUNCTION__, mId, camera3_stream::format); + return res; + } + + return OK; +} + +}; // namespace camera3 + +}; // namespace android diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.h b/services/camera/libcameraservice/camera3/Camera3InputStream.h new file mode 100644 index 0000000..8adda88 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3InputStream.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA3_INPUT_STREAM_H +#define ANDROID_SERVERS_CAMERA3_INPUT_STREAM_H + +#include <utils/RefBase.h> +#include <gui/Surface.h> +#include <gui/BufferItemConsumer.h> + +#include "Camera3IOStreamBase.h" + +namespace android { + +namespace camera3 { + +/** + * A class for managing a single stream of input data to the camera device. 
+ * + * This class serves as a consumer adapter for the HAL, and will consume the + * buffers by feeding them into the HAL, as well as releasing the buffers back + * the buffers once the HAL is done with them. + */ +class Camera3InputStream : public Camera3IOStreamBase { + public: + /** + * Set up a stream for formats that have fixed size, such as RAW and YUV. + */ + Camera3InputStream(int id, uint32_t width, uint32_t height, int format); + ~Camera3InputStream(); + + virtual void dump(int fd, const Vector<String16> &args) const; + + /** + * Get the producer interface for this stream, to hand off to a producer. + * The producer must be connected to the provided interface before + * finishConfigure is called on this stream. + */ + sp<IGraphicBufferProducer> getProducerInterface() const; + + private: + + typedef BufferItemConsumer::BufferItem BufferItem; + + sp<BufferItemConsumer> mConsumer; + Vector<BufferItem> mBuffersInFlight; + + /** + * Camera3IOStreamBase + */ + virtual status_t returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> *releaseFenceOut); + + /** + * Camera3Stream interface + */ + + virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer); + virtual status_t returnInputBufferLocked( + const camera3_stream_buffer &buffer); + virtual status_t disconnectLocked(); + + virtual status_t configureQueueLocked(); + +}; // class Camera3InputStream + +}; // namespace camera3 + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp new file mode 100644 index 0000000..2efeede --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp @@ -0,0 +1,364 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Camera3-OutputStream" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include "Camera3OutputStream.h" + +#ifndef container_of +#define container_of(ptr, type, member) \ + (type *)((char*)(ptr) - offsetof(type, member)) +#endif + +namespace android { + +namespace camera3 { + +Camera3OutputStream::Camera3OutputStream(int id, + sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, int format) : + Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, + /*maxSize*/0, format), + mConsumer(consumer), + mTransform(0) { + + if (mConsumer == NULL) { + ALOGE("%s: Consumer is NULL!", __FUNCTION__); + mState = STATE_ERROR; + } +} + +Camera3OutputStream::Camera3OutputStream(int id, + sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, size_t maxSize, int format) : + Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize, + format), + mConsumer(consumer), + mTransform(0) { + + if (format != HAL_PIXEL_FORMAT_BLOB) { + ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__, + format); + mState = STATE_ERROR; + } + + if (mConsumer == NULL) { + ALOGE("%s: Consumer is NULL!", __FUNCTION__); + mState = STATE_ERROR; + } +} + +Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type, + uint32_t width, uint32_t height, + int format) : + Camera3IOStreamBase(id, type, width, height, + /*maxSize*/0, + format), + mTransform(0) { + + // Subclasses expected to initialize mConsumer themselves +} + + 
+Camera3OutputStream::~Camera3OutputStream() { + disconnectLocked(); +} + +status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer) { + ATRACE_CALL(); + status_t res; + + if ((res = getBufferPreconditionCheckLocked()) != OK) { + return res; + } + + ANativeWindowBuffer* anb; + int fenceFd; + + res = mConsumer->dequeueBuffer(mConsumer.get(), &anb, &fenceFd); + if (res != OK) { + ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + return res; + } + + /** + * FenceFD now owned by HAL except in case of error, + * in which case we reassign it to acquire_fence + */ + handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd, + /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK); + + return OK; +} + +status_t Camera3OutputStream::returnBufferLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp) { + ATRACE_CALL(); + + status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true); + + if (res != OK) { + return res; + } + + mLastTimestamp = timestamp; + + return OK; +} + +status_t Camera3OutputStream::returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> *releaseFenceOut) { + + (void)output; + ALOG_ASSERT(output, "Expected output to be true"); + + status_t res; + sp<Fence> releaseFence; + + /** + * Fence management - calculate Release Fence + */ + if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) { + if (buffer.release_fence != -1) { + ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when " + "there is an error", __FUNCTION__, mId, buffer.release_fence); + close(buffer.release_fence); + } + + /** + * Reassign release fence as the acquire fence in case of error + */ + releaseFence = new Fence(buffer.acquire_fence); + } else { + res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp); + if (res != OK) { + ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", + __FUNCTION__, 
mId, strerror(-res), res); + return res; + } + + releaseFence = new Fence(buffer.release_fence); + } + + int anwReleaseFence = releaseFence->dup(); + + /** + * Release the lock briefly to avoid deadlock with + * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this + * thread will go into StreamingProcessor::onFrameAvailable) during + * queueBuffer + */ + sp<ANativeWindow> currentConsumer = mConsumer; + mLock.unlock(); + + /** + * Return buffer back to ANativeWindow + */ + if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) { + // Cancel buffer + res = currentConsumer->cancelBuffer(currentConsumer.get(), + container_of(buffer.buffer, ANativeWindowBuffer, handle), + anwReleaseFence); + if (res != OK) { + ALOGE("%s: Stream %d: Error cancelling buffer to native window:" + " %s (%d)", __FUNCTION__, mId, strerror(-res), res); + } + } else { + res = currentConsumer->queueBuffer(currentConsumer.get(), + container_of(buffer.buffer, ANativeWindowBuffer, handle), + anwReleaseFence); + if (res != OK) { + ALOGE("%s: Stream %d: Error queueing buffer to native window: " + "%s (%d)", __FUNCTION__, mId, strerror(-res), res); + } + } + mLock.lock(); + if (res != OK) { + close(anwReleaseFence); + return res; + } + + *releaseFenceOut = releaseFence; + + return OK; +} + +void Camera3OutputStream::dump(int fd, const Vector<String16> &args) const { + (void) args; + String8 lines; + lines.appendFormat(" Stream[%d]: Output\n", mId); + write(fd, lines.string(), lines.size()); + + Camera3IOStreamBase::dump(fd, args); +} + +status_t Camera3OutputStream::setTransform(int transform) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + return setTransformLocked(transform); +} + +status_t Camera3OutputStream::setTransformLocked(int transform) { + status_t res = OK; + if (mState == STATE_ERROR) { + ALOGE("%s: Stream in error state", __FUNCTION__); + return INVALID_OPERATION; + } + + mTransform = transform; + if (mState == STATE_CONFIGURED) { + res = 
native_window_set_buffers_transform(mConsumer.get(), + transform); + if (res != OK) { + ALOGE("%s: Unable to configure stream transform to %x: %s (%d)", + __FUNCTION__, transform, strerror(-res), res); + } + } + return res; +} + +status_t Camera3OutputStream::configureQueueLocked() { + status_t res; + + if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) { + return res; + } + + ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL"); + + // Configure consumer-side ANativeWindow interface + res = native_window_api_connect(mConsumer.get(), + NATIVE_WINDOW_API_CAMERA); + if (res != OK) { + ALOGE("%s: Unable to connect to native window for stream %d", + __FUNCTION__, mId); + return res; + } + + res = native_window_set_usage(mConsumer.get(), camera3_stream::usage); + if (res != OK) { + ALOGE("%s: Unable to configure usage %08x for stream %d", + __FUNCTION__, camera3_stream::usage, mId); + return res; + } + + res = native_window_set_scaling_mode(mConsumer.get(), + NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW); + if (res != OK) { + ALOGE("%s: Unable to configure stream scaling: %s (%d)", + __FUNCTION__, strerror(-res), res); + return res; + } + + if (mMaxSize == 0) { + // For buffers of known size + res = native_window_set_buffers_geometry(mConsumer.get(), + camera3_stream::width, camera3_stream::height, + camera3_stream::format); + } else { + // For buffers with bounded size + res = native_window_set_buffers_geometry(mConsumer.get(), + mMaxSize, 1, + camera3_stream::format); + } + if (res != OK) { + ALOGE("%s: Unable to configure stream buffer geometry" + " %d x %d, format %x for stream %d", + __FUNCTION__, camera3_stream::width, camera3_stream::height, + camera3_stream::format, mId); + return res; + } + + int maxConsumerBuffers; + res = mConsumer->query(mConsumer.get(), + NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers); + if (res != OK) { + ALOGE("%s: Unable to query consumer undequeued" + " buffer count for stream %d", __FUNCTION__, mId); + 
return res; + } + + ALOGV("%s: Consumer wants %d buffers", __FUNCTION__, + maxConsumerBuffers); + + mTotalBufferCount = maxConsumerBuffers + camera3_stream::max_buffers; + mDequeuedBufferCount = 0; + mFrameCount = 0; + mLastTimestamp = 0; + + res = native_window_set_buffer_count(mConsumer.get(), + mTotalBufferCount); + if (res != OK) { + ALOGE("%s: Unable to set buffer count for stream %d", + __FUNCTION__, mId); + return res; + } + + res = native_window_set_buffers_transform(mConsumer.get(), + mTransform); + if (res != OK) { + ALOGE("%s: Unable to configure stream transform to %x: %s (%d)", + __FUNCTION__, mTransform, strerror(-res), res); + } + + return OK; +} + +status_t Camera3OutputStream::disconnectLocked() { + status_t res; + + if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) { + return res; + } + + res = native_window_api_disconnect(mConsumer.get(), + NATIVE_WINDOW_API_CAMERA); + + /** + * This is not an error. if client calling process dies, the window will + * also die and all calls to it will return DEAD_OBJECT, thus it's already + * "disconnected" + */ + if (res == DEAD_OBJECT) { + ALOGW("%s: While disconnecting stream %d from native window, the" + " native window died from under us", __FUNCTION__, mId); + } + else if (res != OK) { + ALOGE("%s: Unable to disconnect stream %d from native window " + "(error %d %s)", + __FUNCTION__, mId, res, strerror(-res)); + mState = STATE_ERROR; + return res; + } + + mState = (mState == STATE_IN_RECONFIG) ? 
STATE_IN_CONFIG + : STATE_CONSTRUCTED; + return OK; +} + +}; // namespace camera3 + +}; // namespace android diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.h b/services/camera/libcameraservice/camera3/Camera3OutputStream.h new file mode 100644 index 0000000..774fbdd --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3OutputStream.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_H +#define ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_H + +#include <utils/RefBase.h> +#include <gui/Surface.h> + +#include "Camera3Stream.h" +#include "Camera3IOStreamBase.h" +#include "Camera3OutputStreamInterface.h" + +namespace android { + +namespace camera3 { + +/** + * A class for managing a single stream of output data from the camera device. + */ +class Camera3OutputStream : + public Camera3IOStreamBase, + public Camera3OutputStreamInterface { + public: + /** + * Set up a stream for formats that have 2 dimensions, such as RAW and YUV. + */ + Camera3OutputStream(int id, sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, int format); + + /** + * Set up a stream for formats that have a variable buffer size for the same + * dimensions, such as compressed JPEG. 
+ */ + Camera3OutputStream(int id, sp<ANativeWindow> consumer, + uint32_t width, uint32_t height, size_t maxSize, int format); + + virtual ~Camera3OutputStream(); + + /** + * Camera3Stream interface + */ + + virtual void dump(int fd, const Vector<String16> &args) const; + + /** + * Set the transform on the output stream; one of the + * HAL_TRANSFORM_* / NATIVE_WINDOW_TRANSFORM_* constants. + */ + status_t setTransform(int transform); + + protected: + Camera3OutputStream(int id, camera3_stream_type_t type, + uint32_t width, uint32_t height, int format); + + /** + * Note that we release the lock briefly in this function + */ + virtual status_t returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> *releaseFenceOut); + + sp<ANativeWindow> mConsumer; + private: + int mTransform; + + virtual status_t setTransformLocked(int transform); + + /** + * Internal Camera3Stream interface + */ + virtual status_t getBufferLocked(camera3_stream_buffer *buffer); + virtual status_t returnBufferLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp); + + virtual status_t configureQueueLocked(); + virtual status_t disconnectLocked(); +}; // class Camera3OutputStream + +} // namespace camera3 + +} // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/camera3/Camera3OutputStreamInterface.h new file mode 100644 index 0000000..aae72cf --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3OutputStreamInterface.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_INTERFACE_H +#define ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_INTERFACE_H + +#include "Camera3StreamInterface.h" + +namespace android { + +namespace camera3 { + +/** + * An interface for managing a single stream of output data from the camera + * device. + */ +class Camera3OutputStreamInterface : public virtual Camera3StreamInterface { + public: + /** + * Set the transform on the output stream; one of the + * HAL_TRANSFORM_* / NATIVE_WINDOW_TRANSFORM_* constants. + */ + virtual status_t setTransform(int transform) = 0; +}; + +} // namespace camera3 + +} // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3Stream.cpp b/services/camera/libcameraservice/camera3/Camera3Stream.cpp new file mode 100644 index 0000000..f05658a --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3Stream.cpp @@ -0,0 +1,381 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera3-Stream" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include "Camera3Stream.h" + +namespace android { + +namespace camera3 { + +Camera3Stream::~Camera3Stream() { +} + +Camera3Stream* Camera3Stream::cast(camera3_stream *stream) { + return static_cast<Camera3Stream*>(stream); +} + +const Camera3Stream* Camera3Stream::cast(const camera3_stream *stream) { + return static_cast<const Camera3Stream*>(stream); +} + +Camera3Stream::Camera3Stream(int id, + camera3_stream_type type, + uint32_t width, uint32_t height, size_t maxSize, int format) : + camera3_stream(), + mId(id), + mName(String8::format("Camera3Stream[%d]", id)), + mMaxSize(maxSize), + mState(STATE_CONSTRUCTED) { + + camera3_stream::stream_type = type; + camera3_stream::width = width; + camera3_stream::height = height; + camera3_stream::format = format; + camera3_stream::usage = 0; + camera3_stream::max_buffers = 0; + camera3_stream::priv = NULL; + + if (format == HAL_PIXEL_FORMAT_BLOB && maxSize == 0) { + ALOGE("%s: BLOB format with size == 0", __FUNCTION__); + mState = STATE_ERROR; + } +} + +int Camera3Stream::getId() const { + return mId; +} + +uint32_t Camera3Stream::getWidth() const { + return camera3_stream::width; +} + +uint32_t Camera3Stream::getHeight() const { + return camera3_stream::height; +} + +int Camera3Stream::getFormat() const { + return camera3_stream::format; +} + +camera3_stream* Camera3Stream::startConfiguration() { + Mutex::Autolock l(mLock); + + switch (mState) { + case STATE_ERROR: + ALOGE("%s: In error state", __FUNCTION__); + return NULL; + case STATE_CONSTRUCTED: + // OK + break; + case STATE_IN_CONFIG: + case STATE_IN_RECONFIG: + // Can start config again with no trouble; but don't redo + // oldUsage/oldMaxBuffers + return this; + case STATE_CONFIGURED: + if (stream_type == CAMERA3_STREAM_INPUT) { + ALOGE("%s: Cannot configure an input stream twice", + __FUNCTION__); + return NULL; 
+ } else if (hasOutstandingBuffersLocked()) { + ALOGE("%s: Cannot configure stream; has outstanding buffers", + __FUNCTION__); + return NULL; + } + break; + default: + ALOGE("%s: Unknown state %d", __FUNCTION__, mState); + return NULL; + } + + oldUsage = usage; + oldMaxBuffers = max_buffers; + + if (mState == STATE_CONSTRUCTED) { + mState = STATE_IN_CONFIG; + } else { // mState == STATE_CONFIGURED + mState = STATE_IN_RECONFIG; + } + + return this; +} + +bool Camera3Stream::isConfiguring() const { + Mutex::Autolock l(mLock); + return (mState == STATE_IN_CONFIG) || (mState == STATE_IN_RECONFIG); +} + +status_t Camera3Stream::finishConfiguration(camera3_device *hal3Device) { + Mutex::Autolock l(mLock); + switch (mState) { + case STATE_ERROR: + ALOGE("%s: In error state", __FUNCTION__); + return INVALID_OPERATION; + case STATE_IN_CONFIG: + case STATE_IN_RECONFIG: + // OK + break; + case STATE_CONSTRUCTED: + case STATE_CONFIGURED: + ALOGE("%s: Cannot finish configuration that hasn't been started", + __FUNCTION__); + return INVALID_OPERATION; + default: + ALOGE("%s: Unknown state", __FUNCTION__); + return INVALID_OPERATION; + } + + // Check if the stream configuration is unchanged, and skip reallocation if + // so. As documented in hardware/camera3.h:configure_streams(). 
+ if (mState == STATE_IN_RECONFIG && + oldUsage == usage && + oldMaxBuffers == max_buffers) { + mState = STATE_CONFIGURED; + return OK; + } + + status_t res; + res = configureQueueLocked(); + if (res != OK) { + ALOGE("%s: Unable to configure stream %d queue: %s (%d)", + __FUNCTION__, mId, strerror(-res), res); + mState = STATE_ERROR; + return res; + } + + res = registerBuffersLocked(hal3Device); + if (res != OK) { + ALOGE("%s: Unable to register stream buffers with HAL: %s (%d)", + __FUNCTION__, strerror(-res), res); + mState = STATE_ERROR; + return res; + } + + mState = STATE_CONFIGURED; + + return res; +} + +status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res = getBufferLocked(buffer); + if (res == OK) { + fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true); + } + + return res; +} + +status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer, + nsecs_t timestamp) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res = returnBufferLocked(buffer, timestamp); + if (res == OK) { + fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true); + } + + return res; +} + +status_t Camera3Stream::getInputBuffer(camera3_stream_buffer *buffer) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res = getInputBufferLocked(buffer); + if (res == OK) { + fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false); + } + + return res; +} + +status_t Camera3Stream::returnInputBuffer(const camera3_stream_buffer &buffer) { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + + status_t res = returnInputBufferLocked(buffer); + if (res == OK) { + fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/false); + } + return res; +} + +void Camera3Stream::fireBufferListenersLocked( + const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) { + List<wp<Camera3StreamBufferListener> >::iterator it, end; + + // TODO: finish implementing 
+ + Camera3StreamBufferListener::BufferInfo info = + Camera3StreamBufferListener::BufferInfo(); + info.mOutput = output; + // TODO: rest of fields + + for (it = mBufferListenerList.begin(), end = mBufferListenerList.end(); + it != end; + ++it) { + + sp<Camera3StreamBufferListener> listener = it->promote(); + if (listener != 0) { + if (acquired) { + listener->onBufferAcquired(info); + } else { + listener->onBufferReleased(info); + } + } + } +} + +bool Camera3Stream::hasOutstandingBuffers() const { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + return hasOutstandingBuffersLocked(); +} + +status_t Camera3Stream::disconnect() { + ATRACE_CALL(); + Mutex::Autolock l(mLock); + ALOGV("%s: Stream %d: Disconnecting...", __FUNCTION__, mId); + status_t res = disconnectLocked(); + + if (res == -ENOTCONN) { + // "Already disconnected" -- not an error + return OK; + } else { + return res; + } +} + +status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) { + ATRACE_CALL(); + status_t res; + + size_t bufferCount = getBufferCountLocked(); + + Vector<buffer_handle_t*> buffers; + buffers.insertAt(NULL, 0, bufferCount); + + camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set(); + bufferSet.stream = this; + bufferSet.num_buffers = bufferCount; + bufferSet.buffers = buffers.editArray(); + + Vector<camera3_stream_buffer_t> streamBuffers; + streamBuffers.insertAt(camera3_stream_buffer_t(), 0, bufferCount); + + // Register all buffers with the HAL. This means getting all the buffers + // from the stream, providing them to the HAL with the + // register_stream_buffers() method, and then returning them back to the + // stream in the error state, since they won't have valid data. + // + // Only registered buffers can be sent to the HAL. 
+
+    uint32_t bufferIdx = 0;
+    for (; bufferIdx < bufferCount; bufferIdx++) {
+        res = getBufferLocked( &streamBuffers.editItemAt(bufferIdx) );
+        if (res != OK) {
+            // %u: bufferIdx is uint32_t; %d would mismatch signedness
+            ALOGE("%s: Unable to get buffer %u for registration with HAL",
+                    __FUNCTION__, bufferIdx);
+            // Skip registering, go straight to cleanup
+            break;
+        }
+
+        sp<Fence> fence = new Fence(streamBuffers[bufferIdx].acquire_fence);
+        fence->waitForever("Camera3Stream::registerBuffers");
+
+        buffers.editItemAt(bufferIdx) = streamBuffers[bufferIdx].buffer;
+    }
+    if (bufferIdx == bufferCount) {
+        // Got all buffers, register with HAL
+        // %zu: bufferCount is size_t; %d is UB on LP64 targets
+        ALOGV("%s: Registering %zu buffers with camera HAL",
+                __FUNCTION__, bufferCount);
+        res = hal3Device->ops->register_stream_buffers(hal3Device,
+                &bufferSet);
+    }
+
+    // Return all valid buffers to stream, in ERROR state to indicate
+    // they weren't filled.
+    for (size_t i = 0; i < bufferIdx; i++) {
+        streamBuffers.editItemAt(i).release_fence = -1;
+        streamBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+        returnBufferLocked(streamBuffers[i], 0);
+    }
+
+    return res;
+}
+
+status_t Camera3Stream::getBufferLocked(camera3_stream_buffer *) {
+    ALOGE("%s: This type of stream does not support output", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+status_t Camera3Stream::returnBufferLocked(const camera3_stream_buffer &,
+                                           nsecs_t) {
+    ALOGE("%s: This type of stream does not support output", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+status_t Camera3Stream::getInputBufferLocked(camera3_stream_buffer *) {
+    ALOGE("%s: This type of stream does not support input", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+status_t Camera3Stream::returnInputBufferLocked(
+        const camera3_stream_buffer &) {
+    ALOGE("%s: This type of stream does not support input", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
+void Camera3Stream::addBufferListener(
+        wp<Camera3StreamBufferListener> listener) {
+    Mutex::Autolock l(mLock);
+    mBufferListenerList.push_back(listener);
+}
+
+void
Camera3Stream::removeBufferListener(
+        const sp<Camera3StreamBufferListener>& listener) {
+    Mutex::Autolock l(mLock);
+
+    bool erased = false; // must start false so the not-found warning can fire
+    List<wp<Camera3StreamBufferListener> >::iterator it, end;
+    for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
+         it != end;
+         ) {
+
+        if (*it == listener) {
+            it = mBufferListenerList.erase(it);
+            erased = true;
+        } else {
+            ++it;
+        }
+    }
+
+    if (!erased) {
+        ALOGW("%s: Could not find listener to remove, already removed",
+              __FUNCTION__);
+    }
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera3/Camera3Stream.h b/services/camera/libcameraservice/camera3/Camera3Stream.h
new file mode 100644
index 0000000..69d81e4
--- /dev/null
+++ b/services/camera/libcameraservice/camera3/Camera3Stream.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_STREAM_H
+
+#include <gui/Surface.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/List.h>
+
+#include "hardware/camera3.h"
+
+#include "Camera3StreamBufferListener.h"
+#include "Camera3StreamInterface.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * A class for managing a single stream of input or output data from the camera
+ * device.
+ *
+ * The stream has an internal state machine to track whether it's
+ * connected/configured/etc.
+ *
+ * States:
+ *
+ *  STATE_ERROR: A serious error has occurred, stream is unusable. Outstanding
+ *    buffers may still be returned.
+ *
+ *  STATE_CONSTRUCTED: The stream is ready for configuration, but buffers cannot
+ *    be gotten yet. Not connected to any endpoint, no buffers are registered
+ *    with the HAL.
+ *
+ *  STATE_IN_CONFIG: Configuration has started, but not yet concluded. During this
+ *    time, the usage, max_buffers, and priv fields of camera3_stream returned by
+ *    startConfiguration() may be modified.
+ *
+ *  STATE_IN_RECONFIG: Configuration has started, and the stream has been
+ *    configured before. Need to track separately from IN_CONFIG to avoid
+ *    re-registering buffers with HAL.
+ *
+ *  STATE_CONFIGURED: Stream is configured, and has registered buffers with the
+ *    HAL. The stream's getBuffer/returnBuffer work. The priv pointer may still be
+ *    modified.
+ *
+ * Transition table:
+ *
+ *    <none> => STATE_CONSTRUCTED:
+ *        When constructed with valid arguments
+ *    <none> => STATE_ERROR:
+ *        When constructed with invalid arguments
+ *    STATE_CONSTRUCTED => STATE_IN_CONFIG:
+ *        When startConfiguration() is called
+ *    STATE_IN_CONFIG => STATE_CONFIGURED:
+ *        When finishConfiguration() is called
+ *    STATE_IN_CONFIG => STATE_ERROR:
+ *        When finishConfiguration() fails to allocate or register buffers.
+ *    STATE_CONFIGURED => STATE_IN_RECONFIG:
+ *        When startConfiguration() is called again, after making sure stream is
+ *        idle with waitUntilIdle().
+ *    STATE_IN_RECONFIG => STATE_CONFIGURED:
+ *        When finishConfiguration() is called.
+ *    STATE_IN_RECONFIG => STATE_ERROR:
+ *        When finishConfiguration() fails to allocate or register buffers.
+ *    STATE_CONFIGURED => STATE_CONSTRUCTED:
+ *        When disconnect() is called after making sure stream is idle with
+ *        waitUntilIdle().
+ */ +class Camera3Stream : + protected camera3_stream, + public virtual Camera3StreamInterface, + public virtual RefBase { + public: + + virtual ~Camera3Stream(); + + static Camera3Stream* cast(camera3_stream *stream); + static const Camera3Stream* cast(const camera3_stream *stream); + + /** + * Get the stream's ID + */ + int getId() const; + + /** + * Get the stream's dimensions and format + */ + uint32_t getWidth() const; + uint32_t getHeight() const; + int getFormat() const; + + /** + * Start the stream configuration process. Returns a handle to the stream's + * information to be passed into the HAL device's configure_streams call. + * + * Until finishConfiguration() is called, no other methods on the stream may be + * called. The usage and max_buffers fields of camera3_stream may be modified + * between start/finishConfiguration, but may not be changed after that. + * The priv field of camera3_stream may be modified at any time after + * startConfiguration. + * + * Returns NULL in case of error starting configuration. + */ + camera3_stream* startConfiguration(); + + /** + * Check if the stream is mid-configuration (start has been called, but not + * finish). Used for lazy completion of configuration. + */ + bool isConfiguring() const; + + /** + * Completes the stream configuration process. During this call, the stream + * may call the device's register_stream_buffers() method. The stream + * information structure returned by startConfiguration() may no longer be + * modified after this call, but can still be read until the destruction of + * the stream. + * + * Returns: + * OK on a successful configuration + * NO_INIT in case of a serious error from the HAL device + * NO_MEMORY in case of an error registering buffers + * INVALID_OPERATION in case connecting to the consumer failed + */ + status_t finishConfiguration(camera3_device *hal3Device); + + /** + * Fill in the camera3_stream_buffer with the next valid buffer for this + * stream, to hand over to the HAL. 
+ * + * This method may only be called once finishConfiguration has been called. + * For bidirectional streams, this method applies to the output-side + * buffers. + * + */ + status_t getBuffer(camera3_stream_buffer *buffer); + + /** + * Return a buffer to the stream after use by the HAL. + * + * This method may only be called for buffers provided by getBuffer(). + * For bidirectional streams, this method applies to the output-side buffers + */ + status_t returnBuffer(const camera3_stream_buffer &buffer, + nsecs_t timestamp); + + /** + * Fill in the camera3_stream_buffer with the next valid buffer for this + * stream, to hand over to the HAL. + * + * This method may only be called once finishConfiguration has been called. + * For bidirectional streams, this method applies to the input-side + * buffers. + * + */ + status_t getInputBuffer(camera3_stream_buffer *buffer); + + /** + * Return a buffer to the stream after use by the HAL. + * + * This method may only be called for buffers provided by getBuffer(). + * For bidirectional streams, this method applies to the input-side buffers + */ + status_t returnInputBuffer(const camera3_stream_buffer &buffer); + + /** + * Whether any of the stream's buffers are currently in use by the HAL, + * including buffers that have been returned but not yet had their + * release fence signaled. + */ + bool hasOutstandingBuffers() const; + + enum { + TIMEOUT_NEVER = -1 + }; + /** + * Wait until the HAL is done with all of this stream's buffers, including + * signalling all release fences. Returns TIMED_OUT if the timeout is exceeded, + * OK on success. Pass in TIMEOUT_NEVER for timeout to indicate an indefinite wait. + */ + virtual status_t waitUntilIdle(nsecs_t timeout) = 0; + + /** + * Disconnect stream from its non-HAL endpoint. After this, + * start/finishConfiguration must be called before the stream can be used + * again. This cannot be called if the stream has outstanding dequeued + * buffers. 
+ */ + status_t disconnect(); + + /** + * Debug dump of the stream's state. + */ + virtual void dump(int fd, const Vector<String16> &args) const = 0; + + void addBufferListener( + wp<Camera3StreamBufferListener> listener); + void removeBufferListener( + const sp<Camera3StreamBufferListener>& listener); + + protected: + const int mId; + const String8 mName; + // Zero for formats with fixed buffer size for given dimensions. + const size_t mMaxSize; + + enum { + STATE_ERROR, + STATE_CONSTRUCTED, + STATE_IN_CONFIG, + STATE_IN_RECONFIG, + STATE_CONFIGURED + } mState; + + mutable Mutex mLock; + + Camera3Stream(int id, camera3_stream_type type, + uint32_t width, uint32_t height, size_t maxSize, int format); + + /** + * Interface to be implemented by derived classes + */ + + // getBuffer / returnBuffer implementations + + // Since camera3_stream_buffer includes a raw pointer to the stream, + // cast to camera3_stream*, implementations must increment the + // refcount of the stream manually in getBufferLocked, and decrement it in + // returnBufferLocked. + virtual status_t getBufferLocked(camera3_stream_buffer *buffer); + virtual status_t returnBufferLocked(const camera3_stream_buffer &buffer, + nsecs_t timestamp); + virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer); + virtual status_t returnInputBufferLocked( + const camera3_stream_buffer &buffer); + virtual bool hasOutstandingBuffersLocked() const = 0; + // Can return -ENOTCONN when we are already disconnected (not an error) + virtual status_t disconnectLocked() = 0; + + // Configure the buffer queue interface to the other end of the stream, + // after the HAL has provided usage and max_buffers values. After this call, + // the stream must be ready to produce all buffers for registration with + // HAL. 
+ virtual status_t configureQueueLocked() = 0; + + // Get the total number of buffers in the queue + virtual size_t getBufferCountLocked() = 0; + + private: + uint32_t oldUsage; + uint32_t oldMaxBuffers; + + // Gets all buffers from endpoint and registers them with the HAL. + status_t registerBuffersLocked(camera3_device *hal3Device); + + void fireBufferListenersLocked(const camera3_stream_buffer& buffer, + bool acquired, bool output); + List<wp<Camera3StreamBufferListener> > mBufferListenerList; + +}; // class Camera3Stream + +}; // namespace camera3 + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/camera3/Camera3StreamBufferListener.h new file mode 100644 index 0000000..62ea6c0 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3StreamBufferListener.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H +#define ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H + +#include <gui/Surface.h> +#include <utils/RefBase.h> + +namespace android { + +namespace camera3 { + +class Camera3StreamBufferListener : public virtual RefBase { +public: + + struct BufferInfo { + bool mOutput; // if false then input buffer + Rect mCrop; + uint32_t mTransform; + uint32_t mScalingMode; + int64_t mTimestamp; + uint64_t mFrameNumber; + }; + + // Buffer was acquired by the HAL + virtual void onBufferAcquired(const BufferInfo& bufferInfo) = 0; + // Buffer was released by the HAL + virtual void onBufferReleased(const BufferInfo& bufferInfo) = 0; +}; + +}; //namespace camera3 +}; //namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3StreamInterface.h b/services/camera/libcameraservice/camera3/Camera3StreamInterface.h new file mode 100644 index 0000000..4768536 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3StreamInterface.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_SERVERS_CAMERA3_STREAM_INTERFACE_H +#define ANDROID_SERVERS_CAMERA3_STREAM_INTERFACE_H + +#include <utils/RefBase.h> +#include "Camera3StreamBufferListener.h" + +struct camera3_stream_buffer; + +namespace android { + +namespace camera3 { + +/** + * An interface for managing a single stream of input and/or output data from + * the camera device. + */ +class Camera3StreamInterface : public virtual RefBase { + public: + /** + * Get the stream's ID + */ + virtual int getId() const = 0; + + /** + * Get the stream's dimensions and format + */ + virtual uint32_t getWidth() const = 0; + virtual uint32_t getHeight() const = 0; + virtual int getFormat() const = 0; + + /** + * Start the stream configuration process. Returns a handle to the stream's + * information to be passed into the HAL device's configure_streams call. + * + * Until finishConfiguration() is called, no other methods on the stream may + * be called. The usage and max_buffers fields of camera3_stream may be + * modified between start/finishConfiguration, but may not be changed after + * that. The priv field of camera3_stream may be modified at any time after + * startConfiguration. + * + * Returns NULL in case of error starting configuration. + */ + virtual camera3_stream* startConfiguration() = 0; + + /** + * Check if the stream is mid-configuration (start has been called, but not + * finish). Used for lazy completion of configuration. + */ + virtual bool isConfiguring() const = 0; + + /** + * Completes the stream configuration process. During this call, the stream + * may call the device's register_stream_buffers() method. The stream + * information structure returned by startConfiguration() may no longer be + * modified after this call, but can still be read until the destruction of + * the stream. 
+ * + * Returns: + * OK on a successful configuration + * NO_INIT in case of a serious error from the HAL device + * NO_MEMORY in case of an error registering buffers + * INVALID_OPERATION in case connecting to the consumer failed + */ + virtual status_t finishConfiguration(camera3_device *hal3Device) = 0; + + /** + * Fill in the camera3_stream_buffer with the next valid buffer for this + * stream, to hand over to the HAL. + * + * This method may only be called once finishConfiguration has been called. + * For bidirectional streams, this method applies to the output-side + * buffers. + * + */ + virtual status_t getBuffer(camera3_stream_buffer *buffer) = 0; + + /** + * Return a buffer to the stream after use by the HAL. + * + * This method may only be called for buffers provided by getBuffer(). + * For bidirectional streams, this method applies to the output-side buffers + */ + virtual status_t returnBuffer(const camera3_stream_buffer &buffer, + nsecs_t timestamp) = 0; + + /** + * Fill in the camera3_stream_buffer with the next valid buffer for this + * stream, to hand over to the HAL. + * + * This method may only be called once finishConfiguration has been called. + * For bidirectional streams, this method applies to the input-side + * buffers. + * + */ + virtual status_t getInputBuffer(camera3_stream_buffer *buffer) = 0; + + /** + * Return a buffer to the stream after use by the HAL. + * + * This method may only be called for buffers provided by getBuffer(). + * For bidirectional streams, this method applies to the input-side buffers + */ + virtual status_t returnInputBuffer(const camera3_stream_buffer &buffer) = 0; + + /** + * Whether any of the stream's buffers are currently in use by the HAL, + * including buffers that have been returned but not yet had their + * release fence signaled. 
+ */ + virtual bool hasOutstandingBuffers() const = 0; + + enum { + TIMEOUT_NEVER = -1 + }; + /** + * Wait until the HAL is done with all of this stream's buffers, including + * signalling all release fences. Returns TIMED_OUT if the timeout is + * exceeded, OK on success. Pass in TIMEOUT_NEVER for timeout to indicate + * an indefinite wait. + */ + virtual status_t waitUntilIdle(nsecs_t timeout) = 0; + + /** + * Disconnect stream from its non-HAL endpoint. After this, + * start/finishConfiguration must be called before the stream can be used + * again. This cannot be called if the stream has outstanding dequeued + * buffers. + */ + virtual status_t disconnect() = 0; + + /** + * Debug dump of the stream's state. + */ + virtual void dump(int fd, const Vector<String16> &args) const = 0; + + virtual void addBufferListener( + wp<Camera3StreamBufferListener> listener) = 0; + virtual void removeBufferListener( + const sp<Camera3StreamBufferListener>& listener) = 0; +}; + +} // namespace camera3 + +} // namespace android + +#endif diff --git a/services/camera/libcameraservice/camera3/Camera3ZslStream.cpp b/services/camera/libcameraservice/camera3/Camera3ZslStream.cpp new file mode 100644 index 0000000..8790c8c --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3ZslStream.cpp @@ -0,0 +1,328 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "Camera3-ZslStream" +#define ATRACE_TAG ATRACE_TAG_CAMERA +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include <utils/Trace.h> +#include "Camera3ZslStream.h" + +typedef android::RingBufferConsumer::PinnedBufferItem PinnedBufferItem; + +namespace android { + +namespace camera3 { + +namespace { +struct TimestampFinder : public RingBufferConsumer::RingBufferComparator { + typedef RingBufferConsumer::BufferInfo BufferInfo; + + enum { + SELECT_I1 = -1, + SELECT_I2 = 1, + SELECT_NEITHER = 0, + }; + + TimestampFinder(nsecs_t timestamp) : mTimestamp(timestamp) {} + ~TimestampFinder() {} + + template <typename T> + static void swap(T& a, T& b) { + T tmp = a; + a = b; + b = tmp; + } + + /** + * Try to find the best candidate for a ZSL buffer. + * Match priority from best to worst: + * 1) Timestamps match. + * 2) Timestamp is closest to the needle (and lower). + * 3) Timestamp is closest to the needle (and higher). + * + */ + virtual int compare(const BufferInfo *i1, + const BufferInfo *i2) const { + // Try to select non-null object first. 
+ if (i1 == NULL) { + return SELECT_I2; + } else if (i2 == NULL) { + return SELECT_I1; + } + + // Best result: timestamp is identical + if (i1->mTimestamp == mTimestamp) { + return SELECT_I1; + } else if (i2->mTimestamp == mTimestamp) { + return SELECT_I2; + } + + const BufferInfo* infoPtrs[2] = { + i1, + i2 + }; + int infoSelectors[2] = { + SELECT_I1, + SELECT_I2 + }; + + // Order i1,i2 so that always i1.timestamp < i2.timestamp + if (i1->mTimestamp > i2->mTimestamp) { + swap(infoPtrs[0], infoPtrs[1]); + swap(infoSelectors[0], infoSelectors[1]); + } + + // Second best: closest (lower) timestamp + if (infoPtrs[1]->mTimestamp < mTimestamp) { + return infoSelectors[1]; + } else if (infoPtrs[0]->mTimestamp < mTimestamp) { + return infoSelectors[0]; + } + + // Worst: closest (higher) timestamp + return infoSelectors[0]; + + /** + * The above cases should cover all the possibilities, + * and we get an 'empty' result only if the ring buffer + * was empty itself + */ + } + + const nsecs_t mTimestamp; +}; // struct TimestampFinder +} // namespace anonymous + +Camera3ZslStream::Camera3ZslStream(int id, uint32_t width, uint32_t height, + int depth) : + Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL, + width, height, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED), + mDepth(depth), + mProducer(new RingBufferConsumer(GRALLOC_USAGE_HW_CAMERA_ZSL, + depth)) { + + mConsumer = new Surface(mProducer->getProducerInterface()); +} + +Camera3ZslStream::~Camera3ZslStream() { +} + +status_t Camera3ZslStream::getInputBufferLocked(camera3_stream_buffer *buffer) { + ATRACE_CALL(); + + status_t res; + + // TODO: potentially register from inputBufferLocked + // this should be ok, registerBuffersLocked only calls getBuffer for now + // register in output mode instead of input mode for ZSL streams. 
+ if (mState == STATE_IN_CONFIG || mState == STATE_IN_RECONFIG) { + ALOGE("%s: Stream %d: Buffer registration for input streams" + " not implemented (state %d)", + __FUNCTION__, mId, mState); + return INVALID_OPERATION; + } + + if ((res = getBufferPreconditionCheckLocked()) != OK) { + return res; + } + + ANativeWindowBuffer* anb; + int fenceFd; + + assert(mProducer != 0); + + sp<PinnedBufferItem> bufferItem; + { + List<sp<RingBufferConsumer::PinnedBufferItem> >::iterator it, end; + it = mInputBufferQueue.begin(); + end = mInputBufferQueue.end(); + + // Need to call enqueueInputBufferByTimestamp as a prerequisite + if (it == end) { + ALOGE("%s: Stream %d: No input buffer was queued", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + bufferItem = *it; + mInputBufferQueue.erase(it); + } + + anb = bufferItem->getBufferItem().mGraphicBuffer->getNativeBuffer(); + assert(anb != NULL); + fenceFd = bufferItem->getBufferItem().mFence->dup(); + + /** + * FenceFD now owned by HAL except in case of error, + * in which case we reassign it to acquire_fence + */ + handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd, + /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK); + + mBuffersInFlight.push_back(bufferItem); + + return OK; +} + +status_t Camera3ZslStream::returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> *releaseFenceOut) { + + if (output) { + // Output stream path + return Camera3OutputStream::returnBufferCheckedLocked(buffer, + timestamp, + output, + releaseFenceOut); + } + + /** + * Input stream path + */ + bool bufferFound = false; + sp<PinnedBufferItem> bufferItem; + { + // Find the buffer we are returning + Vector<sp<PinnedBufferItem> >::iterator it, end; + for (it = mBuffersInFlight.begin(), end = mBuffersInFlight.end(); + it != end; + ++it) { + + const sp<PinnedBufferItem>& tmp = *it; + ANativeWindowBuffer *anb = + tmp->getBufferItem().mGraphicBuffer->getNativeBuffer(); + if 
(anb != NULL && &(anb->handle) == buffer.buffer) { + bufferFound = true; + bufferItem = tmp; + mBuffersInFlight.erase(it); + break; + } + } + } + if (!bufferFound) { + ALOGE("%s: Stream %d: Can't return buffer that wasn't sent to HAL", + __FUNCTION__, mId); + return INVALID_OPERATION; + } + + int releaseFenceFd = buffer.release_fence; + + if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) { + if (buffer.release_fence != -1) { + ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when " + "there is an error", __FUNCTION__, mId, buffer.release_fence); + close(buffer.release_fence); + } + + /** + * Reassign release fence as the acquire fence incase of error + */ + releaseFenceFd = buffer.acquire_fence; + } + + /** + * Unconditionally return buffer to the buffer queue. + * - Fwk takes over the release_fence ownership + */ + sp<Fence> releaseFence = new Fence(releaseFenceFd); + bufferItem->getBufferItem().mFence = releaseFence; + bufferItem.clear(); // dropping last reference unpins buffer + + *releaseFenceOut = releaseFence; + + return OK; +} + +status_t Camera3ZslStream::returnInputBufferLocked( + const camera3_stream_buffer &buffer) { + ATRACE_CALL(); + + status_t res = returnAnyBufferLocked(buffer, /*timestamp*/0, + /*output*/false); + + return res; +} + +void Camera3ZslStream::dump(int fd, const Vector<String16> &args) const { + (void) args; + + String8 lines; + lines.appendFormat(" Stream[%d]: ZSL\n", mId); + write(fd, lines.string(), lines.size()); + + Camera3IOStreamBase::dump(fd, args); + + lines = String8(); + lines.appendFormat(" Input buffers pending: %d, in flight %d\n", + mInputBufferQueue.size(), mBuffersInFlight.size()); + write(fd, lines.string(), lines.size()); +} + +status_t Camera3ZslStream::enqueueInputBufferByTimestamp( + nsecs_t timestamp, + nsecs_t* actualTimestamp) { + + Mutex::Autolock l(mLock); + + TimestampFinder timestampFinder = TimestampFinder(timestamp); + + sp<RingBufferConsumer::PinnedBufferItem> pinnedBuffer = + 
mProducer->pinSelectedBuffer(timestampFinder, + /*waitForFence*/false); + + if (pinnedBuffer == 0) { + ALOGE("%s: No ZSL buffers were available yet", __FUNCTION__); + return NO_BUFFER_AVAILABLE; + } + + nsecs_t actual = pinnedBuffer->getBufferItem().mTimestamp; + + if (actual != timestamp) { + ALOGW("%s: ZSL buffer candidate search didn't find an exact match --" + " requested timestamp = %lld, actual timestamp = %lld", + __FUNCTION__, timestamp, actual); + } + + mInputBufferQueue.push_back(pinnedBuffer); + + if (actualTimestamp != NULL) { + *actualTimestamp = actual; + } + + return OK; +} + +status_t Camera3ZslStream::clearInputRingBuffer() { + Mutex::Autolock l(mLock); + + mInputBufferQueue.clear(); + + return mProducer->clear(); +} + +status_t Camera3ZslStream::setTransform(int /*transform*/) { + ALOGV("%s: Not implemented", __FUNCTION__); + return INVALID_OPERATION; +} + +}; // namespace camera3 + +}; // namespace android diff --git a/services/camera/libcameraservice/camera3/Camera3ZslStream.h b/services/camera/libcameraservice/camera3/Camera3ZslStream.h new file mode 100644 index 0000000..c7f4490 --- /dev/null +++ b/services/camera/libcameraservice/camera3/Camera3ZslStream.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_SERVERS_CAMERA3_ZSL_STREAM_H +#define ANDROID_SERVERS_CAMERA3_ZSL_STREAM_H + +#include <utils/RefBase.h> +#include <gui/Surface.h> +#include <gui/RingBufferConsumer.h> + +#include "Camera3OutputStream.h" + +namespace android { + +namespace camera3 { + +/** + * A class for managing a single opaque ZSL stream to/from the camera device. + * This acts as a bidirectional stream at the HAL layer, caching and discarding + * most output buffers, and when directed, pushes a buffer back to the HAL for + * processing. + */ +class Camera3ZslStream : + public Camera3OutputStream { + public: + /** + * Set up a ZSL stream of a given resolution. Depth is the number of buffers + * cached within the stream that can be retrieved for input. + */ + Camera3ZslStream(int id, uint32_t width, uint32_t height, int depth); + ~Camera3ZslStream(); + + virtual void dump(int fd, const Vector<String16> &args) const; + + enum { NO_BUFFER_AVAILABLE = BufferQueue::NO_BUFFER_AVAILABLE }; + + /** + * Locate a buffer matching this timestamp in the RingBufferConsumer, + * and mark it to be queued at the next getInputBufferLocked invocation. + * + * Errors: Returns NO_BUFFER_AVAILABLE if we could not find a match. + * + */ + status_t enqueueInputBufferByTimestamp(nsecs_t timestamp, + nsecs_t* actualTimestamp); + + /** + * Clears the buffers that can be used by enqueueInputBufferByTimestamp + */ + status_t clearInputRingBuffer(); + + protected: + + /** + * Camera3OutputStreamInterface implementation + */ + status_t setTransform(int transform); + + private: + + int mDepth; + // Input buffers pending to be queued into HAL + List<sp<RingBufferConsumer::PinnedBufferItem> > mInputBufferQueue; + sp<RingBufferConsumer> mProducer; + + // Input buffers in flight to HAL + Vector<sp<RingBufferConsumer::PinnedBufferItem> > mBuffersInFlight; + + /** + * Camera3Stream interface + */ + + // getInputBuffer/returnInputBuffer operate the input stream side of the + // ZslStream. 
+ virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer); + virtual status_t returnInputBufferLocked( + const camera3_stream_buffer &buffer); + + // Actual body to return either input or output buffers + virtual status_t returnBufferCheckedLocked( + const camera3_stream_buffer &buffer, + nsecs_t timestamp, + bool output, + /*out*/ + sp<Fence> *releaseFenceOut); +}; // class Camera3ZslStream + +}; // namespace camera3 + +}; // namespace android + +#endif diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp new file mode 100644 index 0000000..cd39bad --- /dev/null +++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp @@ -0,0 +1,352 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "RingBufferConsumer" +#define ATRACE_TAG ATRACE_TAG_GRAPHICS +#include <utils/Log.h> + +#include <gui/RingBufferConsumer.h> + +#define BI_LOGV(x, ...) ALOGV("[%s] "x, mName.string(), ##__VA_ARGS__) +#define BI_LOGD(x, ...) ALOGD("[%s] "x, mName.string(), ##__VA_ARGS__) +#define BI_LOGI(x, ...) ALOGI("[%s] "x, mName.string(), ##__VA_ARGS__) +#define BI_LOGW(x, ...) ALOGW("[%s] "x, mName.string(), ##__VA_ARGS__) +#define BI_LOGE(x, ...) 
ALOGE("[%s] "x, mName.string(), ##__VA_ARGS__) + +#undef assert +#define assert(x) ALOG_ASSERT((x), #x) + +typedef android::RingBufferConsumer::PinnedBufferItem PinnedBufferItem; + +namespace android { + +RingBufferConsumer::RingBufferConsumer(uint32_t consumerUsage, + int bufferCount) : + ConsumerBase(new BufferQueue(true)), + mBufferCount(bufferCount) +{ + mBufferQueue->setConsumerUsageBits(consumerUsage); + mBufferQueue->setSynchronousMode(true); + mBufferQueue->setMaxAcquiredBufferCount(bufferCount); + + assert(bufferCount > 0); +} + +RingBufferConsumer::~RingBufferConsumer() { +} + +void RingBufferConsumer::setName(const String8& name) { + Mutex::Autolock _l(mMutex); + mName = name; + mBufferQueue->setConsumerName(name); +} + +sp<PinnedBufferItem> RingBufferConsumer::pinSelectedBuffer( + const RingBufferComparator& filter, + bool waitForFence) { + + sp<PinnedBufferItem> pinnedBuffer; + + { + List<RingBufferItem>::iterator it, end, accIt; + BufferInfo acc, cur; + BufferInfo* accPtr = NULL; + + Mutex::Autolock _l(mMutex); + + for (it = mBufferItemList.begin(), end = mBufferItemList.end(); + it != end; + ++it) { + + const RingBufferItem& item = *it; + + cur.mCrop = item.mCrop; + cur.mTransform = item.mTransform; + cur.mScalingMode = item.mScalingMode; + cur.mTimestamp = item.mTimestamp; + cur.mFrameNumber = item.mFrameNumber; + cur.mPinned = item.mPinCount > 0; + + int ret = filter.compare(accPtr, &cur); + + if (ret == 0) { + accPtr = NULL; + } else if (ret > 0) { + acc = cur; + accPtr = &acc; + accIt = it; + } // else acc = acc + } + + if (!accPtr) { + return NULL; + } + + pinnedBuffer = new PinnedBufferItem(this, *accIt); + pinBufferLocked(pinnedBuffer->getBufferItem()); + + } // end scope of mMutex autolock + + if (waitForFence) { + status_t err = pinnedBuffer->getBufferItem().mFence->waitForever( + "RingBufferConsumer::pinSelectedBuffer"); + if (err != OK) { + BI_LOGE("Failed to wait for fence of acquired buffer: %s (%d)", + strerror(-err), err); + } + } + + 
return pinnedBuffer; +} + +status_t RingBufferConsumer::clear() { + + status_t err; + Mutex::Autolock _l(mMutex); + + BI_LOGV("%s", __FUNCTION__); + + // Avoid annoying log warnings by returning early + if (mBufferItemList.size() == 0) { + return OK; + } + + do { + size_t pinnedFrames = 0; + err = releaseOldestBufferLocked(&pinnedFrames); + + if (err == NO_BUFFER_AVAILABLE) { + assert(pinnedFrames == mBufferItemList.size()); + break; + } + + if (err == NOT_ENOUGH_DATA) { + // Fine. Empty buffer item list. + break; + } + + if (err != OK) { + BI_LOGE("Clear failed, could not release buffer"); + return err; + } + + } while(true); + + return OK; +} + +void RingBufferConsumer::pinBufferLocked(const BufferItem& item) { + List<RingBufferItem>::iterator it, end; + + for (it = mBufferItemList.begin(), end = mBufferItemList.end(); + it != end; + ++it) { + + RingBufferItem& find = *it; + if (item.mGraphicBuffer == find.mGraphicBuffer) { + find.mPinCount++; + break; + } + } + + if (it == end) { + BI_LOGE("Failed to pin buffer (timestamp %lld, framenumber %lld)", + item.mTimestamp, item.mFrameNumber); + } else { + BI_LOGV("Pinned buffer (frame %lld, timestamp %lld)", + item.mFrameNumber, item.mTimestamp); + } +} + +status_t RingBufferConsumer::releaseOldestBufferLocked(size_t* pinnedFrames) { + status_t err = OK; + + List<RingBufferItem>::iterator it, end, accIt; + + it = mBufferItemList.begin(); + end = mBufferItemList.end(); + accIt = end; + + if (it == end) { + /** + * This is fine. We really care about being able to acquire a buffer + * successfully after this function completes, not about it releasing + * some buffer. 
+ */ + BI_LOGV("%s: No buffers yet acquired, can't release anything", + __FUNCTION__); + return NOT_ENOUGH_DATA; + } + + for (; it != end; ++it) { + RingBufferItem& find = *it; + + if (find.mPinCount > 0) { + if (pinnedFrames != NULL) { + ++(*pinnedFrames); + } + // Filter out pinned frame when searching for buffer to release + continue; + } + + if (find.mTimestamp < accIt->mTimestamp || accIt == end) { + accIt = it; + } + } + + if (accIt != end) { + RingBufferItem& item = *accIt; + + // In case the object was never pinned, pass the acquire fence + // back to the release fence. If the fence was already waited on, + // it'll just be a no-op to wait on it again. + err = addReleaseFenceLocked(item.mBuf, item.mFence); + + if (err != OK) { + BI_LOGE("Failed to add release fence to buffer " + "(timestamp %lld, framenumber %lld", + item.mTimestamp, item.mFrameNumber); + return err; + } + + BI_LOGV("Attempting to release buffer timestamp %lld, frame %lld", + item.mTimestamp, item.mFrameNumber); + + err = releaseBufferLocked(item.mBuf, + EGL_NO_DISPLAY, + EGL_NO_SYNC_KHR); + if (err != OK) { + BI_LOGE("Failed to release buffer: %s (%d)", + strerror(-err), err); + return err; + } + + BI_LOGV("Buffer timestamp %lld, frame %lld evicted", + item.mTimestamp, item.mFrameNumber); + + size_t currentSize = mBufferItemList.size(); + mBufferItemList.erase(accIt); + assert(mBufferItemList.size() == currentSize - 1); + } else { + BI_LOGW("All buffers pinned, could not find any to release"); + return NO_BUFFER_AVAILABLE; + + } + + return OK; +} + +void RingBufferConsumer::onFrameAvailable() { + status_t err; + + { + Mutex::Autolock _l(mMutex); + + /** + * Release oldest frame + */ + if (mBufferItemList.size() >= (size_t)mBufferCount) { + err = releaseOldestBufferLocked(/*pinnedFrames*/NULL); + assert(err != NOT_ENOUGH_DATA); + + // TODO: implement the case for NO_BUFFER_AVAILABLE + assert(err != NO_BUFFER_AVAILABLE); + if (err != OK) { + return; + } + // TODO: in unpinBuffer rerun this 
routine if we had buffers + // we could've locked but didn't because there was no space + } + + RingBufferItem& item = *mBufferItemList.insert(mBufferItemList.end(), + RingBufferItem()); + + /** + * Acquire new frame + */ + err = acquireBufferLocked(&item); + if (err != OK) { + if (err != NO_BUFFER_AVAILABLE) { + BI_LOGE("Error acquiring buffer: %s (%d)", strerror(err), err); + } + + mBufferItemList.erase(--mBufferItemList.end()); + return; + } + + BI_LOGV("New buffer acquired (timestamp %lld), " + "buffer items %u out of %d", + item.mTimestamp, + mBufferItemList.size(), mBufferCount); + + item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer; + } // end of mMutex lock + + ConsumerBase::onFrameAvailable(); +} + +void RingBufferConsumer::unpinBuffer(const BufferItem& item) { + Mutex::Autolock _l(mMutex); + + List<RingBufferItem>::iterator it, end, accIt; + + for (it = mBufferItemList.begin(), end = mBufferItemList.end(); + it != end; + ++it) { + + RingBufferItem& find = *it; + if (item.mGraphicBuffer == find.mGraphicBuffer) { + status_t res = addReleaseFenceLocked(item.mBuf, item.mFence); + + if (res != OK) { + BI_LOGE("Failed to add release fence to buffer " + "(timestamp %lld, framenumber %lld", + item.mTimestamp, item.mFrameNumber); + return; + } + + find.mPinCount--; + break; + } + } + + if (it == end) { + // This should never happen. If it happens, we have a bug. 
+ BI_LOGE("Failed to unpin buffer (timestamp %lld, framenumber %lld)", + item.mTimestamp, item.mFrameNumber); + } else { + BI_LOGV("Unpinned buffer (timestamp %lld, framenumber %lld)", + item.mTimestamp, item.mFrameNumber); + } +} + +status_t RingBufferConsumer::setDefaultBufferSize(uint32_t w, uint32_t h) { + Mutex::Autolock _l(mMutex); + return mBufferQueue->setDefaultBufferSize(w, h); +} + +status_t RingBufferConsumer::setDefaultBufferFormat(uint32_t defaultFormat) { + Mutex::Autolock _l(mMutex); + return mBufferQueue->setDefaultBufferFormat(defaultFormat); +} + +status_t RingBufferConsumer::setConsumerUsage(uint32_t usage) { + Mutex::Autolock _l(mMutex); + return mBufferQueue->setConsumerUsageBits(usage); +} + +} // namespace android diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h new file mode 100644 index 0000000..454fbae --- /dev/null +++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_GUI_RINGBUFFERCONSUMER_H +#define ANDROID_GUI_RINGBUFFERCONSUMER_H + +#include <gui/ConsumerBase.h> + +#include <ui/GraphicBuffer.h> + +#include <utils/String8.h> +#include <utils/Vector.h> +#include <utils/threads.h> +#include <utils/List.h> + +#define ANDROID_GRAPHICS_RINGBUFFERCONSUMER_JNI_ID "mRingBufferConsumer" + +namespace android { + +/** + * The RingBufferConsumer maintains a ring buffer of BufferItem objects, + * (which are 'acquired' as long as they are part of the ring buffer, and + * 'released' when they leave the ring buffer). + * + * When new buffers are produced, the oldest non-pinned buffer item is immediately + * dropped from the ring buffer, and overridden with the newest buffer. + * + * Users can only access a buffer item after pinning it (which also guarantees + * that during its duration it will not be released back into the BufferQueue). + * + * Note that the 'oldest' buffer is the one with the smallest timestamp. + * + * Edge cases: + * - If ringbuffer is not full, no drops occur when a buffer is produced. + * - If all the buffers get filled or pinned then there will be no empty + * buffers left, so the producer will block on dequeue. + */ +class RingBufferConsumer : public ConsumerBase, + public ConsumerBase::FrameAvailableListener +{ + public: + typedef ConsumerBase::FrameAvailableListener FrameAvailableListener; + + typedef BufferQueue::BufferItem BufferItem; + + enum { INVALID_BUFFER_SLOT = BufferQueue::INVALID_BUFFER_SLOT }; + enum { NO_BUFFER_AVAILABLE = BufferQueue::NO_BUFFER_AVAILABLE }; + + // Create a new ring buffer consumer. The consumerUsage parameter determines + // the consumer usage flags passed to the graphics allocator. The + // bufferCount parameter specifies how many buffers can be pinned for user + // access at the same time. 
+ RingBufferConsumer(uint32_t consumerUsage, + int bufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS); + + virtual ~RingBufferConsumer(); + + // set the name of the RingBufferConsumer that will be used to identify it in + // log messages. + void setName(const String8& name); + + sp<IGraphicBufferProducer> getProducerInterface() const { return getBufferQueue(); } + + // setDefaultBufferSize is used to set the size of buffers returned by + // requestBuffers when a with and height of zero is requested. + status_t setDefaultBufferSize(uint32_t w, uint32_t h); + + // setDefaultBufferFormat allows the BufferQueue to create + // GraphicBuffers of a defaultFormat if no format is specified + // by the producer endpoint. + status_t setDefaultBufferFormat(uint32_t defaultFormat); + + // setConsumerUsage allows the BufferQueue consumer usage to be + // set at a later time after construction. + status_t setConsumerUsage(uint32_t usage); + + // Buffer info, minus the graphics buffer/slot itself. + struct BufferInfo { + // mCrop is the current crop rectangle for this buffer slot. + Rect mCrop; + + // mTransform is the current transform flags for this buffer slot. + uint32_t mTransform; + + // mScalingMode is the current scaling mode for this buffer slot. + uint32_t mScalingMode; + + // mTimestamp is the current timestamp for this buffer slot. This gets + // to set by queueBuffer each time this slot is queued. + int64_t mTimestamp; + + // mFrameNumber is the number of the queued frame for this slot. + uint64_t mFrameNumber; + + // mPinned is whether or not the buffer has been pinned already. + bool mPinned; + }; + + struct RingBufferComparator { + // Return < 0 to select i1, > 0 to select i2, 0 for neither + // i1 or i2 can be NULL. + // + // The comparator has to implement a total ordering. Otherwise + // a linear scan won't find the most preferred buffer. 
+ virtual int compare(const BufferInfo* i1, + const BufferInfo* i2) const = 0; + + virtual ~RingBufferComparator() {} + }; + + struct PinnedBufferItem : public LightRefBase<PinnedBufferItem> { + PinnedBufferItem(wp<RingBufferConsumer> consumer, + const BufferItem& item) : + mConsumer(consumer), + mBufferItem(item) { + } + + ~PinnedBufferItem() { + sp<RingBufferConsumer> consumer = mConsumer.promote(); + if (consumer != NULL) { + consumer->unpinBuffer(mBufferItem); + } + } + + bool isEmpty() { + return mBufferItem.mBuf == BufferQueue::INVALID_BUFFER_SLOT; + } + + BufferItem& getBufferItem() { return mBufferItem; } + const BufferItem& getBufferItem() const { return mBufferItem; } + + private: + wp<RingBufferConsumer> mConsumer; + BufferItem mBufferItem; + }; + + // Find a buffer using the filter, then pin it before returning it. + // + // The filter will be invoked on each buffer item in the ring buffer, + // passing the item that was selected from each previous iteration, + // as well as the current iteration's item. + // + // Pinning will ensure that the buffer will not be dropped when a new + // frame is available. + sp<PinnedBufferItem> pinSelectedBuffer(const RingBufferComparator& filter, + bool waitForFence = true); + + // Release all the non-pinned buffers in the ring buffer + status_t clear(); + + private: + + // Override ConsumerBase::onFrameAvailable + virtual void onFrameAvailable(); + + void pinBufferLocked(const BufferItem& item); + void unpinBuffer(const BufferItem& item); + + // Releases oldest buffer. Returns NO_BUFFER_AVAILABLE + // if all the buffers were pinned. + // Returns NOT_ENOUGH_DATA if list was empty. 
+ status_t releaseOldestBufferLocked(size_t* pinnedFrames); + + struct RingBufferItem : public BufferItem { + RingBufferItem() : BufferItem(), mPinCount(0) {} + int mPinCount; + }; + + // List of acquired buffers in our ring buffer + List<RingBufferItem> mBufferItemList; + const int mBufferCount; +}; + +} // namespace android + +#endif // ANDROID_GUI_CPUCONSUMER_H diff --git a/services/camera/tests/CameraServiceTest/Android.mk b/services/camera/tests/CameraServiceTest/Android.mk deleted file mode 100644 index 41b6f63..0000000 --- a/services/camera/tests/CameraServiceTest/Android.mk +++ /dev/null @@ -1,26 +0,0 @@ -LOCAL_PATH:= $(call my-dir) - -include $(CLEAR_VARS) - -LOCAL_SRC_FILES:= CameraServiceTest.cpp - -LOCAL_MODULE:= CameraServiceTest - -LOCAL_MODULE_TAGS := tests - -LOCAL_C_INCLUDES += \ - frameworks/av/libs - -LOCAL_CFLAGS := - -LOCAL_SHARED_LIBRARIES += \ - libbinder \ - libcutils \ - libutils \ - libui \ - libcamera_client \ - libgui - -# Disable it because the ISurface interface may change, and before we have a -# chance to fix this test, we don't want to break normal builds. -#include $(BUILD_EXECUTABLE) diff --git a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp b/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp deleted file mode 100644 index e417b79..0000000 --- a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp +++ /dev/null @@ -1,924 +0,0 @@ -/* - * Copyright (C) 2010 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "CameraServiceTest" - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <sys/types.h> -#include <sys/wait.h> -#include <unistd.h> -#include <camera/Camera.h> -#include <camera/CameraParameters.h> -#include <ui/GraphicBuffer.h> -#include <camera/ICamera.h> -#include <camera/ICameraClient.h> -#include <camera/ICameraService.h> -#include <binder/IPCThreadState.h> -#include <binder/IServiceManager.h> -#include <binder/ProcessState.h> -#include <utils/KeyedVector.h> -#include <utils/Log.h> -#include <utils/Vector.h> -#include <utils/threads.h> - -using namespace android; - -// -// Assertion and Logging utilities -// -#define INFO(...) \ - do { \ - printf(__VA_ARGS__); \ - printf("\n"); \ - ALOGD(__VA_ARGS__); \ - } while(0) - -void assert_fail(const char *file, int line, const char *func, const char *expr) { - INFO("assertion failed at file %s, line %d, function %s:", - file, line, func); - INFO("%s", expr); - abort(); -} - -void assert_eq_fail(const char *file, int line, const char *func, - const char *expr, int actual) { - INFO("assertion failed at file %s, line %d, function %s:", - file, line, func); - INFO("(expected) %s != (actual) %d", expr, actual); - abort(); -} - -#define ASSERT(e) \ - do { \ - if (!(e)) \ - assert_fail(__FILE__, __LINE__, __func__, #e); \ - } while(0) - -#define ASSERT_EQ(expected, actual) \ - do { \ - int _x = (actual); \ - if (_x != (expected)) \ - assert_eq_fail(__FILE__, __LINE__, __func__, #expected, _x); \ - } while(0) - -// -// Holder service for pass objects between processes. 
-// -class IHolder : public IInterface { -protected: - enum { - HOLDER_PUT = IBinder::FIRST_CALL_TRANSACTION, - HOLDER_GET, - HOLDER_CLEAR - }; -public: - DECLARE_META_INTERFACE(Holder); - - virtual void put(sp<IBinder> obj) = 0; - virtual sp<IBinder> get() = 0; - virtual void clear() = 0; -}; - -class BnHolder : public BnInterface<IHolder> { - virtual status_t onTransact(uint32_t code, - const Parcel& data, - Parcel* reply, - uint32_t flags = 0); -}; - -class BpHolder : public BpInterface<IHolder> { -public: - BpHolder(const sp<IBinder>& impl) - : BpInterface<IHolder>(impl) { - } - - virtual void put(sp<IBinder> obj) { - Parcel data, reply; - data.writeStrongBinder(obj); - remote()->transact(HOLDER_PUT, data, &reply, IBinder::FLAG_ONEWAY); - } - - virtual sp<IBinder> get() { - Parcel data, reply; - remote()->transact(HOLDER_GET, data, &reply); - return reply.readStrongBinder(); - } - - virtual void clear() { - Parcel data, reply; - remote()->transact(HOLDER_CLEAR, data, &reply); - } -}; - -IMPLEMENT_META_INTERFACE(Holder, "CameraServiceTest.Holder"); - -status_t BnHolder::onTransact( - uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) { - switch(code) { - case HOLDER_PUT: { - put(data.readStrongBinder()); - return NO_ERROR; - } break; - case HOLDER_GET: { - reply->writeStrongBinder(get()); - return NO_ERROR; - } break; - case HOLDER_CLEAR: { - clear(); - return NO_ERROR; - } break; - default: - return BBinder::onTransact(code, data, reply, flags); - } -} - -class HolderService : public BnHolder { - virtual void put(sp<IBinder> obj) { - mObj = obj; - } - virtual sp<IBinder> get() { - return mObj; - } - virtual void clear() { - mObj.clear(); - } -private: - sp<IBinder> mObj; -}; - -// -// A mock CameraClient -// -class MCameraClient : public BnCameraClient { -public: - virtual void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2); - virtual void dataCallback(int32_t msgType, const sp<IMemory>& data); - virtual void 
dataCallbackTimestamp(nsecs_t timestamp, - int32_t msgType, const sp<IMemory>& data); - - // new functions - void clearStat(); - enum OP { EQ, GE, LE, GT, LT }; - void assertNotify(int32_t msgType, OP op, int count); - void assertData(int32_t msgType, OP op, int count); - void waitNotify(int32_t msgType, OP op, int count); - void waitData(int32_t msgType, OP op, int count); - void assertDataSize(int32_t msgType, OP op, int dataSize); - - void setReleaser(ICamera *releaser) { - mReleaser = releaser; - } -private: - Mutex mLock; - Condition mCond; - DefaultKeyedVector<int32_t, int> mNotifyCount; - DefaultKeyedVector<int32_t, int> mDataCount; - DefaultKeyedVector<int32_t, int> mDataSize; - bool test(OP op, int v1, int v2); - void assertTest(OP op, int v1, int v2); - - ICamera *mReleaser; -}; - -void MCameraClient::clearStat() { - Mutex::Autolock _l(mLock); - mNotifyCount.clear(); - mDataCount.clear(); - mDataSize.clear(); -} - -bool MCameraClient::test(OP op, int v1, int v2) { - switch (op) { - case EQ: return v1 == v2; - case GT: return v1 > v2; - case LT: return v1 < v2; - case GE: return v1 >= v2; - case LE: return v1 <= v2; - default: ASSERT(0); break; - } - return false; -} - -void MCameraClient::assertTest(OP op, int v1, int v2) { - if (!test(op, v1, v2)) { - ALOGE("assertTest failed: op=%d, v1=%d, v2=%d", op, v1, v2); - ASSERT(0); - } -} - -void MCameraClient::assertNotify(int32_t msgType, OP op, int count) { - Mutex::Autolock _l(mLock); - int v = mNotifyCount.valueFor(msgType); - assertTest(op, v, count); -} - -void MCameraClient::assertData(int32_t msgType, OP op, int count) { - Mutex::Autolock _l(mLock); - int v = mDataCount.valueFor(msgType); - assertTest(op, v, count); -} - -void MCameraClient::assertDataSize(int32_t msgType, OP op, int dataSize) { - Mutex::Autolock _l(mLock); - int v = mDataSize.valueFor(msgType); - assertTest(op, v, dataSize); -} - -void MCameraClient::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) { - INFO("%s", __func__); 
- Mutex::Autolock _l(mLock); - ssize_t i = mNotifyCount.indexOfKey(msgType); - if (i < 0) { - mNotifyCount.add(msgType, 1); - } else { - ++mNotifyCount.editValueAt(i); - } - mCond.signal(); -} - -void MCameraClient::dataCallback(int32_t msgType, const sp<IMemory>& data) { - INFO("%s", __func__); - int dataSize = data->size(); - INFO("data type = %d, size = %d", msgType, dataSize); - Mutex::Autolock _l(mLock); - ssize_t i = mDataCount.indexOfKey(msgType); - if (i < 0) { - mDataCount.add(msgType, 1); - mDataSize.add(msgType, dataSize); - } else { - ++mDataCount.editValueAt(i); - mDataSize.editValueAt(i) = dataSize; - } - mCond.signal(); - - if (msgType == CAMERA_MSG_VIDEO_FRAME) { - ASSERT(mReleaser != NULL); - mReleaser->releaseRecordingFrame(data); - } -} - -void MCameraClient::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, - const sp<IMemory>& data) { - dataCallback(msgType, data); -} - -void MCameraClient::waitNotify(int32_t msgType, OP op, int count) { - INFO("waitNotify: %d, %d, %d", msgType, op, count); - Mutex::Autolock _l(mLock); - while (true) { - int v = mNotifyCount.valueFor(msgType); - if (test(op, v, count)) { - break; - } - mCond.wait(mLock); - } -} - -void MCameraClient::waitData(int32_t msgType, OP op, int count) { - INFO("waitData: %d, %d, %d", msgType, op, count); - Mutex::Autolock _l(mLock); - while (true) { - int v = mDataCount.valueFor(msgType); - if (test(op, v, count)) { - break; - } - mCond.wait(mLock); - } -} - -// -// A mock Surface -// -class MSurface : public BnSurface { -public: - virtual status_t registerBuffers(const BufferHeap& buffers); - virtual void postBuffer(ssize_t offset); - virtual void unregisterBuffers(); - virtual sp<GraphicBuffer> requestBuffer(int bufferIdx, int usage); - virtual status_t setBufferCount(int bufferCount); - - // new functions - void clearStat(); - void waitUntil(int c0, int c1, int c2); - -private: - // check callback count - Condition mCond; - Mutex mLock; - int registerBuffersCount; - int 
postBufferCount; - int unregisterBuffersCount; -}; - -status_t MSurface::registerBuffers(const BufferHeap& buffers) { - INFO("%s", __func__); - Mutex::Autolock _l(mLock); - ++registerBuffersCount; - mCond.signal(); - return NO_ERROR; -} - -void MSurface::postBuffer(ssize_t offset) { - // INFO("%s", __func__); - Mutex::Autolock _l(mLock); - ++postBufferCount; - mCond.signal(); -} - -void MSurface::unregisterBuffers() { - INFO("%s", __func__); - Mutex::Autolock _l(mLock); - ++unregisterBuffersCount; - mCond.signal(); -} - -sp<GraphicBuffer> MSurface::requestBuffer(int bufferIdx, int usage) { - INFO("%s", __func__); - return NULL; -} - -status_t MSurface::setBufferCount(int bufferCount) { - INFO("%s", __func__); - return NULL; -} - -void MSurface::clearStat() { - Mutex::Autolock _l(mLock); - registerBuffersCount = 0; - postBufferCount = 0; - unregisterBuffersCount = 0; -} - -void MSurface::waitUntil(int c0, int c1, int c2) { - INFO("waitUntil: %d %d %d", c0, c1, c2); - Mutex::Autolock _l(mLock); - while (true) { - if (registerBuffersCount >= c0 && - postBufferCount >= c1 && - unregisterBuffersCount >= c2) { - break; - } - mCond.wait(mLock); - } -} - -// -// Utilities to use the Holder service -// -sp<IHolder> getHolder() { - sp<IServiceManager> sm = defaultServiceManager(); - ASSERT(sm != 0); - sp<IBinder> binder = sm->getService(String16("CameraServiceTest.Holder")); - ASSERT(binder != 0); - sp<IHolder> holder = interface_cast<IHolder>(binder); - ASSERT(holder != 0); - return holder; -} - -void putTempObject(sp<IBinder> obj) { - INFO("%s", __func__); - getHolder()->put(obj); -} - -sp<IBinder> getTempObject() { - INFO("%s", __func__); - return getHolder()->get(); -} - -void clearTempObject() { - INFO("%s", __func__); - getHolder()->clear(); -} - -// -// Get a Camera Service -// -sp<ICameraService> getCameraService() { - sp<IServiceManager> sm = defaultServiceManager(); - ASSERT(sm != 0); - sp<IBinder> binder = sm->getService(String16("media.camera")); - ASSERT(binder 
!= 0); - sp<ICameraService> cs = interface_cast<ICameraService>(binder); - ASSERT(cs != 0); - return cs; -} - -int getNumberOfCameras() { - sp<ICameraService> cs = getCameraService(); - return cs->getNumberOfCameras(); -} - -// -// Various Connect Tests -// -void testConnect(int cameraId) { - INFO("%s", __func__); - sp<ICameraService> cs = getCameraService(); - sp<MCameraClient> cc = new MCameraClient(); - sp<ICamera> c = cs->connect(cc, cameraId); - ASSERT(c != 0); - c->disconnect(); -} - -void testAllowConnectOnceOnly(int cameraId) { - INFO("%s", __func__); - sp<ICameraService> cs = getCameraService(); - // Connect the first client. - sp<MCameraClient> cc = new MCameraClient(); - sp<ICamera> c = cs->connect(cc, cameraId); - ASSERT(c != 0); - // Same client -- ok. - ASSERT(cs->connect(cc, cameraId) != 0); - // Different client -- not ok. - sp<MCameraClient> cc2 = new MCameraClient(); - ASSERT(cs->connect(cc2, cameraId) == 0); - c->disconnect(); -} - -void testReconnectFailed() { - INFO("%s", __func__); - sp<ICamera> c = interface_cast<ICamera>(getTempObject()); - sp<MCameraClient> cc = new MCameraClient(); - ASSERT(c->connect(cc) != NO_ERROR); -} - -void testReconnectSuccess() { - INFO("%s", __func__); - sp<ICamera> c = interface_cast<ICamera>(getTempObject()); - sp<MCameraClient> cc = new MCameraClient(); - ASSERT(c->connect(cc) == NO_ERROR); - c->disconnect(); -} - -void testLockFailed() { - INFO("%s", __func__); - sp<ICamera> c = interface_cast<ICamera>(getTempObject()); - ASSERT(c->lock() != NO_ERROR); -} - -void testLockUnlockSuccess() { - INFO("%s", __func__); - sp<ICamera> c = interface_cast<ICamera>(getTempObject()); - ASSERT(c->lock() == NO_ERROR); - ASSERT(c->unlock() == NO_ERROR); -} - -void testLockSuccess() { - INFO("%s", __func__); - sp<ICamera> c = interface_cast<ICamera>(getTempObject()); - ASSERT(c->lock() == NO_ERROR); - c->disconnect(); -} - -// -// Run the connect tests in another process. 
-// -const char *gExecutable; - -struct FunctionTableEntry { - const char *name; - void (*func)(); -}; - -FunctionTableEntry function_table[] = { -#define ENTRY(x) {#x, &x} - ENTRY(testReconnectFailed), - ENTRY(testReconnectSuccess), - ENTRY(testLockUnlockSuccess), - ENTRY(testLockFailed), - ENTRY(testLockSuccess), -#undef ENTRY -}; - -void runFunction(const char *tag) { - INFO("runFunction: %s", tag); - int entries = sizeof(function_table) / sizeof(function_table[0]); - for (int i = 0; i < entries; i++) { - if (strcmp(function_table[i].name, tag) == 0) { - (*function_table[i].func)(); - return; - } - } - ASSERT(0); -} - -void runInAnotherProcess(const char *tag) { - pid_t pid = fork(); - if (pid == 0) { - execlp(gExecutable, gExecutable, tag, NULL); - ASSERT(0); - } else { - int status; - ASSERT_EQ(pid, wait(&status)); - ASSERT_EQ(0, status); - } -} - -void testReconnect(int cameraId) { - INFO("%s", __func__); - sp<ICameraService> cs = getCameraService(); - sp<MCameraClient> cc = new MCameraClient(); - sp<ICamera> c = cs->connect(cc, cameraId); - ASSERT(c != 0); - // Reconnect to the same client -- ok. - ASSERT(c->connect(cc) == NO_ERROR); - // Reconnect to a different client (but the same pid) -- ok. - sp<MCameraClient> cc2 = new MCameraClient(); - ASSERT(c->connect(cc2) == NO_ERROR); - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); -} - -void testLockUnlock(int cameraId) { - sp<ICameraService> cs = getCameraService(); - sp<MCameraClient> cc = new MCameraClient(); - sp<ICamera> c = cs->connect(cc, cameraId); - ASSERT(c != 0); - // We can lock as many times as we want. - ASSERT(c->lock() == NO_ERROR); - ASSERT(c->lock() == NO_ERROR); - // Lock from a different process -- not ok. - putTempObject(c->asBinder()); - runInAnotherProcess("testLockFailed"); - // Unlock then lock from a different process -- ok. 
- ASSERT(c->unlock() == NO_ERROR); - runInAnotherProcess("testLockUnlockSuccess"); - // Unlock then lock from a different process -- ok. - runInAnotherProcess("testLockSuccess"); - clearTempObject(); -} - -void testReconnectFromAnotherProcess(int cameraId) { - INFO("%s", __func__); - - sp<ICameraService> cs = getCameraService(); - sp<MCameraClient> cc = new MCameraClient(); - sp<ICamera> c = cs->connect(cc, cameraId); - ASSERT(c != 0); - // Reconnect from a different process -- not ok. - putTempObject(c->asBinder()); - runInAnotherProcess("testReconnectFailed"); - // Unlock then reconnect from a different process -- ok. - ASSERT(c->unlock() == NO_ERROR); - runInAnotherProcess("testReconnectSuccess"); - clearTempObject(); -} - -// We need to flush the command buffer after the reference -// to ICamera is gone. The sleep is for the server to run -// the destructor for it. -static void flushCommands() { - IPCThreadState::self()->flushCommands(); - usleep(200000); // 200ms -} - -// Run a test case -#define RUN(class_name, cameraId) do { \ - { \ - INFO(#class_name); \ - class_name instance; \ - instance.init(cameraId); \ - instance.run(); \ - } \ - flushCommands(); \ -} while(0) - -// Base test case after the the camera is connected. 
-class AfterConnect { -public: - void init(int cameraId) { - cs = getCameraService(); - cc = new MCameraClient(); - c = cs->connect(cc, cameraId); - ASSERT(c != 0); - } - -protected: - sp<ICameraService> cs; - sp<MCameraClient> cc; - sp<ICamera> c; - - ~AfterConnect() { - c->disconnect(); - c.clear(); - cc.clear(); - cs.clear(); - } -}; - -class TestSetPreviewDisplay : public AfterConnect { -public: - void run() { - sp<MSurface> surface = new MSurface(); - ASSERT(c->setPreviewDisplay(surface) == NO_ERROR); - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -class TestStartPreview : public AfterConnect { -public: - void run() { - sp<MSurface> surface = new MSurface(); - ASSERT(c->setPreviewDisplay(surface) == NO_ERROR); - - ASSERT(c->startPreview() == NO_ERROR); - ASSERT(c->previewEnabled() == true); - - surface->waitUntil(1, 10, 0); // needs 1 registerBuffers and 10 postBuffer - surface->clearStat(); - - sp<MSurface> another_surface = new MSurface(); - c->setPreviewDisplay(another_surface); // just to make sure unregisterBuffers - // is called. - surface->waitUntil(0, 0, 1); // needs unregisterBuffers - - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -class TestStartPreviewWithoutDisplay : public AfterConnect { -public: - void run() { - ASSERT(c->startPreview() == NO_ERROR); - ASSERT(c->previewEnabled() == true); - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -// Base test case after the the camera is connected and the preview is started. 
-class AfterStartPreview : public AfterConnect { -public: - void init(int cameraId) { - AfterConnect::init(cameraId); - surface = new MSurface(); - ASSERT(c->setPreviewDisplay(surface) == NO_ERROR); - ASSERT(c->startPreview() == NO_ERROR); - } - -protected: - sp<MSurface> surface; - - ~AfterStartPreview() { - surface.clear(); - } -}; - -class TestAutoFocus : public AfterStartPreview { -public: - void run() { - cc->assertNotify(CAMERA_MSG_FOCUS, MCameraClient::EQ, 0); - c->autoFocus(); - cc->waitNotify(CAMERA_MSG_FOCUS, MCameraClient::EQ, 1); - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -class TestStopPreview : public AfterStartPreview { -public: - void run() { - ASSERT(c->previewEnabled() == true); - c->stopPreview(); - ASSERT(c->previewEnabled() == false); - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -class TestTakePicture: public AfterStartPreview { -public: - void run() { - ASSERT(c->takePicture() == NO_ERROR); - cc->waitNotify(CAMERA_MSG_SHUTTER, MCameraClient::EQ, 1); - cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1); - cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1); - c->stopPreview(); - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -class TestTakeMultiplePictures: public AfterStartPreview { -public: - void run() { - for (int i = 0; i < 10; i++) { - cc->clearStat(); - ASSERT(c->takePicture() == NO_ERROR); - cc->waitNotify(CAMERA_MSG_SHUTTER, MCameraClient::EQ, 1); - cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1); - cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1); - } - c->disconnect(); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } -}; - -class TestGetParameters: public AfterStartPreview { -public: - void run() { - String8 param_str = c->getParameters(); - INFO("%s", static_cast<const char*>(param_str)); - } -}; - -static bool getNextSize(const char **ptrS, int *w, 
int *h) { - const char *s = *ptrS; - - // skip over ',' - if (*s == ',') s++; - - // remember start position in p - const char *p = s; - while (*s != '\0' && *s != 'x') { - s++; - } - if (*s == '\0') return false; - - // get the width - *w = atoi(p); - - // skip over 'x' - ASSERT(*s == 'x'); - p = s + 1; - while (*s != '\0' && *s != ',') { - s++; - } - - // get the height - *h = atoi(p); - *ptrS = s; - return true; -} - -class TestPictureSize : public AfterStartPreview { -public: - void checkOnePicture(int w, int h) { - const float rate = 0.9; // byte per pixel limit - int pixels = w * h; - - CameraParameters param(c->getParameters()); - param.setPictureSize(w, h); - // disable thumbnail to get more accurate size. - param.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, 0); - param.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, 0); - c->setParameters(param.flatten()); - - cc->clearStat(); - ASSERT(c->takePicture() == NO_ERROR); - cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1); - //cc->assertDataSize(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, pixels*3/2); - cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1); - cc->assertDataSize(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::LT, - int(pixels * rate)); - cc->assertDataSize(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::GT, 0); - cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0); - } - - void run() { - CameraParameters param(c->getParameters()); - int w, h; - const char *s = param.get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES); - while (getNextSize(&s, &w, &h)) { - ALOGD("checking picture size %dx%d", w, h); - checkOnePicture(w, h); - } - } -}; - -class TestPreviewCallbackFlag : public AfterConnect { -public: - void run() { - sp<MSurface> surface = new MSurface(); - ASSERT(c->setPreviewDisplay(surface) == NO_ERROR); - - // Try all flag combinations. 
- for (int v = 0; v < 8; v++) { - ALOGD("TestPreviewCallbackFlag: flag=%d", v); - usleep(100000); // sleep a while to clear the in-flight callbacks. - cc->clearStat(); - c->setPreviewCallbackFlag(v); - ASSERT(c->previewEnabled() == false); - ASSERT(c->startPreview() == NO_ERROR); - ASSERT(c->previewEnabled() == true); - sleep(2); - c->stopPreview(); - if ((v & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) == 0) { - cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, 0); - } else { - if ((v & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) == 0) { - cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::GE, 10); - } else { - cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, 1); - } - } - } - } -}; - -class TestRecording : public AfterConnect { -public: - void run() { - ASSERT(c->recordingEnabled() == false); - sp<MSurface> surface = new MSurface(); - ASSERT(c->setPreviewDisplay(surface) == NO_ERROR); - c->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK); - cc->setReleaser(c.get()); - c->startRecording(); - ASSERT(c->recordingEnabled() == true); - sleep(2); - c->stopRecording(); - usleep(100000); // sleep a while to clear the in-flight callbacks. 
- cc->setReleaser(NULL); - cc->assertData(CAMERA_MSG_VIDEO_FRAME, MCameraClient::GE, 10); - } -}; - -class TestPreviewSize : public AfterStartPreview { -public: - void checkOnePicture(int w, int h) { - int size = w*h*3/2; // should read from parameters - - c->stopPreview(); - - CameraParameters param(c->getParameters()); - param.setPreviewSize(w, h); - c->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK); - c->setParameters(param.flatten()); - - c->startPreview(); - - cc->clearStat(); - cc->waitData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::GE, 1); - cc->assertDataSize(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, size); - } - - void run() { - CameraParameters param(c->getParameters()); - int w, h; - const char *s = param.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES); - while (getNextSize(&s, &w, &h)) { - ALOGD("checking preview size %dx%d", w, h); - checkOnePicture(w, h); - } - } -}; - -void runHolderService() { - defaultServiceManager()->addService( - String16("CameraServiceTest.Holder"), new HolderService()); - ProcessState::self()->startThreadPool(); -} - -int main(int argc, char **argv) -{ - if (argc != 1) { - runFunction(argv[1]); - return 0; - } - INFO("CameraServiceTest start"); - gExecutable = argv[0]; - runHolderService(); - int n = getNumberOfCameras(); - INFO("%d Cameras available", n); - - for (int id = 0; id < n; id++) { - INFO("Testing camera %d", id); - testConnect(id); flushCommands(); - testAllowConnectOnceOnly(id); flushCommands(); - testReconnect(id); flushCommands(); - testLockUnlock(id); flushCommands(); - testReconnectFromAnotherProcess(id); flushCommands(); - - RUN(TestSetPreviewDisplay, id); - RUN(TestStartPreview, id); - RUN(TestStartPreviewWithoutDisplay, id); - RUN(TestAutoFocus, id); - RUN(TestStopPreview, id); - RUN(TestTakePicture, id); - RUN(TestTakeMultiplePictures, id); - RUN(TestGetParameters, id); - RUN(TestPictureSize, id); - RUN(TestPreviewCallbackFlag, id); - RUN(TestRecording, id); - RUN(TestPreviewSize, id); 
- } - - INFO("CameraServiceTest finished"); -} diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk new file mode 100644 index 0000000..08006c8 --- /dev/null +++ b/services/medialog/Android.mk @@ -0,0 +1,11 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := MediaLogService.cpp + +LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libnbaio + +LOCAL_MODULE:= libmedialogservice + +include $(BUILD_SHARED_LIBRARY) diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp new file mode 100644 index 0000000..683fdf3 --- /dev/null +++ b/services/medialog/MediaLogService.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "MediaLog" +//#define LOG_NDEBUG 0 + +#include <sys/mman.h> +#include <utils/Log.h> +#include <binder/PermissionCache.h> +#include <media/nbaio/NBLog.h> +#include <private/android_filesystem_config.h> +#include "MediaLogService.h" + +namespace android { + +void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name) +{ + if (IPCThreadState::self()->getCallingUid() != AID_MEDIA || shared == 0 || + size < kMinSize || size > kMaxSize || name == NULL || + shared->size() < NBLog::Timeline::sharedSize(size)) { + return; + } + sp<NBLog::Reader> reader(new NBLog::Reader(size, shared)); + NamedReader namedReader(reader, name); + Mutex::Autolock _l(mLock); + mNamedReaders.add(namedReader); +} + +void MediaLogService::unregisterWriter(const sp<IMemory>& shared) +{ + if (IPCThreadState::self()->getCallingUid() != AID_MEDIA || shared == 0) { + return; + } + Mutex::Autolock _l(mLock); + for (size_t i = 0; i < mNamedReaders.size(); ) { + if (mNamedReaders[i].reader()->isIMemory(shared)) { + mNamedReaders.removeAt(i); + } else { + i++; + } + } +} + +status_t MediaLogService::dump(int fd, const Vector<String16>& args) +{ + // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp + static const String16 sDump("android.permission.DUMP"); + if (!(IPCThreadState::self()->getCallingUid() == AID_MEDIA || + PermissionCache::checkCallingPermission(sDump))) { + fdprintf(fd, "Permission Denial: can't dump media.log from pid=%d, uid=%d\n", + IPCThreadState::self()->getCallingPid(), + IPCThreadState::self()->getCallingUid()); + return NO_ERROR; + } + + Vector<NamedReader> namedReaders; + { + Mutex::Autolock _l(mLock); + namedReaders = mNamedReaders; + } + for (size_t i = 0; i < namedReaders.size(); i++) { + const NamedReader& namedReader = namedReaders[i]; + if (fd >= 0) { + fdprintf(fd, "\n%s:\n", namedReader.name()); + } else { + ALOGI("%s:", namedReader.name()); + } + 
namedReader.reader()->dump(fd, 0 /*indent*/); + } + return NO_ERROR; +} + +status_t MediaLogService::onTransact(uint32_t code, const Parcel& data, Parcel* reply, + uint32_t flags) +{ + return BnMediaLogService::onTransact(code, data, reply, flags); +} + +} // namespace android diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h new file mode 100644 index 0000000..2d89a41 --- /dev/null +++ b/services/medialog/MediaLogService.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_MEDIA_LOG_SERVICE_H +#define ANDROID_MEDIA_LOG_SERVICE_H + +#include <binder/BinderService.h> +#include <media/IMediaLogService.h> +#include <media/nbaio/NBLog.h> + +namespace android { + +class MediaLogService : public BinderService<MediaLogService>, public BnMediaLogService +{ + friend class BinderService<MediaLogService>; // for MediaLogService() +public: + MediaLogService() : BnMediaLogService() { } + virtual ~MediaLogService() { } + virtual void onFirstRef() { } + + static const char* getServiceName() { return "media.log"; } + + static const size_t kMinSize = 0x100; + static const size_t kMaxSize = 0x10000; + virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name); + virtual void unregisterWriter(const sp<IMemory>& shared); + + virtual status_t dump(int fd, const Vector<String16>& args); + virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, + uint32_t flags); + +private: + Mutex mLock; + class NamedReader { + public: + NamedReader() : mReader(0) { mName[0] = '\0'; } // for Vector + NamedReader(const sp<NBLog::Reader>& reader, const char *name) : mReader(reader) + { strlcpy(mName, name, sizeof(mName)); } + ~NamedReader() { } + const sp<NBLog::Reader>& reader() const { return mReader; } + const char* name() const { return mName; } + private: + sp<NBLog::Reader> mReader; + static const size_t kMaxName = 32; + char mName[kMaxName]; + }; + Vector<NamedReader> mNamedReaders; +}; + +} // namespace android + +#endif // ANDROID_MEDIA_LOG_SERVICE_H |