diff options
59 files changed, 1967 insertions, 493 deletions
diff --git a/camera/Camera.cpp b/camera/Camera.cpp index 22199fa..85f44f0 100644 --- a/camera/Camera.cpp +++ b/camera/Camera.cpp @@ -77,6 +77,32 @@ sp<Camera> Camera::connect(int cameraId, const String16& clientPackageName, return CameraBaseT::connect(cameraId, clientPackageName, clientUid); } +status_t Camera::connectLegacy(int cameraId, int halVersion, + const String16& clientPackageName, + int clientUid, + sp<Camera>& camera) +{ + ALOGV("%s: connect legacy camera device", __FUNCTION__); + sp<Camera> c = new Camera(cameraId); + sp<ICameraClient> cl = c; + status_t status = NO_ERROR; + const sp<ICameraService>& cs = CameraBaseT::getCameraService(); + + if (cs != 0) { + status = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName, + clientUid, /*out*/c->mCamera); + } + if (status == OK && c->mCamera != 0) { + c->mCamera->asBinder()->linkToDeath(c); + c->mStatus = NO_ERROR; + camera = c; + } else { + ALOGW("An error occurred while connecting to camera: %d", cameraId); + c.clear(); + } + return status; +} + status_t Camera::reconnect() { ALOGV("reconnect"); diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp index 79c33f9..5485205 100644 --- a/camera/ICameraService.cpp +++ b/camera/ICameraService.cpp @@ -186,6 +186,29 @@ public: return status; } + // connect to camera service (android.hardware.Camera) + virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId, + int halVersion, + const String16 &clientPackageName, int clientUid, + /*out*/sp<ICamera>& device) + { + Parcel data, reply; + data.writeInterfaceToken(ICameraService::getInterfaceDescriptor()); + data.writeStrongBinder(cameraClient->asBinder()); + data.writeInt32(cameraId); + data.writeInt32(halVersion); + data.writeString16(clientPackageName); + data.writeInt32(clientUid); + remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply); + + if (readExceptionCode(reply)) return -EPROTO; + status_t status = reply.readInt32(); + if (reply.readInt32() 
!= 0) { + device = interface_cast<ICamera>(reply.readStrongBinder()); + } + return status; + } + // connect to camera service (pro client) virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId, const String16 &clientPackageName, int clientUid, @@ -446,6 +469,27 @@ status_t BnCameraService::onTransact( reply->writeInt32(supportsCameraApi(cameraId, apiVersion)); return NO_ERROR; } break; + case CONNECT_LEGACY: { + CHECK_INTERFACE(ICameraService, data, reply); + sp<ICameraClient> cameraClient = + interface_cast<ICameraClient>(data.readStrongBinder()); + int32_t cameraId = data.readInt32(); + int32_t halVersion = data.readInt32(); + const String16 clientName = data.readString16(); + int32_t clientUid = data.readInt32(); + sp<ICamera> camera; + status_t status = connectLegacy(cameraClient, cameraId, halVersion, + clientName, clientUid, /*out*/camera); + reply->writeNoException(); + reply->writeInt32(status); + if (camera != NULL) { + reply->writeInt32(1); + reply->writeStrongBinder(camera->asBinder()); + } else { + reply->writeInt32(0); + } + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp index c2a8f1b..af84069 100644 --- a/cmds/screenrecord/Overlay.cpp +++ b/cmds/screenrecord/Overlay.cpp @@ -14,6 +14,10 @@ * limitations under the License. 
*/ +#include <assert.h> +#include <inttypes.h> +#include <stdlib.h> + #define LOG_TAG "ScreenRecord" //#define LOG_NDEBUG 0 #include <utils/Log.h> @@ -27,9 +31,6 @@ #include <GLES2/gl2.h> #include <GLES2/gl2ext.h> -#include <stdlib.h> -#include <assert.h> - #include "screenrecord.h" #include "Overlay.h" #include "TextRenderer.h" @@ -235,7 +236,7 @@ void Overlay::processFrame_l() { char textBuf[64]; getTimeString_l(monotonicNsec, textBuf, sizeof(textBuf)); - String8 timeStr(String8::format("%s f=%lld (%zd)", + String8 timeStr(String8::format("%s f=%" PRId64 " (%zd)", textBuf, frameNumber, mTotalDroppedFrames)); mTextRenderer.drawString(mTexProgram, Program::kIdentity, 0, 0, timeStr); diff --git a/cmds/screenrecord/TextRenderer.cpp b/cmds/screenrecord/TextRenderer.cpp index 784055c..6a9176b 100644 --- a/cmds/screenrecord/TextRenderer.cpp +++ b/cmds/screenrecord/TextRenderer.cpp @@ -353,6 +353,6 @@ char* TextRenderer::breakString(const char* str, float maxWidth) const { } } - ALOGV("goodPos=%d for str='%s'", goodPos, str); + ALOGV("goodPos=%zu for str='%s'", goodPos, str); return const_cast<char*>(str + goodPos); } diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp index 02ed53a..02df1d2 100644 --- a/cmds/screenrecord/screenrecord.cpp +++ b/cmds/screenrecord/screenrecord.cpp @@ -14,6 +14,19 @@ * limitations under the License. 
*/ +#include <assert.h> +#include <ctype.h> +#include <fcntl.h> +#include <inttypes.h> +#include <getopt.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/wait.h> +#include <termios.h> +#include <unistd.h> + #define LOG_TAG "ScreenRecord" #define ATRACE_TAG ATRACE_TAG_GRAPHICS //#define LOG_NDEBUG 0 @@ -36,18 +49,6 @@ #include <media/stagefright/MediaMuxer.h> #include <media/ICrypto.h> -#include <stdlib.h> -#include <unistd.h> -#include <string.h> -#include <stdio.h> -#include <ctype.h> -#include <fcntl.h> -#include <signal.h> -#include <getopt.h> -#include <sys/wait.h> -#include <termios.h> -#include <assert.h> - #include "screenrecord.h" #include "Overlay.h" #include "FrameOutput.h" @@ -354,7 +355,7 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, case NO_ERROR: // got a buffer if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) { - ALOGV("Got codec config buffer (%u bytes)", size); + ALOGV("Got codec config buffer (%zu bytes)", size); if (muxer != NULL) { // ignore this -- we passed the CSD into MediaMuxer when // we got the format change notification @@ -362,7 +363,7 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, } } if (size != 0) { - ALOGV("Got data in buffer %d, size=%d, pts=%lld", + ALOGV("Got data in buffer %zu, size=%zu, pts=%" PRId64, bufIndex, size, ptsUsec); { // scope @@ -473,7 +474,7 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, ALOGV("Encoder stopping (req=%d)", gStopRequested); if (gVerbose) { - printf("Encoder stopping; recorded %u frames in %lld seconds\n", + printf("Encoder stopping; recorded %u frames in %" PRId64 " seconds\n", debugNumFrames, nanoseconds_to_seconds( systemTime(CLOCK_MONOTONIC) - startWhenNsec)); } diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp index 2b71904..63341e0 100644 --- a/drm/drmserver/DrmManagerService.cpp +++ b/drm/drmserver/DrmManagerService.cpp @@ -34,7 +34,18 @@ using namespace 
android; static Vector<uid_t> trustedUids; static bool isProtectedCallAllowed() { - return true; + // TODO + // Following implementation is just for reference. + // Each OEM manufacturer should implement/replace with their own solutions. + IPCThreadState* ipcState = IPCThreadState::self(); + uid_t uid = ipcState->getCallingUid(); + + for (unsigned int i = 0; i < trustedUids.size(); ++i) { + if (trustedUids[i] == uid) { + return true; + } + } + return false; } void DrmManagerService::instantiate() { diff --git a/include/camera/Camera.h b/include/camera/Camera.h index 79682b8..2b60842 100644 --- a/include/camera/Camera.h +++ b/include/camera/Camera.h @@ -74,6 +74,10 @@ public: const String16& clientPackageName, int clientUid); + static status_t connectLegacy(int cameraId, int halVersion, + const String16& clientPackageName, + int clientUid, sp<Camera>& camera); + virtual ~Camera(); status_t reconnect(); diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h index d7e5dac..f7f06bb 100644 --- a/include/camera/ICameraService.h +++ b/include/camera/ICameraService.h @@ -52,6 +52,7 @@ public: GET_CAMERA_VENDOR_TAG_DESCRIPTOR, GET_LEGACY_PARAMETERS, SUPPORTS_CAMERA_API, + CONNECT_LEGACY, }; enum { @@ -63,6 +64,10 @@ public: API_VERSION_2 = 2, }; + enum { + CAMERA_HAL_API_VERSION_UNSPECIFIED = -1 + }; + public: DECLARE_META_INTERFACE(CameraService); @@ -125,6 +130,18 @@ public: */ virtual status_t supportsCameraApi( int cameraId, int apiVersion) = 0; + + /** + * Connect the device as a legacy device for a given HAL version. + * For halVersion, use CAMERA_API_DEVICE_VERSION_* for a particular + * version, or CAMERA_HAL_API_VERSION_UNSPECIFIED for a service-selected version. 
+ */ + virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, + int cameraId, int halVersion, + const String16& clientPackageName, + int clientUid, + /*out*/ + sp<ICamera>& device) = 0; }; // ---------------------------------------------------------------------------- diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index a4722cb..c89ceaa 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -99,6 +99,8 @@ public: // to be non-zero if status == NO_ERROR static status_t getOutputSamplingRate(uint32_t* samplingRate, audio_stream_type_t stream); + static status_t getOutputSamplingRateForAttr(uint32_t* samplingRate, + const audio_attributes_t *attr); static status_t getOutputFrameCount(size_t* frameCount, audio_stream_type_t stream); static status_t getOutputLatency(uint32_t* latency, @@ -212,7 +214,12 @@ public: audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, const audio_offload_info_t *offloadInfo = NULL); - + static audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate = 0, + audio_format_t format = AUDIO_FORMAT_DEFAULT, + audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO, + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL); static status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session); diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h index 79db323..3492520 100644 --- a/include/media/AudioTrack.h +++ b/include/media/AudioTrack.h @@ -253,7 +253,8 @@ public: transfer_type transferType = TRANSFER_DEFAULT, const audio_offload_info_t *offloadInfo = NULL, int uid = -1, - pid_t pid = -1); + pid_t pid = -1, + audio_attributes_t* pAttributes = NULL); /* Result of constructing the AudioTrack. 
This must be checked for successful initialization * before using any AudioTrack API (except for set()), because using @@ -586,6 +587,11 @@ protected: AudioTrack(const AudioTrack& other); AudioTrack& operator = (const AudioTrack& other); + void setAttributesFromStreamType(audio_stream_type_t streamType); + void setStreamTypeFromAttributes(audio_attributes_t& aa); + /* paa is guaranteed non-NULL */ + bool isValidAttributes(const audio_attributes_t *paa); + /* a small internal class to handle the callback */ class AudioTrackThread : public Thread { @@ -626,6 +632,8 @@ protected: nsecs_t processAudioBuffer(); bool isOffloaded() const; + bool isDirect() const; + bool isOffloadedOrDirect() const; // caller must hold lock on mLock for all _l methods @@ -642,6 +650,13 @@ protected: bool isOffloaded_l() const { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; } + bool isOffloadedOrDirect_l() const + { return (mFlags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD| + AUDIO_OUTPUT_FLAG_DIRECT)) != 0; } + + bool isDirect_l() const + { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; } + // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0 sp<IAudioTrack> mAudioTrack; sp<IMemory> mCblkMemory; @@ -667,6 +682,7 @@ protected: transfer_type mTransfer; audio_offload_info_t mOffloadInfoCopy; const audio_offload_info_t* mOffloadInfo; + audio_attributes_t mAttributes; // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. For 8-bit PCM data, it's // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer. 
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h index c742810..fc8be20 100644 --- a/include/media/IAudioFlinger.h +++ b/include/media/IAudioFlinger.h @@ -50,6 +50,7 @@ public: TRACK_TIMED = 1, // client requests a TimedAudioTrack TRACK_FAST = 2, // client requests a fast AudioTrack or AudioRecord TRACK_OFFLOAD = 4, // client requests offload to hw codec + TRACK_DIRECT = 8, // client requests a direct output }; typedef uint32_t track_flags_t; diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h index d422aa3..959e4c3 100644 --- a/include/media/IAudioPolicyService.h +++ b/include/media/IAudioPolicyService.h @@ -56,6 +56,12 @@ public: audio_channel_mask_t channelMask = 0, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, const audio_offload_info_t *offloadInfo = NULL) = 0; + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate = 0, + audio_format_t format = AUDIO_FORMAT_DEFAULT, + audio_channel_mask_t channelMask = 0, + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL) = 0; virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0) = 0; diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp index db5c78f..695767d 100644 --- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp +++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp @@ -19,11 +19,13 @@ #define ARRAY_SIZE(array) (sizeof array / sizeof array[0]) //#define LOG_NDEBUG 0 -#include <cutils/log.h> #include <assert.h> +#include <inttypes.h> +#include <new> #include <stdlib.h> #include <string.h> -#include <new> + +#include <cutils/log.h> #include "EffectBundle.h" @@ -560,11 +562,12 @@ int LvmBundle_init(EffectContext *pContext){ MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size); if (MemTab.Region[i].pBaseAddress == LVM_NULL){ - 
ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %ld bytes " - "for region %u\n", MemTab.Region[i].Size, i ); + ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32 + " bytes for region %u\n", MemTab.Region[i].Size, i ); bMallocFailure = LVM_TRUE; }else{ - ALOGV("\tLvmBundle_init CreateInstance allocated %ld bytes for region %u at %p\n", + ALOGV("\tLvmBundle_init CreateInstance allocated %" PRIu32 + " bytes for region %u at %p\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); } } @@ -576,11 +579,11 @@ int LvmBundle_init(EffectContext *pContext){ if(bMallocFailure == LVM_TRUE){ for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){ if (MemTab.Region[i].pBaseAddress == LVM_NULL){ - ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %ld bytes " - "for region %u Not freeing\n", MemTab.Region[i].Size, i ); + ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32 + " bytes for region %u Not freeing\n", MemTab.Region[i].Size, i ); }else{ - ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated %ld bytes " - "for region %u at %p- free\n", + ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated %" PRIu32 + " bytes for region %u at %p- free\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); free(MemTab.Region[i].pBaseAddress); } @@ -889,16 +892,16 @@ void LvmEffect_free(EffectContext *pContext){ for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){ if (MemTab.Region[i].Size != 0){ if (MemTab.Region[i].pBaseAddress != NULL){ - ALOGV("\tLvmEffect_free - START freeing %ld bytes for region %u at %p\n", + ALOGV("\tLvmEffect_free - START freeing %" PRIu32 " bytes for region %u at %p\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); free(MemTab.Region[i].pBaseAddress); - ALOGV("\tLvmEffect_free - END freeing %ld bytes for region %u at %p\n", + ALOGV("\tLvmEffect_free - END freeing %" PRIu32 " bytes for region %u at %p\n", MemTab.Region[i].Size, 
i, MemTab.Region[i].pBaseAddress); }else{ - ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %ld bytes " - "for region %u at %p ERROR\n", + ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %" PRIu32 + " bytes for region %u at %p ERROR\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); } } diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp index c6d3759..13f1a0d 100644 --- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp +++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp @@ -19,11 +19,13 @@ #define ARRAY_SIZE(array) (sizeof array / sizeof array[0]) //#define LOG_NDEBUG 0 -#include <cutils/log.h> #include <assert.h> +#include <inttypes.h> +#include <new> #include <stdlib.h> #include <string.h> -#include <new> + +#include <cutils/log.h> #include "EffectReverb.h" // from Reverb/lib #include "LVREV.h" @@ -269,7 +271,7 @@ extern "C" int EffectCreate(const effect_uuid_t *uuid, pContext->InFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2); pContext->OutFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2); - ALOGV("\tEffectCreate %p, size %d", pContext, sizeof(ReverbContext)); + ALOGV("\tEffectCreate %p, size %zu", pContext, sizeof(ReverbContext)); ALOGV("\tEffectCreate end\n"); return 0; } /* end EffectCreate */ @@ -570,15 +572,15 @@ void Reverb_free(ReverbContext *pContext){ for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){ if (MemTab.Region[i].Size != 0){ if (MemTab.Region[i].pBaseAddress != NULL){ - ALOGV("\tfree() - START freeing %ld bytes for region %u at %p\n", + ALOGV("\tfree() - START freeing %" PRIu32 " bytes for region %u at %p\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); free(MemTab.Region[i].pBaseAddress); - ALOGV("\tfree() - END freeing %ld bytes for region %u at %p\n", + ALOGV("\tfree() - END freeing %" PRIu32 " bytes for region %u at %p\n", 
MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); }else{ - ALOGV("\tLVM_ERROR : free() - trying to free with NULL pointer %ld bytes " + ALOGV("\tLVM_ERROR : free() - trying to free with NULL pointer %" PRIu32 " bytes " "for region %u at %p ERROR\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); } @@ -771,11 +773,12 @@ int Reverb_init(ReverbContext *pContext){ MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size); if (MemTab.Region[i].pBaseAddress == LVM_NULL){ - ALOGV("\tLVREV_ERROR :Reverb_init CreateInstance Failed to allocate %ld " - "bytes for region %u\n", MemTab.Region[i].Size, i ); + ALOGV("\tLVREV_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32 + " bytes for region %u\n", MemTab.Region[i].Size, i ); bMallocFailure = LVM_TRUE; }else{ - ALOGV("\tReverb_init CreateInstance allocate %ld bytes for region %u at %p\n", + ALOGV("\tReverb_init CreateInstance allocate %" PRIu32 + " bytes for region %u at %p\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); } } @@ -787,11 +790,11 @@ int Reverb_init(ReverbContext *pContext){ if(bMallocFailure == LVM_TRUE){ for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){ if (MemTab.Region[i].pBaseAddress == LVM_NULL){ - ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed to allocate %ld bytes " - "for region %u - Not freeing\n", MemTab.Region[i].Size, i ); + ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32 + " bytes for region %u - Not freeing\n", MemTab.Region[i].Size, i ); }else{ - ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed: but allocated %ld bytes " - "for region %u at %p- free\n", + ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed: but allocated %" PRIu32 + " bytes for region %u at %p- free\n", MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress); free(MemTab.Region[i].pBaseAddress); } diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp index 50b444a..f865d38 100644 --- a/media/libmedia/AudioRecord.cpp +++ 
b/media/libmedia/AudioRecord.cpp @@ -18,7 +18,9 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "AudioRecord" +#include <inttypes.h> #include <sys/resource.h> + #include <binder/IPCThreadState.h> #include <media/AudioRecord.h> #include <utils/Log.h> @@ -468,7 +470,7 @@ status_t AudioRecord::openRecord_l(size_t epoch) if (frameCount == 0) { frameCount = minFrameCount; } else if (frameCount < minFrameCount) { - ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount); + ALOGE("frameCount %zu < minFrameCount %zu", frameCount, minFrameCount); return BAD_VALUE; } @@ -555,17 +557,17 @@ status_t AudioRecord::openRecord_l(size_t epoch) mCblk = cblk; // note that temp is the (possibly revised) value of frameCount if (temp < frameCount || (frameCount == 0 && temp == 0)) { - ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp); + ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp); } frameCount = temp; mAwaitBoost = false; if (mFlags & AUDIO_INPUT_FLAG_FAST) { if (trackFlags & IAudioFlinger::TRACK_FAST) { - ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", frameCount); + ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount); mAwaitBoost = true; } else { - ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", frameCount); + ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount); // once denied, do not request again if IAudioRecord is re-created mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST); } @@ -740,7 +742,7 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize) if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) { // sanity-check. user is most-likely passing an error code, and it would // make the return value ambiguous (actualSize vs error). 
- ALOGE("AudioRecord::read(buffer=%p, size=%u (%d)", buffer, userSize, userSize); + ALOGE("AudioRecord::read(buffer=%p, size=%zu (%zu)", buffer, userSize, userSize); return BAD_VALUE; } @@ -921,10 +923,10 @@ nsecs_t AudioRecord::processAudioBuffer() size_t nonContig; status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig); LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0), - "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount); + "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount); requested = &ClientProxy::kNonBlocking; size_t avail = audioBuffer.frameCount + nonContig; - ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d", + ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d", mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err); if (err != NO_ERROR) { if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) { @@ -952,8 +954,8 @@ nsecs_t AudioRecord::processAudioBuffer() // Sanity check on returned size if (ssize_t(readSize) < 0 || readSize > reqSize) { - ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes", - reqSize, (int) readSize); + ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes", + reqSize, ssize_t(readSize)); return NS_NEVER; } @@ -1092,7 +1094,7 @@ bool AudioRecord::AudioRecordThread::threadLoop() ns = 1000000000LL; // fall through default: - LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns); + LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns); pauseInternal(ns); return true; } diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp index 15b32ff..a47d45c 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -245,6 +245,19 @@ status_t AudioSystem::getOutputSamplingRate(uint32_t* samplingRate, audio_stream return getSamplingRate(output, samplingRate); } +status_t AudioSystem::getOutputSamplingRateForAttr(uint32_t* 
samplingRate, + const audio_attributes_t *attr) +{ + if (attr == NULL) { + return BAD_VALUE; + } + audio_io_handle_t output = getOutputForAttr(attr); + if (output == 0) { + return PERMISSION_DENIED; + } + return getSamplingRate(output, samplingRate); +} + status_t AudioSystem::getSamplingRate(audio_io_handle_t output, uint32_t* samplingRate) { @@ -310,7 +323,7 @@ status_t AudioSystem::getFrameCount(audio_io_handle_t output, return BAD_VALUE; } - ALOGV("getFrameCount() output %d, frameCount %d", output, *frameCount); + ALOGV("getFrameCount() output %d, frameCount %zu", output, *frameCount); return NO_ERROR; } @@ -476,7 +489,7 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle OutputDescriptor *outputDesc = new OutputDescriptor(*desc); gOutputs.add(ioHandle, outputDesc); - ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %u " + ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %zu " "latency %d", outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask, outputDesc->frameCount, outputDesc->latency); @@ -501,7 +514,7 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle desc = (const OutputDescriptor *)param2; ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x channel mask %#x " - "frameCount %d latency %d", + "frameCount %zu latency %d", ioHandle, desc->samplingRate, desc->format, desc->channelMask, desc->frameCount, desc->latency); OutputDescriptor *outputDesc = gOutputs.valueAt(index); @@ -633,6 +646,19 @@ audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream, return aps->getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo); } +audio_io_handle_t AudioSystem::getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t 
*offloadInfo) +{ + if (attr == NULL) return 0; + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return 0; + return aps->getOutputForAttr(attr, samplingRate, format, channelMask, flags, offloadInfo); +} + status_t AudioSystem::startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session) diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index e6827ee..898d58d 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -15,12 +15,13 @@ ** limitations under the License. */ - //#define LOG_NDEBUG 0 #define LOG_TAG "AudioTrack" +#include <inttypes.h> #include <math.h> #include <sys/resource.h> + #include <audio_utils/primitives.h> #include <binder/IPCThreadState.h> #include <media/AudioTrack.h> @@ -89,7 +90,7 @@ status_t AudioTrack::getMinFrameCount( streamType, sampleRate); return BAD_VALUE; } - ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d", + ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d", *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency); return NO_ERROR; } @@ -103,6 +104,10 @@ AudioTrack::AudioTrack() mPreviousSchedulingGroup(SP_DEFAULT), mPausedPosition(0) { + mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN; + mAttributes.usage = AUDIO_USAGE_UNKNOWN; + mAttributes.flags = 0x0; + strcpy(mAttributes.tags, ""); } AudioTrack::AudioTrack( @@ -129,7 +134,7 @@ AudioTrack::AudioTrack( mStatus = set(streamType, sampleRate, format, channelMask, frameCount, flags, cbf, user, notificationFrames, 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, - offloadInfo, uid, pid); + offloadInfo, uid, pid, NULL /*no audio attributes*/); } AudioTrack::AudioTrack( @@ -156,7 +161,7 @@ AudioTrack::AudioTrack( mStatus = set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags, cbf, user, notificationFrames, sharedBuffer, false 
/*threadCanCallJava*/, sessionId, transferType, offloadInfo, - uid, pid); + uid, pid, NULL /*no audio attributes*/); } AudioTrack::~AudioTrack() @@ -199,7 +204,8 @@ status_t AudioTrack::set( transfer_type transferType, const audio_offload_info_t *offloadInfo, int uid, - pid_t pid) + pid_t pid, + audio_attributes_t* pAttributes) { ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, " "flags #%x, notificationFrames %u, sessionId %d, transferType %d", @@ -245,7 +251,7 @@ status_t AudioTrack::set( ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size()); - ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags); + ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags); AutoMutex lock(mLock); @@ -259,18 +265,33 @@ status_t AudioTrack::set( if (streamType == AUDIO_STREAM_DEFAULT) { streamType = AUDIO_STREAM_MUSIC; } - if (uint32_t(streamType) >= AUDIO_STREAM_CNT) { - ALOGE("Invalid stream type %d", streamType); - return BAD_VALUE; + + if (pAttributes == NULL) { + if (uint32_t(streamType) >= AUDIO_STREAM_CNT) { + ALOGE("Invalid stream type %d", streamType); + return BAD_VALUE; + } + setAttributesFromStreamType(streamType); + mStreamType = streamType; + } else { + if (!isValidAttributes(pAttributes)) { + ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]", + pAttributes->usage, pAttributes->content_type, pAttributes->flags, + pAttributes->tags); + } + // stream type shouldn't be looked at, this track has audio attributes + memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t)); + setStreamTypeFromAttributes(mAttributes); + ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]", + mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags); } - mStreamType = streamType; status_t status; if (sampleRate == 0) { - status = 
AudioSystem::getOutputSamplingRate(&sampleRate, streamType); + status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes); if (status != NO_ERROR) { ALOGE("Could not get output sample rate for stream type %d; status %d", - streamType, status); + mStreamType, status); return status; } } @@ -314,7 +335,7 @@ status_t AudioTrack::set( ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST); } // only allow deep buffering for music stream type - if (streamType != AUDIO_STREAM_MUSIC) { + if (mStreamType != AUDIO_STREAM_MUSIC) { flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER); } @@ -615,12 +636,12 @@ void AudioTrack::getAuxEffectSendLevel(float* level) const status_t AudioTrack::setSampleRate(uint32_t rate) { - if (mIsTimed || isOffloaded()) { + if (mIsTimed || isOffloadedOrDirect()) { return INVALID_OPERATION; } uint32_t afSamplingRate; - if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) { + if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) { return NO_INIT; } // Resampler implementation limits input sampling rate to 2 x output sampling rate. @@ -646,7 +667,7 @@ uint32_t AudioTrack::getSampleRate() const // sample rate can be updated during playback by the offloaded decoder so we need to // query the HAL and update if needed. 
// FIXME use Proxy return channel to update the rate from server and avoid polling here - if (isOffloaded_l()) { + if (isOffloadedOrDirect_l()) { if (mOutput != AUDIO_IO_HANDLE_NONE) { uint32_t sampleRate = 0; status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate); @@ -660,7 +681,7 @@ uint32_t AudioTrack::getSampleRate() const status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount) { - if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) { + if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) { return INVALID_OPERATION; } @@ -694,7 +715,7 @@ void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount) status_t AudioTrack::setMarkerPosition(uint32_t marker) { // The only purpose of setting marker position is to get a callback - if (mCbf == NULL || isOffloaded()) { + if (mCbf == NULL || isOffloadedOrDirect()) { return INVALID_OPERATION; } @@ -707,7 +728,7 @@ status_t AudioTrack::setMarkerPosition(uint32_t marker) status_t AudioTrack::getMarkerPosition(uint32_t *marker) const { - if (isOffloaded()) { + if (isOffloadedOrDirect()) { return INVALID_OPERATION; } if (marker == NULL) { @@ -723,7 +744,7 @@ status_t AudioTrack::getMarkerPosition(uint32_t *marker) const status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) { // The only purpose of setting position update period is to get a callback - if (mCbf == NULL || isOffloaded()) { + if (mCbf == NULL || isOffloadedOrDirect()) { return INVALID_OPERATION; } @@ -736,7 +757,7 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const { - if (isOffloaded()) { + if (isOffloadedOrDirect()) { return INVALID_OPERATION; } if (updatePeriod == NULL) { @@ -751,7 +772,7 @@ status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const status_t AudioTrack::setPosition(uint32_t position) { - if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) { + if 
(mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) { return INVALID_OPERATION; } if (position > mFrameCount) { @@ -784,10 +805,10 @@ status_t AudioTrack::getPosition(uint32_t *position) const } AutoMutex lock(mLock); - if (isOffloaded_l()) { + if (isOffloadedOrDirect_l()) { uint32_t dspFrames = 0; - if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) { + if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) { ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition); *position = mPausedPosition; return NO_ERROR; @@ -822,7 +843,7 @@ status_t AudioTrack::getBufferPosition(uint32_t *position) status_t AudioTrack::reload() { - if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) { + if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) { return INVALID_OPERATION; } @@ -867,12 +888,12 @@ status_t AudioTrack::createTrack_l(size_t epoch) return NO_INIT; } - audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat, + audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat, mChannelMask, mFlags, mOffloadInfo); if (output == AUDIO_IO_HANDLE_NONE) { - ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, " - "channel mask %#x, flags %#x", - mStreamType, mSampleRate, mFormat, mChannelMask, mFlags); + ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x," + " channel mask %#x, flags %#x", + mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags); return BAD_VALUE; } { @@ -973,14 +994,14 @@ status_t AudioTrack::createTrack_l(size_t epoch) // Ensure that buffer depth covers at least audio hardware latency uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate); - ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d", + ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d", afFrameCount, 
minBufCount, afSampleRate, afLatency); if (minBufCount <= nBuffering) { minBufCount = nBuffering; } size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate; - ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u" + ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u" ", afLatency=%d", minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency); @@ -988,7 +1009,7 @@ status_t AudioTrack::createTrack_l(size_t epoch) frameCount = minFrameCount; } else if (frameCount < minFrameCount) { // not ALOGW because it happens all the time when playing key clicks over A2DP - ALOGV("Minimum buffer size corrected from %d to %d", + ALOGV("Minimum buffer size corrected from %zu to %zu", frameCount, minFrameCount); frameCount = minFrameCount; } @@ -1018,6 +1039,10 @@ status_t AudioTrack::createTrack_l(size_t epoch) trackFlags |= IAudioFlinger::TRACK_OFFLOAD; } + if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { + trackFlags |= IAudioFlinger::TRACK_DIRECT; + } + size_t temp = frameCount; // temp may be replaced by a revised value of frameCount, // but we will still need the original value also sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType, @@ -1071,14 +1096,14 @@ status_t AudioTrack::createTrack_l(size_t epoch) // In current design, AudioTrack client checks and ensures frame count validity before // passing it to AudioFlinger so AudioFlinger should not return a different value except // for fast track as it uses a special method of assigning frame count. 
- ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp); + ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp); } frameCount = temp; mAwaitBoost = false; if (mFlags & AUDIO_OUTPUT_FLAG_FAST) { if (trackFlags & IAudioFlinger::TRACK_FAST) { - ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount); + ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount); mAwaitBoost = true; if (mSharedBuffer == 0) { // Theoretically double-buffering is not required for fast tracks, @@ -1089,7 +1114,7 @@ status_t AudioTrack::createTrack_l(size_t epoch) } } } else { - ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount); + ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount); // once denied, do not request again if IAudioTrack is re-created mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST); if (mSharedBuffer == 0) { @@ -1109,6 +1134,16 @@ status_t AudioTrack::createTrack_l(size_t epoch) //return NO_INIT; } } + if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { + if (trackFlags & IAudioFlinger::TRACK_DIRECT) { + ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful"); + } else { + ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server"); + mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT); + // FIXME This is a warning, not an error, so don't return error status + //return NO_INIT; + } + } // We retain a copy of the I/O handle, but don't own the reference mOutput = output; @@ -1304,6 +1339,16 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) return INVALID_OPERATION; } + if (isDirect()) { + AutoMutex lock(mLock); + int32_t flags = android_atomic_and( + ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), + &mCblk->mFlags); + if (flags & CBLK_INVALID) { + return DEAD_OBJECT; + } + } + if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) { // Sanity-check: user is most-likely passing an error 
code, and it would // make the return value ambiguous (actualSize vs error). @@ -1452,7 +1497,7 @@ nsecs_t AudioTrack::processAudioBuffer() // for offloaded tracks restoreTrack_l() will just update the sequence and clear // AudioSystem cache. We should not exit here but after calling the callback so // that the upper layers can recreate the track - if (!isOffloaded_l() || (mSequence == mObservedSequence)) { + if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) { status_t status = restoreTrack_l("processAudioBuffer"); mLock.unlock(); // Run again immediately, but with a new IAudioTrack @@ -1578,7 +1623,7 @@ nsecs_t AudioTrack::processAudioBuffer() mObservedSequence = sequence; mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL); // for offloaded tracks, just wait for the upper layers to recreate the track - if (isOffloaded()) { + if (isOffloadedOrDirect()) { return NS_INACTIVE; } } @@ -1636,10 +1681,10 @@ nsecs_t AudioTrack::processAudioBuffer() size_t nonContig; status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig); LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0), - "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount); + "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount); requested = &ClientProxy::kNonBlocking; size_t avail = audioBuffer.frameCount + nonContig; - ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d", + ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d", mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err); if (err != NO_ERROR) { if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR || @@ -1674,8 +1719,8 @@ nsecs_t AudioTrack::processAudioBuffer() // Sanity check on returned size if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) { - ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes", - reqSize, (int) writtenSize); + ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes", + reqSize, 
ssize_t(writtenSize)); return NS_NEVER; } @@ -1736,7 +1781,7 @@ nsecs_t AudioTrack::processAudioBuffer() status_t AudioTrack::restoreTrack_l(const char *from) { ALOGW("dead IAudioTrack, %s, creating a new one from %s()", - isOffloaded_l() ? "Offloaded" : "PCM", from); + isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from); ++mSequence; status_t result; @@ -1744,7 +1789,7 @@ status_t AudioTrack::restoreTrack_l(const char *from) // output parameters in createTrack_l() AudioSystem::clearAudioConfigCache(); - if (isOffloaded_l()) { + if (isOffloadedOrDirect_l()) { // FIXME re-creation of offloaded tracks is not yet implemented return DEAD_OBJECT; } @@ -1830,6 +1875,19 @@ bool AudioTrack::isOffloaded() const return isOffloaded_l(); } +bool AudioTrack::isDirect() const +{ + AutoMutex lock(mLock); + return isDirect_l(); +} + +bool AudioTrack::isOffloadedOrDirect() const +{ + AutoMutex lock(mLock); + return isOffloadedOrDirect_l(); +} + + status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const { @@ -1858,6 +1916,136 @@ uint32_t AudioTrack::getUnderrunFrames() const return mProxy->getUnderrunFrames(); } +void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) { + mAttributes.flags = 0x0; + + switch(streamType) { + case AUDIO_STREAM_DEFAULT: + case AUDIO_STREAM_MUSIC: + mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC; + mAttributes.usage = AUDIO_USAGE_MEDIA; + break; + case AUDIO_STREAM_VOICE_CALL: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH; + mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION; + break; + case AUDIO_STREAM_ENFORCED_AUDIBLE: + mAttributes.flags |= AUDIO_FLAG_AUDIBILITY_ENFORCED; + // intended fall through, attributes in common with STREAM_SYSTEM + case AUDIO_STREAM_SYSTEM: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION; + mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION; + break; + case AUDIO_STREAM_RING: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION; + 
mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE; + break; + case AUDIO_STREAM_ALARM: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION; + mAttributes.usage = AUDIO_USAGE_ALARM; + break; + case AUDIO_STREAM_NOTIFICATION: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION; + mAttributes.usage = AUDIO_USAGE_NOTIFICATION; + break; + case AUDIO_STREAM_BLUETOOTH_SCO: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH; + mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION; + mAttributes.flags |= AUDIO_FLAG_SCO; + break; + case AUDIO_STREAM_DTMF: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION; + mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING; + break; + case AUDIO_STREAM_TTS: + mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH; + mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY; + break; + default: + ALOGE("invalid stream type %d when converting to attributes", streamType); + } +} + +void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) { + // flags to stream type mapping + if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) { + mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE; + return; + } + if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) { + mStreamType = AUDIO_STREAM_BLUETOOTH_SCO; + return; + } + + // usage to stream type mapping + switch (aa.usage) { + case AUDIO_USAGE_MEDIA: + case AUDIO_USAGE_GAME: + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: + mStreamType = AUDIO_STREAM_MUSIC; + return; + case AUDIO_USAGE_ASSISTANCE_SONIFICATION: + mStreamType = AUDIO_STREAM_SYSTEM; + return; + case AUDIO_USAGE_VOICE_COMMUNICATION: + mStreamType = AUDIO_STREAM_VOICE_CALL; + return; + + case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING: + mStreamType = AUDIO_STREAM_DTMF; + return; + + case AUDIO_USAGE_ALARM: + mStreamType = AUDIO_STREAM_ALARM; + return; + case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE: + 
mStreamType = AUDIO_STREAM_RING; + return; + + case AUDIO_USAGE_NOTIFICATION: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED: + case AUDIO_USAGE_NOTIFICATION_EVENT: + mStreamType = AUDIO_STREAM_NOTIFICATION; + return; + + case AUDIO_USAGE_UNKNOWN: + default: + mStreamType = AUDIO_STREAM_MUSIC; + } +} + +bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) { + // has flags that map to a strategy? + if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) { + return true; + } + + // has known usage? + switch (paa->usage) { + case AUDIO_USAGE_UNKNOWN: + case AUDIO_USAGE_MEDIA: + case AUDIO_USAGE_VOICE_COMMUNICATION: + case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING: + case AUDIO_USAGE_ALARM: + case AUDIO_USAGE_NOTIFICATION: + case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED: + case AUDIO_USAGE_NOTIFICATION_EVENT: + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: + case AUDIO_USAGE_ASSISTANCE_SONIFICATION: + case AUDIO_USAGE_GAME: + break; + default: + return false; + } + return true; +} // ========================================================================= void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused) @@ -1918,7 +2106,7 @@ bool AudioTrack::AudioTrackThread::threadLoop() ns = 1000000000LL; // fall through default: - LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns); + LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns); pauseInternal(ns); return true; } diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp index 0dbfa62..eec025e 100644 --- a/media/libmedia/AudioTrackShared.cpp +++ 
b/media/libmedia/AudioTrackShared.cpp @@ -135,7 +135,7 @@ status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *reques // pipe should not be overfull if (!(0 <= filled && (size_t) filled <= mFrameCount)) { if (mIsOut) { - ALOGE("Shared memory control block is corrupt (filled=%d, mFrameCount=%u); " + ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); " "shutting down", filled, mFrameCount); mIsShutdown = true; status = NO_INIT; @@ -338,7 +338,7 @@ size_t ClientProxy::getFramesFilled() { ssize_t filled = rear - front; // pipe should not be overfull if (!(0 <= filled && (size_t) filled <= mFrameCount)) { - ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled); + ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled); return 0; } return (size_t)filled; @@ -555,7 +555,7 @@ status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush) ssize_t filled = rear - front; // pipe should not already be overfull if (!(0 <= filled && (size_t) filled <= mFrameCount)) { - ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled); + ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled); mIsShutdown = true; } if (mIsShutdown) { @@ -642,7 +642,7 @@ void ServerProxy::releaseBuffer(Buffer* buffer) } // FIXME AudioRecord wakeup needs to be optimized; it currently wakes up client every time if (!mIsOut || (mAvailToClient + stepCount >= minimum)) { - ALOGV("mAvailToClient=%u stepCount=%u minimum=%u", mAvailToClient, stepCount, minimum); + ALOGV("mAvailToClient=%zu stepCount=%zu minimum=%zu", mAvailToClient, stepCount, minimum); int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex); if (!(old & CBLK_FUTEX_WAKE)) { (void) syscall(__NR_futex, &cblk->mFutex, @@ -675,7 +675,7 @@ size_t AudioTrackServerProxy::framesReady() ssize_t filled = rear - cblk->u.mStreaming.mFront; // pipe should not already be overfull if (!(0 <= 
filled && (size_t) filled <= mFrameCount)) { - ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled); + ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled); mIsShutdown = true; return 0; } @@ -834,7 +834,7 @@ void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer) size_t newPosition = position + stepCount; int32_t setFlags = 0; if (!(position <= newPosition && newPosition <= mFrameCount)) { - ALOGW("%s newPosition %u outside [%u, %u]", __func__, newPosition, position, mFrameCount); + ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position, mFrameCount); newPosition = mFrameCount; } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) { if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) { diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp index 4992798..7d1ddfd 100644 --- a/media/libmedia/CharacterEncodingDetector.cpp +++ b/media/libmedia/CharacterEncodingDetector.cpp @@ -112,7 +112,7 @@ void CharacterEncodingDetector::detectAndConvert() { if (allprintable) { // since 'buf' is empty, ICU would return a UTF-8 matcher with low confidence, so // no need to even call it - ALOGV("all tags are printable, assuming ascii (%d)", strlen(buf)); + ALOGV("all tags are printable, assuming ascii (%zu)", strlen(buf)); } else { ucsdet_setText(csd, buf, strlen(buf), &status); int32_t matches; @@ -267,11 +267,11 @@ const UCharsetMatch *CharacterEncodingDetector::getPreferred( Vector<const UCharsetMatch*> matches; UErrorCode status = U_ZERO_ERROR; - ALOGV("%d matches", nummatches); + ALOGV("%zu matches", nummatches); for (size_t i = 0; i < nummatches; i++) { const char *encname = ucsdet_getName(ucma[i], &status); int confidence = ucsdet_getConfidence(ucma[i], &status); - ALOGV("%d: %s %d", i, encname, confidence); + ALOGV("%zu: %s %d", i, encname, confidence); matches.push_back(ucma[i]); } @@ -287,7 +287,7 @@ const 
UCharsetMatch *CharacterEncodingDetector::getPreferred( return matches[0]; } - ALOGV("considering %d matches", num); + ALOGV("considering %zu matches", num); // keep track of how many "special" characters result when converting the input using each // encoding @@ -315,7 +315,7 @@ const UCharsetMatch *CharacterEncodingDetector::getPreferred( freqcoverage = frequent_ja_coverage; } - ALOGV("%d: %s %d", i, encname, confidence); + ALOGV("%zu: %s %d", i, encname, confidence); UConverter *conv = ucnv_open(encname, &status); const char *source = input; const char *sourceLimit = input + len; diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp index 77d131b..41a9065 100644 --- a/media/libmedia/IAudioPolicyService.cpp +++ b/media/libmedia/IAudioPolicyService.cpp @@ -64,7 +64,8 @@ enum { RELEASE_AUDIO_PATCH, LIST_AUDIO_PATCHES, SET_AUDIO_PORT_CONFIG, - REGISTER_CLIENT + REGISTER_CLIENT, + GET_OUTPUT_FOR_ATTR }; class BpAudioPolicyService : public BpInterface<IAudioPolicyService> @@ -155,6 +156,36 @@ public: return static_cast <audio_io_handle_t> (reply.readInt32()); } + virtual audio_io_handle_t getOutputForAttr( + const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + if (attr == NULL) { + ALOGE("Writing NULL audio attributes - shouldn't happen"); + return (audio_io_handle_t) 0; + } + data.write(attr, sizeof(audio_attributes_t)); + data.writeInt32(samplingRate); + data.writeInt32(static_cast <uint32_t>(format)); + data.writeInt32(channelMask); + data.writeInt32(static_cast <uint32_t>(flags)); + // hasOffloadInfo + if (offloadInfo == NULL) { + data.writeInt32(0); + } else { + data.writeInt32(1); + data.write(offloadInfo, sizeof(audio_offload_info_t)); + } + remote()->transact(GET_OUTPUT_FOR_ATTR, data, 
&reply); + return static_cast <audio_io_handle_t> (reply.readInt32()); + } + virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session) @@ -614,6 +645,30 @@ status_t BnAudioPolicyService::onTransact( return NO_ERROR; } break; + case GET_OUTPUT_FOR_ATTR: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + audio_attributes_t *attr = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t)); + data.read(attr, sizeof(audio_attributes_t)); + uint32_t samplingRate = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); + audio_channel_mask_t channelMask = data.readInt32(); + audio_output_flags_t flags = + static_cast <audio_output_flags_t>(data.readInt32()); + bool hasOffloadInfo = data.readInt32() != 0; + audio_offload_info_t offloadInfo; + if (hasOffloadInfo) { + data.read(&offloadInfo, sizeof(audio_offload_info_t)); + } + audio_io_handle_t output = getOutputForAttr(attr, + samplingRate, + format, + channelMask, + flags, + hasOffloadInfo ? &offloadInfo : NULL); + reply->writeInt32(static_cast <int>(output)); + return NO_ERROR; + } break; + case START_OUTPUT: { CHECK_INTERFACE(IAudioPolicyService, data, reply); audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32()); diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp index 432d890..38f717c 100644 --- a/media/libmedia/IMediaMetadataRetriever.cpp +++ b/media/libmedia/IMediaMetadataRetriever.cpp @@ -15,8 +15,10 @@ ** limitations under the License. 
*/ +#include <inttypes.h> #include <stdint.h> #include <sys/types.h> + #include <binder/Parcel.h> #include <media/IMediaHTTPService.h> #include <media/IMediaMetadataRetriever.h> @@ -125,7 +127,7 @@ public: sp<IMemory> getFrameAtTime(int64_t timeUs, int option) { - ALOGV("getTimeAtTime: time(%lld us) and option(%d)", timeUs, option); + ALOGV("getTimeAtTime: time(%" PRId64 " us) and option(%d)", timeUs, option); Parcel data, reply; data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor()); data.writeInt64(timeUs); @@ -237,7 +239,7 @@ status_t BnMediaMetadataRetriever::onTransact( CHECK_INTERFACE(IMediaMetadataRetriever, data, reply); int64_t timeUs = data.readInt64(); int option = data.readInt32(); - ALOGV("getTimeAtTime: time(%lld us) and option(%d)", timeUs, option); + ALOGV("getTimeAtTime: time(%" PRId64 " us) and option(%d)", timeUs, option); #ifndef DISABLE_GROUP_SCHEDULE_HACK setSchedPolicy(data); #endif diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp index 8e58162..95af006 100644 --- a/media/libmedia/IMediaRecorder.cpp +++ b/media/libmedia/IMediaRecorder.cpp @@ -17,6 +17,10 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "IMediaRecorder" + +#include <inttypes.h> +#include <unistd.h> + #include <utils/Log.h> #include <binder/Parcel.h> #include <camera/ICamera.h> @@ -24,8 +28,6 @@ #include <media/IMediaRecorder.h> #include <gui/Surface.h> #include <gui/IGraphicBufferProducer.h> -#include <unistd.h> - namespace android { @@ -167,7 +169,7 @@ public: } status_t setOutputFile(int fd, int64_t offset, int64_t length) { - ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length); + ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length); Parcel data, reply; data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor()); data.writeFileDescriptor(fd); diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp index 28238c4..e9e453b 100644 --- a/media/libmedia/MediaProfiles.cpp +++ 
b/media/libmedia/MediaProfiles.cpp @@ -475,7 +475,7 @@ static bool isTimelapseProfile(camcorder_quality quality) { } void MediaProfiles::initRequiredProfileRefs(const Vector<int>& cameraIds) { - ALOGV("Number of camera ids: %d", cameraIds.size()); + ALOGV("Number of camera ids: %zu", cameraIds.size()); CHECK(cameraIds.size() > 0); mRequiredProfileRefs = new RequiredProfiles[cameraIds.size()]; for (size_t i = 0, n = cameraIds.size(); i < n; ++i) { @@ -602,14 +602,14 @@ void MediaProfiles::checkAndAddRequiredProfilesIfNecessary() { int index = getCamcorderProfileIndex(cameraId, profile->mQuality); if (index != -1) { - ALOGV("Profile quality %d for camera %d already exists", + ALOGV("Profile quality %d for camera %zu already exists", profile->mQuality, cameraId); CHECK(index == refIndex); continue; } // Insert the new profile - ALOGV("Add a profile: quality %d=>%d for camera %d", + ALOGV("Add a profile: quality %d=>%d for camera %zu", mCamcorderProfiles[info->mRefProfileIndex]->mQuality, profile->mQuality, cameraId); diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp index a55e09c..2aa0592 100644 --- a/media/libmedia/SoundPool.cpp +++ b/media/libmedia/SoundPool.cpp @@ -16,6 +16,9 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "SoundPool" + +#include <inttypes.h> + #include <utils/Log.h> #define USE_SHARED_MEM_BUFFER @@ -212,7 +215,7 @@ int SoundPool::load(const char* path, int priority __unused) int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused) { - ALOGV("load: fd=%d, offset=%lld, length=%lld, priority=%d", + ALOGV("load: fd=%d, offset=%" PRId64 ", length=%" PRId64 ", priority=%d", fd, offset, length, priority); Mutex::Autolock lock(&mLock); sp<Sample> sample = new Sample(++mNextSampleID, fd, offset, length); @@ -462,7 +465,8 @@ Sample::Sample(int sampleID, int fd, int64_t offset, int64_t length) mFd = dup(fd); mOffset = offset; mLength = length; - ALOGV("create sampleID=%d, fd=%d, offset=%lld, length=%lld", mSampleID, 
mFd, mLength, mOffset); + ALOGV("create sampleID=%d, fd=%d, offset=%" PRId64 " length=%" PRId64, + mSampleID, mFd, mLength, mOffset); } void Sample::init() @@ -516,7 +520,7 @@ status_t Sample::doLoad() ALOGE("Unable to load sample: %s", mUrl); goto error; } - ALOGV("pointer = %p, size = %u, sampleRate = %u, numChannels = %d", + ALOGV("pointer = %p, size = %zu, sampleRate = %u, numChannels = %d", mHeap->getBase(), mSize, sampleRate, numChannels); if (sampleRate > kMaxSampleRate) { diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp index 1d6bb6f..39a239d 100644 --- a/media/libmedia/mediametadataretriever.cpp +++ b/media/libmedia/mediametadataretriever.cpp @@ -18,6 +18,8 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "MediaMetadataRetriever" +#include <inttypes.h> + #include <binder/IServiceManager.h> #include <binder/IPCThreadState.h> #include <media/mediametadataretriever.h> @@ -114,7 +116,7 @@ status_t MediaMetadataRetriever::setDataSource( status_t MediaMetadataRetriever::setDataSource(int fd, int64_t offset, int64_t length) { - ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length); + ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length); Mutex::Autolock _l(mLock); if (mRetriever == 0) { ALOGE("retriever is not initialized"); @@ -129,7 +131,7 @@ status_t MediaMetadataRetriever::setDataSource(int fd, int64_t offset, int64_t l sp<IMemory> MediaMetadataRetriever::getFrameAtTime(int64_t timeUs, int option) { - ALOGV("getFrameAtTime: time(%lld us) option(%d)", timeUs, option); + ALOGV("getFrameAtTime: time(%" PRId64 " us) option(%d)", timeUs, option); Mutex::Autolock _l(mLock); if (mRetriever == 0) { ALOGE("retriever is not initialized"); diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp index 0be01a9..406f9f2 100644 --- a/media/libmedia/mediaplayer.cpp +++ b/media/libmedia/mediaplayer.cpp @@ -17,12 +17,14 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "MediaPlayer" -#include 
<utils/Log.h> -#include <sys/types.h> +#include <fcntl.h> +#include <inttypes.h> #include <sys/stat.h> +#include <sys/types.h> #include <unistd.h> -#include <fcntl.h> + +#include <utils/Log.h> #include <binder/IServiceManager.h> #include <binder/IPCThreadState.h> @@ -157,7 +159,7 @@ status_t MediaPlayer::setDataSource( status_t MediaPlayer::setDataSource(int fd, int64_t offset, int64_t length) { - ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length); + ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length); status_t err = UNKNOWN_ERROR; const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { @@ -194,7 +196,7 @@ status_t MediaPlayer::invoke(const Parcel& request, Parcel *reply) (mCurrentState != MEDIA_PLAYER_STATE_ERROR) && ((mCurrentState & MEDIA_PLAYER_IDLE) != MEDIA_PLAYER_IDLE); if ((mPlayer != NULL) && hasBeenInitialized) { - ALOGV("invoke %d", request.dataSize()); + ALOGV("invoke %zu", request.dataSize()); return mPlayer->invoke(request, reply); } ALOGE("invoke failed: wrong state %X", mCurrentState); @@ -818,7 +820,7 @@ void MediaPlayer::died() audio_format_t* pFormat, const sp<IMemoryHeap>& heap, size_t *pSize) { - ALOGV("decode(%d, %lld, %lld)", fd, offset, length); + ALOGV("decode(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length); status_t status; const sp<IMediaPlayerService>& service = getMediaPlayerService(); if (service != 0) { diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp index 3710e46..c8192e9 100644 --- a/media/libmedia/mediarecorder.cpp +++ b/media/libmedia/mediarecorder.cpp @@ -17,6 +17,9 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "MediaRecorder" + +#include <inttypes.h> + #include <utils/Log.h> #include <media/mediarecorder.h> #include <binder/IServiceManager.h> @@ -286,7 +289,7 @@ status_t MediaRecorder::setOutputFile(const char* path) status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length) { - ALOGV("setOutputFile(%d, %lld, %lld)", fd, 
offset, length); + ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length); if (mMediaRecorder == NULL) { ALOGE("media recorder is not initialized yet"); return INVALID_OPERATION; diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp index 778eb9a..76632a7 100644 --- a/media/libmediaplayerservice/MediaPlayerService.cpp +++ b/media/libmediaplayerservice/MediaPlayerService.cpp @@ -307,7 +307,7 @@ sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay( return new RemoteDisplay(client, iface.string()); } -status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& args) const +status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& /*args*/) const { const size_t SIZE = 256; char buffer[SIZE]; @@ -673,8 +673,8 @@ status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64 ALOGV("st_dev = %llu", sb.st_dev); ALOGV("st_mode = %u", sb.st_mode); - ALOGV("st_uid = %lu", sb.st_uid); - ALOGV("st_gid = %lu", sb.st_gid); + ALOGV("st_uid = %lu", static_cast<unsigned long>(sb.st_uid)); + ALOGV("st_gid = %lu", static_cast<unsigned long>(sb.st_gid)); ALOGV("st_size = %llu", sb.st_size); if (offset >= sb.st_size) { @@ -803,7 +803,7 @@ status_t MediaPlayerService::Client::setMetadataFilter(const Parcel& filter) } status_t MediaPlayerService::Client::getMetadata( - bool update_only, bool apply_filter, Parcel *reply) + bool update_only, bool /*apply_filter*/, Parcel *reply) { sp<MediaPlayerBase> player = getPlayer(); if (player == 0) return UNKNOWN_ERROR; @@ -1926,8 +1926,8 @@ bool CallbackThread::threadLoop() { status_t MediaPlayerService::AudioCache::open( uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask, audio_format_t format, int bufferCount, - AudioCallback cb, void *cookie, audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo) + AudioCallback cb, void *cookie, audio_output_flags_t /*flags*/, + 
const audio_offload_info_t* /*offloadInfo*/) { ALOGV("open(%u, %d, 0x%x, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount); if (mHeap->getHeapID() < 0) { @@ -1994,7 +1994,7 @@ status_t MediaPlayerService::AudioCache::wait() } void MediaPlayerService::AudioCache::notify( - void* cookie, int msg, int ext1, int ext2, const Parcel *obj) + void* cookie, int msg, int ext1, int ext2, const Parcel* /*obj*/) { ALOGV("notify(%p, %d, %d, %d)", cookie, msg, ext1, ext2); AudioCache* p = static_cast<AudioCache*>(cookie); diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp index 80c7e0a..a91b0e5 100644 --- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp +++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp @@ -147,8 +147,8 @@ status_t MetadataRetrieverClient::setDataSource(int fd, int64_t offset, int64_t } ALOGV("st_dev = %llu", sb.st_dev); ALOGV("st_mode = %u", sb.st_mode); - ALOGV("st_uid = %lu", sb.st_uid); - ALOGV("st_gid = %lu", sb.st_gid); + ALOGV("st_uid = %lu", static_cast<unsigned long>(sb.st_uid)); + ALOGV("st_gid = %lu", static_cast<unsigned long>(sb.st_gid)); ALOGV("st_size = %llu", sb.st_size); if (offset >= sb.st_size) { diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp index deeddd1..749ef96 100644 --- a/media/libmediaplayerservice/MidiFile.cpp +++ b/media/libmediaplayerservice/MidiFile.cpp @@ -114,7 +114,7 @@ MidiFile::~MidiFile() { } status_t MidiFile::setDataSource( - const sp<IMediaHTTPService> &httpService, + const sp<IMediaHTTPService> & /*httpService*/, const char* path, const KeyedVector<String8, String8> *) { ALOGV("MidiFile::setDataSource url=%s", path); diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp index 4adf018..0b65861 100644 --- a/media/libnbaio/MonoPipe.cpp +++ b/media/libnbaio/MonoPipe.cpp @@ -14,6 +14,8 @@ * limitations under the License. 
*/ +#include <inttypes.h> + #define LOG_TAG "MonoPipe" //#define LOG_NDEBUG 0 @@ -87,7 +89,7 @@ MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBl static const uint64_t kUnsignedHiBitsMask = ~(0xFFFFFFFFull); if ((N & kSignedHiBitsMask) || (D & kUnsignedHiBitsMask)) { ALOGE("Cannot reduce sample rate to local clock frequency ratio to fit" - " in a 32/32 bit rational. (max reduction is 0x%016llx/0x%016llx" + " in a 32/32 bit rational. (max reduction is 0x%016" PRIx64 "/0x%016" PRIx64 "). getNextWriteTimestamp calls will be non-functional", N, D); return; } @@ -308,7 +310,7 @@ int64_t MonoPipe::offsetTimestampByAudioFrames(int64_t ts, size_t audFrames) // error, but then zero out the ratio in the linear transform so // that we don't try to do any conversions from now on. This // MonoPipe's getNextWriteTimestamp is now broken for good. - ALOGE("Overflow when attempting to convert %d audio frames to" + ALOGE("Overflow when attempting to convert %zu audio frames to" " duration in local time. 
getNextWriteTimestamp will fail from" " now on.", audFrames); mSamplesToLocalTime.a_to_b_numer = 0; diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp index ff3284c..d641e74 100644 --- a/media/libnbaio/NBAIO.cpp +++ b/media/libnbaio/NBAIO.cpp @@ -137,7 +137,7 @@ ssize_t NBAIO_Source::readVia(readVia_t via, size_t total, void *user, ssize_t NBAIO_Port::negotiate(const NBAIO_Format offers[], size_t numOffers, NBAIO_Format counterOffers[], size_t& numCounterOffers) { - ALOGV("negotiate offers=%p numOffers=%u countersOffers=%p numCounterOffers=%u", + ALOGV("negotiate offers=%p numOffers=%zu countersOffers=%p numCounterOffers=%zu", offers, numOffers, counterOffers, numCounterOffers); if (Format_isValid(mFormat)) { for (size_t i = 0; i < numOffers; ++i) { diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h index 6130084..79bdfe8 100644 --- a/services/audioflinger/PlaybackTracks.h +++ b/services/audioflinger/PlaybackTracks.h @@ -54,6 +54,7 @@ public: return mStreamType; } bool isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; } + bool isDirect() const { return (mFlags & IAudioFlinger::TRACK_DIRECT) != 0; } status_t setParameters(const String8& keyValuePairs); status_t attachAuxEffect(int EffectId); void setAuxBuffer(int EffectId, int32_t *buffer); diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp index d6390b1..8ce819c 100755 --- a/services/audioflinger/Threads.cpp +++ b/services/audioflinger/Threads.cpp @@ -1660,7 +1660,7 @@ bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track) track->mState = TrackBase::STOPPED; if (!trackActive) { removeTrack_l(track); - } else if (track->isFastTrack() || track->isOffloaded()) { + } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) { track->mState = TrackBase::STOPPING_1; } @@ -1868,7 +1868,9 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l() } mNormalFrameCount = 
multiplier * mFrameCount; // round up to nearest 16 frames to satisfy AudioMixer - mNormalFrameCount = (mNormalFrameCount + 15) & ~15; + if (mType == MIXER || mType == DUPLICATING) { + mNormalFrameCount = (mNormalFrameCount + 15) & ~15; + } ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount, mNormalFrameCount); @@ -2656,7 +2658,7 @@ status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp) if (mNormalSink != 0) { return mNormalSink->getTimestamp(timestamp); } - if (mType == OFFLOAD && mOutput->stream->get_presentation_position) { + if ((mType == OFFLOAD || mType == DIRECT) && mOutput->stream->get_presentation_position) { uint64_t position64; int ret = mOutput->stream->get_presentation_position( mOutput->stream, &position64, ×tamp.mTime); @@ -3947,14 +3949,16 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep // The first time a track is added we wait // for all its buffers to be filled before processing it uint32_t minFrames; - if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) { + if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()) { minFrames = mNormalFrameCount; } else { minFrames = 1; } - if ((track->framesReady() >= minFrames) && track->isReady() && - !track->isPaused() && !track->isTerminated()) + ALOGI("prepareTracks_l minFrames %d state %d frames ready %d, ", + minFrames, track->mState, track->framesReady()); + if ((track->framesReady() >= minFrames) && track->isReady() && !track->isPaused() && + !track->isStopping_2() && !track->isStopped()) { ALOGVV("track %d s=%08x [OK]", track->name(), cblk->mServer); @@ -3981,17 +3985,26 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep if (!mEffectChains.isEmpty() && last) { mEffectChains[0]->clearInputBuffer(); } - - ALOGVV("track %d s=%08x [NOT READY]", track->name(), cblk->mServer); - if ((track->sharedBuffer() != 0) || 
track->isTerminated() || - track->isStopped() || track->isPaused()) { + if (track->isStopping_1()) { + track->mState = TrackBase::STOPPING_2; + } + if ((track->sharedBuffer() != 0) || track->isStopped() || + track->isStopping_2() || track->isPaused()) { // We have consumed all the buffers of this track. // Remove it from the list of active tracks. - // TODO: implement behavior for compressed audio - size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; + size_t audioHALFrames; + if (audio_is_linear_pcm(mFormat)) { + audioHALFrames = (latency_l() * mSampleRate) / 1000; + } else { + audioHALFrames = 0; + } + size_t framesWritten = mBytesWritten / mFrameSize; if (mStandby || !last || track->presentationComplete(framesWritten, audioHALFrames)) { + if (track->isStopping_2()) { + track->mState = TrackBase::STOPPED; + } if (track->isStopped()) { track->reset(); } diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp index 1e906ad..4fbb973 100644 --- a/services/audioflinger/Tracks.cpp +++ b/services/audioflinger/Tracks.cpp @@ -706,7 +706,7 @@ void AudioFlinger::PlaybackThread::Track::stop() if (playbackThread->mActiveTracks.indexOf(this) < 0) { reset(); mState = STOPPED; - } else if (!isFastTrack() && !isOffloaded()) { + } else if (!isFastTrack() && !isOffloaded() && !isDirect()) { mState = STOPPED; } else { // For fast tracks prepareTracks_l() will set state to STOPPING_2 @@ -860,7 +860,7 @@ status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& times } Mutex::Autolock _l(thread->mLock); PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - if (!isOffloaded()) { + if (!isOffloaded() && !isDirect()) { if (!playbackThread->mLatchQValid) { mPreviousValid = false; return INVALID_OPERATION; @@ -980,8 +980,6 @@ bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWrit } if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) { - ALOGV("presentationComplete() session %d complete: 
framesWritten %d", - mSessionId, framesWritten); triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); mAudioTrackServerProxy->setStreamEndDone(); return true; diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk index 874f18f..7bba05b 100644 --- a/services/audioflinger/tests/Android.mk +++ b/services/audioflinger/tests/Android.mk @@ -1,5 +1,8 @@ # Build the unit tests for audioflinger +# +# resampler unit test +# LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) @@ -20,6 +23,7 @@ LOCAL_C_INCLUDES := \ bionic/libstdc++/include \ external/gtest/include \ external/stlport/stlport \ + $(call include-path-for, audio-utils) \ frameworks/av/services/audioflinger LOCAL_SRC_FILES := \ @@ -29,3 +33,41 @@ LOCAL_MODULE := resampler_tests LOCAL_MODULE_TAGS := tests include $(BUILD_EXECUTABLE) + +# +# audio mixer test tool +# +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + test-mixer.cpp \ + ../AudioMixer.cpp.arm \ + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + external/stlport/stlport \ + $(call include-path-for, audio-effects) \ + $(call include-path-for, audio-utils) \ + frameworks/av/services/audioflinger + +LOCAL_STATIC_LIBRARIES := \ + libsndfile + +LOCAL_SHARED_LIBRARIES := \ + libstlport \ + libeffects \ + libnbaio \ + libcommon_time_client \ + libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog + +LOCAL_MODULE:= test-mixer + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_EXECUTABLE) diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh new file mode 100755 index 0000000..93bff47 --- /dev/null +++ b/services/audioflinger/tests/mixer_to_wav_tests.sh @@ -0,0 +1,134 @@ +#!/bin/bash +# +# This script uses test-mixer to generate WAV files +# for evaluation of the AudioMixer component. 
+# +# Sine and chirp signals are used for input because they +# show up as clear lines, either horizontal or diagonal, +# on a spectrogram. This means easy verification of multiple +# track mixing. +# +# After execution, look for created subdirectories like +# mixer_i_i +# mixer_i_f +# mixer_f_f +# +# Recommend using a program such as audacity to evaluate +# the output WAV files, e.g. +# +# cd testdir +# audacity *.wav +# +# Using Audacity: +# +# Under "Waveform" view mode you can zoom into the +# start of the WAV file to verify proper ramping. +# +# Select "Spectrogram" to see verify the lines +# (sine = horizontal, chirp = diagonal) which should +# be clear (except for around the start as the volume +# ramping causes spectral distortion). + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +# ensure we have mm +. $ANDROID_BUILD_TOP/build/envsetup.sh + +pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/ + +# build +pwd +mm + +# send to device +echo "waiting for device" +adb root && adb wait-for-device remount +adb push $OUT/system/lib/libaudioresampler.so /system/lib +adb push $OUT/system/bin/test-mixer /system/bin + +# createwav creates a series of WAV files testing various +# mixer settings +# $1 = flags +# $2 = directory +function createwav() { +# create directory if it doesn't exist + if [ ! 
-d $2 ]; then + mkdir $2 + fi + +# Test: +# process__genericResampling +# track__Resample / track__genericResample + adb shell test-mixer $1 -s 48000 \ + -o /sdcard/tm48000gr.wav \ + sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 + adb pull /sdcard/tm48000gr.wav $2 + +# Test: +# process__genericResample +# track__Resample / track__genericResample +# track__NoResample / track__16BitsStereo / track__16BitsMono +# Aux buffer + adb shell test-mixer $1 -s 9307 \ + -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \ + sine:2,1000,3000 sine:1,2000,9307 chirp:2,9307 + adb pull /sdcard/tm9307gra.wav $2 + adb pull /sdcard/aux9307gra.wav $2 + +# Test: +# process__genericNoResampling +# track__NoResample / track__16BitsStereo / track__16BitsMono + adb shell test-mixer $1 -s 32000 \ + -o /sdcard/tm32000gnr.wav \ + sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000 + adb pull /sdcard/tm32000gnr.wav $2 + +# Test: +# process__genericNoResampling +# track__NoResample / track__16BitsStereo / track__16BitsMono +# Aux buffer + adb shell test-mixer $1 -s 32000 \ + -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \ + sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000 + adb pull /sdcard/tm32000gnra.wav $2 + adb pull /sdcard/aux32000gnra.wav $2 + +# Test: +# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling +# Downmixer + adb shell test-mixer $1 -s 32000 \ + -o /sdcard/tm32000nrot.wav \ + sine:6,1000,32000 + adb pull /sdcard/tm32000nrot.wav $2 + +# Test: +# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling +# Aux buffer + adb shell test-mixer $1 -s 44100 \ + -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \ + sine:2,2000,44100 + adb pull /sdcard/tm44100nrota.wav $2 + adb pull /sdcard/aux44100nrota.wav $2 +} + +# +# Call createwav to generate WAV files in various combinations +# +# i_i = integer input track, integer mixer output +# f_f = float input track, float mixer output +# i_f = integer input track, float_mixer output +# +# If the 
mixer output is float, then the output WAV file is pcm float. +# +# TODO: create a "snr" like "diff" to automatically +# compare files in these directories together. +# + +createwav "" "tests/mixer_i_i" +createwav "-f -m" "tests/mixer_f_f" +createwav "-m" "tests/mixer_i_f" + +popd diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp index 8f9c270..4a67d0b 100644 --- a/services/audioflinger/tests/resampler_tests.cpp +++ b/services/audioflinger/tests/resampler_tests.cpp @@ -33,200 +33,7 @@ #include <gtest/gtest.h> #include <media/AudioBufferProvider.h> #include "AudioResampler.h" - -#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) - -template<typename T, typename U> -struct is_same -{ - static const bool value = false; -}; - -template<typename T> -struct is_same<T, T> // partial specialization -{ - static const bool value = true; -}; - -template<typename T> -static inline T convertValue(double val) -{ - if (is_same<T, int16_t>::value) { - return floor(val * 32767.0 + 0.5); - } else if (is_same<T, int32_t>::value) { - return floor(val * (1UL<<31) + 0.5); - } - return val; // assume float or double -} - -/* Creates a type-independent audio buffer provider from - * a buffer base address, size, framesize, and input increment array. - * - * No allocation or deallocation of the provided buffer is done. 
- */ -class TestProvider : public android::AudioBufferProvider { -public: - TestProvider(const void* addr, size_t frames, size_t frameSize, - const std::vector<size_t>& inputIncr) - : mAddr(addr), - mNumFrames(frames), - mFrameSize(frameSize), - mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0) - { - } - - virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS ) - { - size_t requestedFrames = buffer->frameCount; - if (requestedFrames > mNumFrames - mNextFrame) { - buffer->frameCount = mNumFrames - mNextFrame; - } - if (!mInputIncr.empty()) { - size_t provided = mInputIncr[mNextIdx++]; - ALOGV("getNextBuffer() mValue[%d]=%u not %u", - mNextIdx-1, provided, buffer->frameCount); - if (provided < buffer->frameCount) { - buffer->frameCount = provided; - } - if (mNextIdx >= mInputIncr.size()) { - mNextIdx = 0; - } - } - ALOGV("getNextBuffer() requested %u frames out of %u frames available" - " and returned %u frames\n", - requestedFrames, mNumFrames - mNextFrame, buffer->frameCount); - mUnrel = buffer->frameCount; - if (buffer->frameCount > 0) { - buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; - return android::NO_ERROR; - } else { - buffer->raw = NULL; - return android::NOT_ENOUGH_DATA; - } - } - - virtual void releaseBuffer(Buffer* buffer) - { - if (buffer->frameCount > mUnrel) { - ALOGE("releaseBuffer() released %u frames but only %u available " - "to release\n", buffer->frameCount, mUnrel); - mNextFrame += mUnrel; - mUnrel = 0; - } else { - - ALOGV("releaseBuffer() released %u frames out of %u frames available " - "to release\n", buffer->frameCount, mUnrel); - mNextFrame += buffer->frameCount; - mUnrel -= buffer->frameCount; - } - buffer->frameCount = 0; - buffer->raw = NULL; - } - - void reset() - { - mNextFrame = 0; - } - - size_t getNumFrames() - { - return mNumFrames; - } - - void setIncr(const std::vector<size_t> inputIncr) - { - mNextIdx = 0; - mInputIncr = inputIncr; - } - -protected: - const void* mAddr; 
// base address - size_t mNumFrames; // total frames - int mFrameSize; // frame size (# channels * bytes per sample) - size_t mNextFrame; // index of next frame to provide - size_t mUnrel; // number of frames not yet released - std::vector<size_t> mInputIncr; // number of frames provided per call - size_t mNextIdx; // index of next entry in mInputIncr to use -}; - -/* Creates a buffer filled with a sine wave. - * - * Returns a pair consisting of the sine signal buffer and the number of frames. - * The caller must delete[] the buffer when no longer needed (no shared_ptr<>). - */ -template<typename T> -static std::pair<T*, size_t> createSine(size_t channels, - double freq, double samplingRate, double time) -{ - double tscale = 1. / samplingRate; - size_t frames = static_cast<size_t>(samplingRate * time); - T* buffer = new T[frames * channels]; - for (size_t i = 0; i < frames; ++i) { - double t = i * tscale; - double y = sin(2. * M_PI * freq * t); - T yt = convertValue<T>(y); - - for (size_t j = 0; j < channels; ++j) { - buffer[i*channels + j] = yt / (j + 1); - } - } - return std::make_pair(buffer, frames); -} - -/* Creates a buffer filled with a chirp signal (a sine wave sweep). - * - * Returns a pair consisting of the chirp signal buffer and the number of frames. - * The caller must delete[] the buffer when no longer needed (no shared_ptr<>). - * - * When creating the Chirp, note that the frequency is the true sinusoidal - * frequency not the sampling rate. - * - * http://en.wikipedia.org/wiki/Chirp - */ -template<typename T> -static std::pair<T*, size_t> createChirp(size_t channels, - double minfreq, double maxfreq, double samplingRate, double time) -{ - double tscale = 1. / samplingRate; - size_t frames = static_cast<size_t>(samplingRate * time); - T *buffer = new T[frames * channels]; - // note the chirp constant k has a divide-by-two. - double k = (maxfreq - minfreq) / (2. 
* time); - for (size_t i = 0; i < frames; ++i) { - double t = i * tscale; - double y = sin(2. * M_PI * (k * t + minfreq) * t); - T yt = convertValue<T>(y); - - for (size_t j = 0; j < channels; ++j) { - buffer[i*channels + j] = yt / (j + 1); - } - } - return std::make_pair(buffer, frames); -} - -/* This derived class creates a buffer provider of datatype T, - * consisting of an input signal, e.g. from createChirp(). - * The number of frames can be obtained from the base class - * TestProvider::getNumFrames(). - */ -template <typename T> -class SignalProvider : public TestProvider { -public: - SignalProvider(const std::pair<T*, size_t>& bufferInfo, size_t channels, - const std::vector<size_t>& values) - : TestProvider(bufferInfo.first, bufferInfo.second, channels * sizeof(T), values), - mManagedPtr(bufferInfo.first) - { - } - - virtual ~SignalProvider() - { - delete[] mManagedPtr; - } - -protected: - T* mManagedPtr; -}; +#include "test_utils.h" void resample(void *output, size_t outputFrames, const std::vector<size_t> &outputIncr, android::AudioBufferProvider *provider, android::AudioResampler *resampler) @@ -261,10 +68,11 @@ void testBufferIncrement(size_t channels, unsigned inputFreq, unsigned outputFre enum android::AudioResampler::src_quality quality) { // create the provider - std::vector<size_t> inputIncr; - SignalProvider<int16_t> provider(createChirp<int16_t>(channels, - 0., outputFreq/2., outputFreq, outputFreq/2000.), - channels, inputIncr); + std::vector<int> inputIncr; + SignalProvider provider; + provider.setChirp<int16_t>(channels, + 0., outputFreq/2., outputFreq, outputFreq/2000.); + provider.setIncr(inputIncr); // calculate the output size size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq; @@ -339,10 +147,11 @@ void testStopbandDownconversion(size_t channels, enum android::AudioResampler::src_quality quality) { // create the provider - std::vector<size_t> inputIncr; - SignalProvider<int16_t> 
provider(createChirp<int16_t>(channels, - 0., inputFreq/2., inputFreq, inputFreq/2000.), - channels, inputIncr); + std::vector<int> inputIncr; + SignalProvider provider; + provider.setChirp<int16_t>(channels, + 0., inputFreq/2., inputFreq, inputFreq/2000.); + provider.setIncr(inputIncr); // calculate the output size size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq; diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp new file mode 100644 index 0000000..3940702 --- /dev/null +++ b/services/audioflinger/tests/test-mixer.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <math.h> +#include <vector> +#include <audio_utils/primitives.h> +#include <audio_utils/sndfile.h> +#include <media/AudioBufferProvider.h> +#include "AudioMixer.h" +#include "test_utils.h" + +/* Testing is typically through creation of an output WAV file from several + * source inputs, to be later analyzed by an audio program such as Audacity. + * + * Sine or chirp functions are typically more useful as input to the mixer + * as they show up as straight lines on a spectrogram if successfully mixed. 
+ * + * A sample shell script is provided: mixer_to_wave_tests.sh + */ + +using namespace android; + +static void usage(const char* name) { + fprintf(stderr, "Usage: %s [-f] [-m]" + " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]" + " (<input-file> | <command>)+\n", name); + fprintf(stderr, " -f enable floating point input track\n"); + fprintf(stderr, " -m enable floating point mixer output\n"); + fprintf(stderr, " -s mixer sample-rate\n"); + fprintf(stderr, " -o <output-file> WAV file, pcm16 (or float if -m specified)\n"); + fprintf(stderr, " -a <aux-buffer-file>\n"); + fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n"); + fprintf(stderr, " <input-file> is a WAV file\n"); + fprintf(stderr, " <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n"); + fprintf(stderr, " 'chirp:<channels>,<samplerate>'\n"); +} + +static int writeFile(const char *filename, const void *buffer, + uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) { + if (filename == NULL) { + return 0; // ok to pass in NULL filename + } + // write output to file. + SF_INFO info; + info.frames = 0; + info.samplerate = sampleRate; + info.channels = channels; + info.format = SF_FORMAT_WAV | (isBufferFloat ? 
SF_FORMAT_FLOAT : SF_FORMAT_PCM_16); + printf("saving file:%s channels:%d samplerate:%d frames:%d\n", + filename, info.channels, info.samplerate, frames); + SNDFILE *sf = sf_open(filename, SFM_WRITE, &info); + if (sf == NULL) { + perror(filename); + return EXIT_FAILURE; + } + if (isBufferFloat) { + (void) sf_writef_float(sf, (float*)buffer, frames); + } else { + (void) sf_writef_short(sf, (short*)buffer, frames); + } + sf_close(sf); + return EXIT_SUCCESS; +} + +int main(int argc, char* argv[]) { + const char* const progname = argv[0]; + bool useInputFloat = false; + bool useMixerFloat = false; + bool useRamp = true; + uint32_t outputSampleRate = 48000; + uint32_t outputChannels = 2; // stereo for now + std::vector<int> Pvalues; + const char* outputFilename = NULL; + const char* auxFilename = NULL; + std::vector<int32_t> Names; + std::vector<SignalProvider> Providers; + + for (int ch; (ch = getopt(argc, argv, "fms:o:a:P:")) != -1;) { + switch (ch) { + case 'f': + useInputFloat = true; + break; + case 'm': + useMixerFloat = true; + break; + case 's': + outputSampleRate = atoi(optarg); + break; + case 'o': + outputFilename = optarg; + break; + case 'a': + auxFilename = optarg; + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return EXIT_FAILURE; + } + break; + case '?': + default: + usage(progname); + return EXIT_FAILURE; + } + } + argc -= optind; + argv += optind; + + if (argc == 0) { + usage(progname); + return EXIT_FAILURE; + } + if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) { + fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS); + return EXIT_FAILURE; + } + + size_t outputFrames = 0; + + // create providers for each track + Providers.resize(argc); + for (int i = 0; i < argc; ++i) { + static const char chirp[] = "chirp:"; + static const char sine[] = "sine:"; + static const double kSeconds = 1; + + if (!strncmp(argv[i], chirp, strlen(chirp))) { + std::vector<int> v; + 
+ parseCSV(argv[i] + strlen(chirp), v); + if (v.size() == 2) { + printf("creating chirp(%d %d)\n", v[0], v[1]); + if (useInputFloat) { + Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds); + } else { + Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else if (!strncmp(argv[i], sine, strlen(sine))) { + std::vector<int> v; + + parseCSV(argv[i] + strlen(sine), v); + if (v.size() == 3) { + printf("creating sine(%d %d)\n", v[0], v[1]); + if (useInputFloat) { + Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds); + } else { + Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else { + printf("creating filename(%s)\n", argv[i]); + if (useInputFloat) { + Providers[i].setFile<float>(argv[i]); + } else { + Providers[i].setFile<short>(argv[i]); + } + Providers[i].setIncr(Pvalues); + } + // calculate the number of output frames + size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate + / Providers[i].getSampleRate(); + if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames + outputFrames = nframes; + } + } + + // create the output buffer. + const size_t outputFrameSize = outputChannels + * (useMixerFloat ? sizeof(float) : sizeof(int16_t)); + const size_t outputSize = outputFrames * outputFrameSize; + void *outputAddr = NULL; + (void) posix_memalign(&outputAddr, 32, outputSize); + memset(outputAddr, 0, outputSize); + + // create the aux buffer, if needed. + const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always + const size_t auxSize = outputFrames * auxFrameSize; + void *auxAddr = NULL; + if (auxFilename) { + (void) posix_memalign(&auxAddr, 32, auxSize); + memset(auxAddr, 0, auxSize); + } + + // create the mixer. 
+ const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960 + AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate); + audio_format_t inputFormat = useInputFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + audio_format_t mixerFormat = useMixerFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks + static float f0; // zero + + // set up the tracks. + for (size_t i = 0; i < Providers.size(); ++i) { + //printf("track %d out of %d\n", i, Providers.size()); + uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels()); + int32_t name = mixer->getTrackName(channelMask, + inputFormat, AUDIO_SESSION_OUTPUT_MIX); + ALOG_ASSERT(name >= 0); + Names.push_back(name); + mixer->setBufferProvider(name, &Providers[i]); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (void *) outputAddr); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)mixerFormat); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT, + (void *)(uintptr_t)inputFormat); + mixer->setParameter( + name, + AudioMixer::RESAMPLE, + AudioMixer::SAMPLE_RATE, + (void *)(uintptr_t)Providers[i].getSampleRate()); + if (useRamp) { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f); + } else { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f); + } + if (auxFilename) { + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (void *) auxAddr); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, 
&f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f); + } + mixer->enable(name); + } + + // pump the mixer to process data. + size_t i; + for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) { + for (size_t j = 0; j < Names.size(); ++j) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (char *) outputAddr + i * outputFrameSize); + if (auxFilename) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (char *) auxAddr + i * auxFrameSize); + } + } + mixer->process(AudioBufferProvider::kInvalidPTS); + } + outputFrames = i; // reset output frames to the data actually produced. + + // write to files + writeFile(outputFilename, outputAddr, + outputSampleRate, outputChannels, outputFrames, useMixerFloat); + if (auxFilename) { + // Aux buffer is always in q4_27 format for now. + // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count) + ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1); + writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false); + } + + delete mixer; + free(outputAddr); + free(auxAddr); + return EXIT_SUCCESS; +} diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h new file mode 100644 index 0000000..f954292 --- /dev/null +++ b/services/audioflinger/tests/test_utils.h @@ -0,0 +1,307 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_TEST_UTILS_H +#define ANDROID_AUDIO_TEST_UTILS_H + +#include <audio_utils/sndfile.h> + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#endif + +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + +template<typename T> +static inline T convertValue(double val) +{ + if (is_same<T, int16_t>::value) { + return floor(val * 32767.0 + 0.5); + } else if (is_same<T, int32_t>::value) { + return floor(val * (1UL<<31) + 0.5); + } + return val; // assume float or double +} + +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. +static inline int parseCSV(const char *string, std::vector<int>& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values[0] = atoi(p = string); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values[i++] = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} + +/* Creates a type-independent audio buffer provider from + * a buffer base address, size, framesize, and input increment array. + * + * No allocation or deallocation of the provided buffer is done. 
+ */ +class TestProvider : public android::AudioBufferProvider { +public: + TestProvider(void* addr, size_t frames, size_t frameSize, + const std::vector<int>& inputIncr) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0) + { + } + + TestProvider() + : mAddr(NULL), mNumFrames(0), mFrameSize(0), + mNextFrame(0), mUnrel(0), mNextIdx(0) + { + } + + void setIncr(const std::vector<int>& inputIncr) { + mInputIncr = inputIncr; + mNextIdx = 0; + } + + virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS) + { + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (!mInputIncr.empty()) { + size_t provided = mInputIncr[mNextIdx++]; + ALOGV("getNextBuffer() mValue[%d]=%u not %u", + mNextIdx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextIdx >= mInputIncr.size()) { + mNextIdx = 0; + } + } + ALOGV("getNextBuffer() requested %u frames out of %u frames available" + " and returned %u frames\n", + requestedFrames, mNumFrames - mNextFrame, buffer->frameCount); + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return android::NO_ERROR; + } else { + buffer->raw = NULL; + return android::NOT_ENOUGH_DATA; + } + } + + virtual void releaseBuffer(Buffer* buffer) + { + if (buffer->frameCount > mUnrel) { + ALOGE("releaseBuffer() released %u frames but only %u available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + + ALOGV("releaseBuffer() released %u frames out of %u frames available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; + } + + void reset() + { + mNextFrame 
= 0; + } + + size_t getNumFrames() + { + return mNumFrames; + } + + +protected: + void* mAddr; // base address + size_t mNumFrames; // total frames + int mFrameSize; // frame size (# channels * bytes per sample) + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + std::vector<int> mInputIncr; // number of frames provided per call + size_t mNextIdx; // index of next entry in mInputIncr to use +}; + +/* Creates a buffer filled with a sine wave. + */ +template<typename T> +static void createSine(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double freq) +{ + double tscale = 1. / sampleRate; + T* buffer = reinterpret_cast<T*>(vbuffer); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * freq * t); + T yt = convertValue<T>(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / (j + 1); + } + } +} + +/* Creates a buffer filled with a chirp signal (a sine wave sweep). + * + * When creating the Chirp, note that the frequency is the true sinusoidal + * frequency not the sampling rate. + * + * http://en.wikipedia.org/wiki/Chirp + */ +template<typename T> +static void createChirp(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double minfreq, double maxfreq) +{ + double tscale = 1. / sampleRate; + T *buffer = reinterpret_cast<T*>(vbuffer); + // note the chirp constant k has a divide-by-two. + double k = (maxfreq - minfreq) / (2. * tscale * frames); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * (k * t + minfreq) * t); + T yt = convertValue<T>(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / (j + 1); + } + } +} + +/* This derived class creates a buffer provider of datatype T, + * consisting of an input signal, e.g. from createChirp(). + * The number of frames can be obtained from the base class + * TestProvider::getNumFrames(). 
+ */ + +class SignalProvider : public TestProvider { +public: + SignalProvider() + : mSampleRate(0), + mChannels(0) + { + } + + virtual ~SignalProvider() + { + free(mAddr); + mAddr = NULL; + } + + template <typename T> + void setChirp(size_t channels, double minfreq, double maxfreq, double sampleRate, double time) + { + createBufferByFrames<T>(channels, sampleRate, sampleRate*time); + createChirp<T>(mAddr, mNumFrames, mChannels, mSampleRate, minfreq, maxfreq); + } + + template <typename T> + void setSine(size_t channels, + double freq, double sampleRate, double time) + { + createBufferByFrames<T>(channels, sampleRate, sampleRate*time); + createSine<T>(mAddr, mNumFrames, mChannels, mSampleRate, freq); + } + + template <typename T> + void setFile(const char *file_in) + { + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return; + } + createBufferByFrames<T>(info.channels, info.samplerate, info.frames); + if (is_same<T, float>::value) { + (void) sf_readf_float(sf, (float *) mAddr, mNumFrames); + } else if (is_same<T, short>::value) { + (void) sf_readf_short(sf, (short *) mAddr, mNumFrames); + } + sf_close(sf); + } + + template <typename T> + void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames) + { + mNumFrames = frames; + mChannels = channels; + mFrameSize = mChannels * sizeof(T); + free(mAddr); + mAddr = malloc(mFrameSize * mNumFrames); + mSampleRate = sampleRate; + } + + uint32_t getSampleRate() const { + return mSampleRate; + } + + uint32_t getNumChannels() const { + return mChannels; + } + +protected: + uint32_t mSampleRate; + uint32_t mChannels; +}; + +#endif // ANDROID_AUDIO_TEST_UTILS_H diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h index c025a45..33e4397 100644 --- a/services/audiopolicy/AudioPolicyInterface.h +++ b/services/audiopolicy/AudioPolicyInterface.h @@ -90,6 +90,12 @@ public: 
audio_channel_mask_t channelMask, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo) = 0; + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) = 0; // indicates to the audio policy manager that the output starts being used by corresponding stream. virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp index 8cc386a..6342d8f 100644 --- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp +++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp @@ -131,6 +131,22 @@ audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, format, channelMask, flags, offloadInfo); } +audio_io_handle_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + ALOGV("getOutput()"); + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->getOutputForAttr(attr, samplingRate, + format, channelMask, flags, offloadInfo); +} + status_t AudioPolicyService::startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session) diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp index d4c9374..74c6207 100644 --- a/services/audiopolicy/AudioPolicyManager.cpp +++ b/services/audiopolicy/AudioPolicyManager.cpp @@ -122,6 +122,11 @@ const StringToEnum sFormatNameToEnumTable[] = { STRING_TO_ENUM(AUDIO_FORMAT_MP3), STRING_TO_ENUM(AUDIO_FORMAT_AAC), STRING_TO_ENUM(AUDIO_FORMAT_VORBIS), + STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V1), + 
STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V2), + STRING_TO_ENUM(AUDIO_FORMAT_OPUS), + STRING_TO_ENUM(AUDIO_FORMAT_AC3), + STRING_TO_ENUM(AUDIO_FORMAT_E_AC3), }; const StringToEnum sOutChannelsNameToEnumTable[] = { @@ -623,13 +628,53 @@ audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo) { - audio_io_handle_t output = 0; - uint32_t latency = 0; + routing_strategy strategy = getStrategy(stream); audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/); ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x", device, stream, samplingRate, format, channelMask, flags); + return getOutputForDevice(device, stream, samplingRate,format, channelMask, flags, + offloadInfo); +} + +audio_io_handle_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + if (attr == NULL) { + ALOGE("getOutputForAttr() called with NULL audio attributes"); + return 0; + } + ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s", + attr->usage, attr->content_type, attr->tags); + + // TODO this is where filtering for custom policies (rerouting, dynamic sources) will go + routing_strategy strategy = (routing_strategy) getStrategyForAttr(attr); + audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/); + ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x", + device, samplingRate, format, channelMask, flags); + + audio_stream_type_t stream = streamTypefromAttributesInt(attr); + return getOutputForDevice(device, stream, samplingRate, format, channelMask, flags, + offloadInfo); +} + +audio_io_handle_t AudioPolicyManager::getOutputForDevice( + audio_devices_t device, + audio_stream_type_t stream, + uint32_t samplingRate, + 
audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + audio_io_handle_t output = 0; + uint32_t latency = 0; + #ifdef AUDIO_POLICY_TEST if (mCurOutput != 0) { ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d", @@ -1515,7 +1560,7 @@ status_t AudioPolicyManager::dump(int fd) snprintf(buffer, SIZE, " Stream Can be muted Index Min Index Max Index Cur [device : index]...\n"); write(fd, buffer, strlen(buffer)); - for (int i = 0; i < AUDIO_STREAM_CNT; i++) { + for (size_t i = 0; i < AUDIO_STREAM_CNT; i++) { snprintf(buffer, SIZE, " %02zu ", i); write(fd, buffer, strlen(buffer)); mStreams[i].dump(fd); @@ -1653,7 +1698,7 @@ status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role, } } *generation = curAudioPortGeneration(); - ALOGV("listAudioPorts() got %d ports needed %d", portsWritten, *num_ports); + ALOGV("listAudioPorts() got %zu ports needed %d", portsWritten, *num_ports); return NO_ERROR; } @@ -1997,7 +2042,7 @@ status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches, generation == NULL) { return BAD_VALUE; } - ALOGV("listAudioPatches() num_patches %d patches %p available patches %d", + ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu", *num_patches, patches, mAudioPatches.size()); if (patches == NULL) { *num_patches = 0; @@ -2009,13 +2054,13 @@ status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches, i < mAudioPatches.size() && patchesWritten < patchesMax; i++) { patches[patchesWritten] = mAudioPatches[i]->mPatch; patches[patchesWritten++].id = mAudioPatches[i]->mHandle; - ALOGV("listAudioPatches() patch %d num_sources %d num_sinks %d", + ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d", i, mAudioPatches[i]->mPatch.num_sources, mAudioPatches[i]->mPatch.num_sinks); } *num_patches = mAudioPatches.size(); *generation = curAudioPortGeneration(); - 
ALOGV("listAudioPatches() got %d patches needed %d", patchesWritten, *num_patches); + ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches); return NO_ERROR; } @@ -2779,7 +2824,7 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, { if (mHwModules[module_idx]->mInputProfiles[profile_index]->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) { - ALOGV("checkInputsForDevice(): adding profile %d from module %d", + ALOGV("checkInputsForDevice(): adding profile %zu from module %zu", profile_index, module_idx); profiles.add(mHwModules[module_idx]->mInputProfiles[profile_index]); } @@ -2903,7 +2948,7 @@ status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device, profile_index++) { sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index]; if (profile->mSupportedDevices.types() & device) { - ALOGV("checkInputsForDevice(): clearing direct input profile %d on module %d", + ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu", profile_index, module_index); if (profile->mSamplingRates[0] == 0) { profile->mSamplingRates.clear(); @@ -3239,6 +3284,44 @@ AudioPolicyManager::routing_strategy AudioPolicyManager::getStrategy( } } +uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) { + // flags to strategy mapping + if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) { + return (uint32_t) STRATEGY_ENFORCED_AUDIBLE; + } + + // usage to strategy mapping + switch (attr->usage) { + case AUDIO_USAGE_MEDIA: + case AUDIO_USAGE_GAME: + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: + case AUDIO_USAGE_ASSISTANCE_SONIFICATION: + return (uint32_t) STRATEGY_MEDIA; + + case AUDIO_USAGE_VOICE_COMMUNICATION: + return (uint32_t) STRATEGY_PHONE; + + case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING: + return (uint32_t) STRATEGY_DTMF; + + case AUDIO_USAGE_ALARM: + case 
AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE: + return (uint32_t) STRATEGY_SONIFICATION; + + case AUDIO_USAGE_NOTIFICATION: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED: + case AUDIO_USAGE_NOTIFICATION_EVENT: + return (uint32_t) STRATEGY_SONIFICATION_RESPECTFUL; + + case AUDIO_USAGE_UNKNOWN: + default: + return (uint32_t) STRATEGY_MEDIA; + } +} + void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) { switch(stream) { case AUDIO_STREAM_MUSIC: @@ -4911,7 +4994,7 @@ void AudioPolicyManager::AudioPort::toAudioPort(struct audio_port *port) const } port->num_formats = i; - ALOGV("AudioPort::toAudioPort() num gains %d", mGains.size()); + ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size()); for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) { port->gains[i] = mGains[i]->mGain; @@ -5609,7 +5692,7 @@ sp<AudioPolicyManager::DeviceDescriptor> AudioPolicyManager::DeviceVector::getDe { sp<DeviceDescriptor> device; for (size_t i = 0; i < size(); i++) { - ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%d)->mId %d", id, i, itemAt(i)->mId); + ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%zu)->mId %d", id, i, itemAt(i)->mId); if (itemAt(i)->mId == id) { device = itemAt(i); break; @@ -5916,4 +5999,46 @@ void AudioPolicyManager::defaultAudioPolicyConfig(void) mHwModules.add(module); } +audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr) +{ + // flags to stream type mapping + if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) { + return AUDIO_STREAM_ENFORCED_AUDIBLE; + } + if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) { + return AUDIO_STREAM_BLUETOOTH_SCO; + } + + // usage to stream type mapping + switch (attr->usage) { + case AUDIO_USAGE_MEDIA: + case AUDIO_USAGE_GAME: + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + 
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: + return AUDIO_STREAM_MUSIC; + case AUDIO_USAGE_ASSISTANCE_SONIFICATION: + return AUDIO_STREAM_SYSTEM; + case AUDIO_USAGE_VOICE_COMMUNICATION: + return AUDIO_STREAM_VOICE_CALL; + + case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING: + return AUDIO_STREAM_DTMF; + + case AUDIO_USAGE_ALARM: + return AUDIO_STREAM_ALARM; + case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE: + return AUDIO_STREAM_RING; + + case AUDIO_USAGE_NOTIFICATION: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT: + case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED: + case AUDIO_USAGE_NOTIFICATION_EVENT: + return AUDIO_STREAM_NOTIFICATION; + + case AUDIO_USAGE_UNKNOWN: + default: + return AUDIO_STREAM_MUSIC; + } +} }; // namespace android diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h index 1abeb6a..c23d994 100644 --- a/services/audiopolicy/AudioPolicyManager.h +++ b/services/audiopolicy/AudioPolicyManager.h @@ -84,6 +84,12 @@ public: audio_channel_mask_t channelMask, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo); + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo); virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0); @@ -116,6 +122,8 @@ public: // return the strategy corresponding to a given stream type virtual uint32_t getStrategyForStream(audio_stream_type_t stream); + // return the strategy corresponding to the given audio attributes + virtual uint32_t getStrategyForAttr(const audio_attributes_t *attr); // return the enabled output devices for the given stream type virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream); @@ -755,6 +763,17 @@ private: uint32_t 
curAudioPortGeneration() const { return mAudioPortGeneration; } // converts device address to string sent to audio HAL via setParameters static String8 addressToParameter(audio_devices_t device, const String8 address); + // internal method to return the output handle for the given device and format + audio_io_handle_t getOutputForDevice( + audio_devices_t device, + audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo); + // internal function to derive a stream type value from audio attributes + audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr); }; }; diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h index 66d9cad..69673cd 100644 --- a/services/audiopolicy/AudioPolicyService.h +++ b/services/audiopolicy/AudioPolicyService.h @@ -70,6 +70,12 @@ public: audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, const audio_offload_info_t *offloadInfo = NULL); + virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr, + uint32_t samplingRate = 0, + audio_format_t format = AUDIO_FORMAT_DEFAULT, + audio_channel_mask_t channelMask = 0, + audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, + const audio_offload_info_t *offloadInfo = NULL); virtual status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, int session = 0); diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp index 73eccbf..648e82c 100644 --- a/services/camera/libcameraservice/CameraService.cpp +++ b/services/camera/libcameraservice/CameraService.cpp @@ -659,7 +659,8 @@ status_t CameraService::connectHelperLocked(const sp<ICameraClient>& cameraClien int clientUid, int callingPid, /*out*/ - sp<Client>& client) { + sp<Client>& client, + int halVersion) { int facing = -1; int deviceVersion = 
getDeviceVersion(cameraId, &facing); @@ -672,28 +673,47 @@ status_t CameraService::connectHelperLocked(const sp<ICameraClient>& cameraClien cameraId); } - switch(deviceVersion) { - case CAMERA_DEVICE_API_VERSION_1_0: - client = new CameraClient(this, cameraClient, - clientPackageName, cameraId, - facing, callingPid, clientUid, getpid()); - break; - case CAMERA_DEVICE_API_VERSION_2_0: - case CAMERA_DEVICE_API_VERSION_2_1: - case CAMERA_DEVICE_API_VERSION_3_0: - case CAMERA_DEVICE_API_VERSION_3_1: - case CAMERA_DEVICE_API_VERSION_3_2: - client = new Camera2Client(this, cameraClient, - clientPackageName, cameraId, - facing, callingPid, clientUid, getpid(), - deviceVersion); - break; - case -1: - ALOGE("Invalid camera id %d", cameraId); - return BAD_VALUE; - default: - ALOGE("Unknown camera device HAL version: %d", deviceVersion); - return INVALID_OPERATION; + if (halVersion < 0 || halVersion == deviceVersion) { + // Default path: HAL version is unspecified by caller, create CameraClient + // based on device version reported by the HAL. + switch(deviceVersion) { + case CAMERA_DEVICE_API_VERSION_1_0: + client = new CameraClient(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + break; + case CAMERA_DEVICE_API_VERSION_2_0: + case CAMERA_DEVICE_API_VERSION_2_1: + case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: + client = new Camera2Client(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + break; + case -1: + ALOGE("Invalid camera id %d", cameraId); + return BAD_VALUE; + default: + ALOGE("Unknown camera device HAL version: %d", deviceVersion); + return INVALID_OPERATION; + } + } else { + // A particular HAL version is requested by caller. Create CameraClient + // based on the requested HAL version. 
+ if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 && + halVersion == CAMERA_DEVICE_API_VERSION_1_0) { + // Only support higher HAL version device opened as HAL1.0 device. + client = new CameraClient(this, cameraClient, + clientPackageName, cameraId, + facing, callingPid, clientUid, getpid()); + } else { + // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet. + ALOGE("Invalid camera HAL version %x: HAL %x device can only be" + " opened as HAL %x device", halVersion, deviceVersion, + CAMERA_DEVICE_API_VERSION_1_0); + return INVALID_OPERATION; + } } status_t status = connectFinishUnsafe(client, client->getRemote()); @@ -762,6 +782,70 @@ status_t CameraService::connect( return OK; } +status_t CameraService::connectLegacy( + const sp<ICameraClient>& cameraClient, + int cameraId, int halVersion, + const String16& clientPackageName, + int clientUid, + /*out*/ + sp<ICamera>& device) { + + if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED && + mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_3) { + /* + * Either the HAL version is unspecified in which case this just creates + * a camera client selected by the latest device version, or + * it's a particular version in which case the HAL must supported + * the open_legacy call + */ + ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!", + __FUNCTION__, mModule->common.module_api_version); + return INVALID_OPERATION; + } + + String8 clientName8(clientPackageName); + int callingPid = getCallingPid(); + + LOG1("CameraService::connect legacy E (pid %d \"%s\", id %d)", callingPid, + clientName8.string(), cameraId); + + status_t status = validateConnect(cameraId, /*inout*/clientUid); + if (status != OK) { + return status; + } + + sp<Client> client; + { + Mutex::Autolock lock(mServiceLock); + sp<BasicClient> clientTmp; + if (!canConnectUnsafe(cameraId, clientPackageName, + cameraClient->asBinder(), + /*out*/clientTmp)) { + return -EBUSY; + } else if 
(client.get() != NULL) { + device = static_cast<Client*>(clientTmp.get()); + return OK; + } + + status = connectHelperLocked(cameraClient, + cameraId, + clientPackageName, + clientUid, + callingPid, + client, + halVersion); + if (status != OK) { + return status; + } + + } + // important: release the mutex here so the client can call back + // into the service from its destructor (can be at the end of the call) + + device = client; + return OK; +} + status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client, const sp<IBinder>& remoteCallback) { status_t status = client->initialize(mModule); @@ -1196,6 +1280,7 @@ status_t CameraService::onTransact( case BnCameraService::CONNECT: case BnCameraService::CONNECT_PRO: case BnCameraService::CONNECT_DEVICE: + case BnCameraService::CONNECT_LEGACY: const int pid = getCallingPid(); const int self_pid = getpid(); if (pid != self_pid) { diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h index b2b65b8..28590eb 100644 --- a/services/camera/libcameraservice/CameraService.h +++ b/services/camera/libcameraservice/CameraService.h @@ -83,6 +83,11 @@ public: /*out*/ sp<ICamera>& device); + virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId, + int halVersion, const String16& clientPackageName, int clientUid, + /*out*/ + sp<ICamera>& device); + virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId, const String16& clientPackageName, int clientUid, /*out*/ @@ -450,7 +455,8 @@ private: int clientUid, int callingPid, /*out*/ - sp<Client>& client); + sp<Client>& client, + int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED); }; } // namespace android diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index 0447979..a6f5a6c 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ 
b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -53,12 +53,10 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService, int cameraFacing, int clientPid, uid_t clientUid, - int servicePid, - int deviceVersion): + int servicePid): Camera2ClientBase(cameraService, cameraClient, clientPackageName, cameraId, cameraFacing, clientPid, clientUid, servicePid), - mParameters(cameraId, cameraFacing), - mDeviceVersion(deviceVersion) + mParameters(cameraId, cameraFacing) { ATRACE_CALL(); @@ -80,7 +78,7 @@ status_t Camera2Client::initialize(camera_module_t *module) { SharedParameters::Lock l(mParameters); - res = l.mParameters.initialize(&(mDevice->info())); + res = l.mParameters.initialize(&(mDevice->info()), mDeviceVersion); if (res != OK) { ALOGE("%s: Camera %d: unable to build defaults: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h index fe0bf74..0e06195 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.h +++ b/services/camera/libcameraservice/api1/Camera2Client.h @@ -89,8 +89,7 @@ public: int cameraFacing, int clientPid, uid_t clientUid, - int servicePid, - int deviceVersion); + int servicePid); virtual ~Camera2Client(); @@ -170,7 +169,6 @@ private: void setPreviewCallbackFlagL(Parameters ¶ms, int flag); status_t updateRequests(Parameters ¶ms); - int mDeviceVersion; // Used with stream IDs static const int NO_STREAM = -1; diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp index 30b7bb8..517226d 100644 --- a/services/camera/libcameraservice/api1/CameraClient.cpp +++ b/services/camera/libcameraservice/api1/CameraClient.cpp @@ -79,7 +79,7 @@ status_t CameraClient::initialize(camera_module_t *module) { ALOGE("%s: Camera %d: unable to initialize device: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); mHardware.clear(); - 
return NO_INIT; + return res; } mHardware->setCallbacks(notifyCallback, diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp index 51b1980..6459300 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.cpp +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -29,6 +29,7 @@ #include "Parameters.h" #include "system/camera.h" +#include "hardware/camera_common.h" #include <media/MediaProfiles.h> #include <media/mediarecorder.h> @@ -45,7 +46,7 @@ Parameters::Parameters(int cameraId, Parameters::~Parameters() { } -status_t Parameters::initialize(const CameraMetadata *info) { +status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) { status_t res; if (info->entryCount() == 0) { @@ -53,6 +54,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { return BAD_VALUE; } Parameters::info = info; + mDeviceVersion = deviceVersion; res = buildFastInfo(); if (res != OK) return res; @@ -140,16 +142,14 @@ status_t Parameters::initialize(const CameraMetadata *info) { previewTransform = degToTransform(0, cameraFacing == CAMERA_FACING_FRONT); - camera_metadata_ro_entry_t availableFormats = - staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); - { String8 supportedPreviewFormats; + SortedVector<int32_t> outputFormats = getAvailableOutputFormats(); bool addComma = false; - for (size_t i=0; i < availableFormats.count; i++) { + for (size_t i=0; i < outputFormats.size(); i++) { if (addComma) supportedPreviewFormats += ","; addComma = true; - switch (availableFormats.data.i32[i]) { + switch (outputFormats[i]) { case HAL_PIXEL_FORMAT_YCbCr_422_SP: supportedPreviewFormats += CameraParameters::PIXEL_FORMAT_YUV422SP; @@ -191,7 +191,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { default: ALOGW("%s: Camera %d: Unknown preview format: %x", - __FUNCTION__, cameraId, availableFormats.data.i32[i]); + __FUNCTION__, cameraId, 
outputFormats[i]); addComma = false; break; } @@ -239,24 +239,23 @@ status_t Parameters::initialize(const CameraMetadata *info) { supportedPreviewFrameRates); } - camera_metadata_ro_entry_t availableJpegSizes = - staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, 2); - if (!availableJpegSizes.count) return NO_INIT; + Vector<Size> availableJpegSizes = getAvailableJpegSizes(); + if (!availableJpegSizes.size()) return NO_INIT; // TODO: Pick maximum - pictureWidth = availableJpegSizes.data.i32[0]; - pictureHeight = availableJpegSizes.data.i32[1]; + pictureWidth = availableJpegSizes[0].width; + pictureHeight = availableJpegSizes[0].height; params.setPictureSize(pictureWidth, pictureHeight); { String8 supportedPictureSizes; - for (size_t i=0; i < availableJpegSizes.count; i += 2) { + for (size_t i=0; i < availableJpegSizes.size(); i++) { if (i != 0) supportedPictureSizes += ","; supportedPictureSizes += String8::format("%dx%d", - availableJpegSizes.data.i32[i], - availableJpegSizes.data.i32[i+1]); + availableJpegSizes[i].width, + availableJpegSizes[i].height); } params.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, supportedPictureSizes); @@ -952,9 +951,8 @@ status_t Parameters::buildFastInfo() { staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS); if (!availableFocalLengths.count) return NO_INIT; - camera_metadata_ro_entry_t availableFormats = - staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); - if (!availableFormats.count) return NO_INIT; + SortedVector<int32_t> availableFormats = getAvailableOutputFormats(); + if (!availableFormats.size()) return NO_INIT; if (sceneModeOverrides.count > 0) { @@ -1038,8 +1036,8 @@ status_t Parameters::buildFastInfo() { // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888 fastInfo.useFlexibleYuv = false; - for (size_t i = 0; i < availableFormats.count; i++) { - if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) { + for (size_t i = 0; i < availableFormats.size(); i++) { + if (availableFormats[i] == 
HAL_PIXEL_FORMAT_YCbCr_420_888) { fastInfo.useFlexibleYuv = true; break; } @@ -1198,8 +1196,7 @@ status_t Parameters::set(const String8& paramString) { "is active!", __FUNCTION__); return BAD_VALUE; } - camera_metadata_ro_entry_t availableFormats = - staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + SortedVector<int32_t> availableFormats = getAvailableOutputFormats(); // If using flexible YUV, always support NV21/YV12. Otherwise, check // HAL's list. if (! (fastInfo.useFlexibleYuv && @@ -1208,11 +1205,10 @@ status_t Parameters::set(const String8& paramString) { validatedParams.previewFormat == HAL_PIXEL_FORMAT_YV12) ) ) { // Not using flexible YUV format, so check explicitly - for (i = 0; i < availableFormats.count; i++) { - if (availableFormats.data.i32[i] == - validatedParams.previewFormat) break; + for (i = 0; i < availableFormats.size(); i++) { + if (availableFormats[i] == validatedParams.previewFormat) break; } - if (i == availableFormats.count) { + if (i == availableFormats.size()) { ALOGE("%s: Requested preview format %s (0x%x) is not supported", __FUNCTION__, newParams.getPreviewFormat(), validatedParams.previewFormat); @@ -1302,15 +1298,14 @@ status_t Parameters::set(const String8& paramString) { &validatedParams.pictureHeight); if (validatedParams.pictureWidth == pictureWidth || validatedParams.pictureHeight == pictureHeight) { - camera_metadata_ro_entry_t availablePictureSizes = - staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); - for (i = 0; i < availablePictureSizes.count; i+=2) { - if ((availablePictureSizes.data.i32[i] == + Vector<Size> availablePictureSizes = getAvailableJpegSizes(); + for (i = 0; i < availablePictureSizes.size(); i++) { + if ((availablePictureSizes[i].width == validatedParams.pictureWidth) && - (availablePictureSizes.data.i32[i+1] == + (availablePictureSizes[i].height == validatedParams.pictureHeight)) break; } - if (i == availablePictureSizes.count) { + if (i == availablePictureSizes.size()) { ALOGE("%s: Requested picture size %d x 
%d is not supported", __FUNCTION__, validatedParams.pictureWidth, validatedParams.pictureHeight); @@ -2527,22 +2522,37 @@ status_t Parameters::getFilteredSizes(Size limit, Vector<Size> *sizes) { ALOGE("%s: Input size is null", __FUNCTION__); return BAD_VALUE; } - - const size_t SIZE_COUNT = sizeof(Size) / sizeof(int); - camera_metadata_ro_entry_t availableProcessedSizes = - staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT); - if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE; - - Size filteredSize; - for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) { - filteredSize.width = availableProcessedSizes.data.i32[i]; - filteredSize.height = availableProcessedSizes.data.i32[i+1]; - // Need skip the preview sizes that are too large. - if (filteredSize.width <= limit.width && - filteredSize.height <= limit.height) { - sizes->push(filteredSize); + sizes->clear(); + + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + Vector<StreamConfiguration> scs = getStreamConfigurations(); + for (size_t i=0; i < scs.size(); i++) { + const StreamConfiguration &sc = scs[i]; + if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT && + sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED && + sc.width <= limit.width && sc.height <= limit.height) { + Size sz = {sc.width, sc.height}; + sizes->push(sz); } + } + } else { + const size_t SIZE_COUNT = sizeof(Size) / sizeof(int); + camera_metadata_ro_entry_t availableProcessedSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT); + if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE; + + Size filteredSize; + for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) { + filteredSize.width = availableProcessedSizes.data.i32[i]; + filteredSize.height = availableProcessedSizes.data.i32[i+1]; + // Need skip the preview sizes that are too large. 
+ if (filteredSize.width <= limit.width && + filteredSize.height <= limit.height) { + sizes->push(filteredSize); + } + } } + if (sizes->isEmpty()) { ALOGE("generated preview size list is empty!!"); return BAD_VALUE; @@ -2576,6 +2586,78 @@ Parameters::Size Parameters::getMaxSizeForRatio( return maxSize; } +Vector<Parameters::StreamConfiguration> Parameters::getStreamConfigurations() { + const int STREAM_CONFIGURATION_SIZE = 4; + const int STREAM_FORMAT_OFFSET = 0; + const int STREAM_WIDTH_OFFSET = 1; + const int STREAM_HEIGHT_OFFSET = 2; + const int STREAM_IS_INPUT_OFFSET = 3; + Vector<StreamConfiguration> scs; + if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) { + ALOGE("StreamConfiguration is only valid after device HAL 3.2!"); + return scs; + } + + camera_metadata_ro_entry_t availableStreamConfigs = + staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS); + for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) { + int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET]; + int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET]; + int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET]; + int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET]; + StreamConfiguration sc = {format, width, height, isInput}; + scs.add(sc); + } + return scs; +} + +SortedVector<int32_t> Parameters::getAvailableOutputFormats() { + SortedVector<int32_t> outputFormats; // Non-duplicated output formats + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + Vector<StreamConfiguration> scs = getStreamConfigurations(); + for (size_t i=0; i < scs.size(); i++) { + const StreamConfiguration &sc = scs[i]; + if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) { + outputFormats.add(sc.format); + } + } + } else { + camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS); + for (size_t i=0; i < availableFormats.count; i++) { + 
outputFormats.add(availableFormats.data.i32[i]); + } + } + return outputFormats; +} + +Vector<Parameters::Size> Parameters::getAvailableJpegSizes() { + Vector<Parameters::Size> jpegSizes; + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + Vector<StreamConfiguration> scs = getStreamConfigurations(); + for (size_t i=0; i < scs.size(); i++) { + const StreamConfiguration &sc = scs[i]; + if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT && + sc.format == HAL_PIXEL_FORMAT_BLOB) { + Size sz = {sc.width, sc.height}; + jpegSizes.add(sz); + } + } + } else { + const int JPEG_SIZE_ENTRY_COUNT = 2; + const int WIDTH_OFFSET = 0; + const int HEIGHT_OFFSET = 1; + camera_metadata_ro_entry_t availableJpegSizes = + staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); + for (size_t i=0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) { + int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET]; + int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET]; + Size sz = {width, height}; + jpegSizes.add(sz); + } + } + return jpegSizes; +} + Parameters::CropRegion Parameters::calculateCropRegion( Parameters::CropRegion::Outputs outputs) const { diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h index 28dd788..f95c69a 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.h +++ b/services/camera/libcameraservice/api1/client2/Parameters.h @@ -226,7 +226,7 @@ struct Parameters { ~Parameters(); // Sets up default parameters - status_t initialize(const CameraMetadata *info); + status_t initialize(const CameraMetadata *info, int deviceVersion); // Build fast-access device static info from static info status_t buildFastInfo(); @@ -346,6 +346,24 @@ private: status_t getFilteredSizes(Size limit, Vector<Size> *sizes); // Get max size (from the size array) that matches the given aspect ratio. 
Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count); + + struct StreamConfiguration { + int32_t format; + int32_t width; + int32_t height; + int32_t isInput; + }; + // Helper function to extract available stream configuration + // Only valid since device HAL version 3.2 + // returns an empty Vector if device HAL version does not support it + Vector<StreamConfiguration> getStreamConfigurations(); + + // Helper function to get non-duplicated available output formats + SortedVector<int32_t> getAvailableOutputFormats(); + // Helper function to get available output jpeg sizes + Vector<Size> getAvailableJpegSizes(); + + int mDeviceVersion; }; // This class encapsulates the Parameters class so that it can only be accessed diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp index 2064e2c..99abced 100644 --- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp @@ -430,10 +430,13 @@ status_t StreamingProcessor::startStream(StreamType type, Mutex::Autolock m(mMutex); - // If a recording stream is being started up, free up any - // outstanding buffers left from the previous recording session. - // There should never be any, so if there are, warn about it. - if (isStreamActive(outputStreams, mRecordingStreamId)) { + // If a recording stream is being started up and no recording + // stream is active yet, free up any outstanding buffers left + // from the previous recording session. There should never be + // any, so if there are, warn about it.
+ bool isRecordingStreamIdle = !isStreamActive(mActiveStreamIds, mRecordingStreamId); + bool startRecordingStream = isStreamActive(outputStreams, mRecordingStreamId); + if (startRecordingStream && isRecordingStreamIdle) { releaseAllRecordingFramesLocked(); } diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp index 8154e59..544f736 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp @@ -102,7 +102,7 @@ status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request, status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests, bool streaming, int64_t* lastFrameNumber) { ATRACE_CALL(); - ALOGV("%s-start of function. Request list size %d", __FUNCTION__, requests.size()); + ALOGV("%s-start of function. Request list size %zu", __FUNCTION__, requests.size()); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; @@ -177,7 +177,7 @@ status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > request metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1); loopCounter++; // loopCounter starts from 1 - ALOGV("%s: Camera %d: Creating request with ID %d (%d of %d)", + ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)", __FUNCTION__, mCameraId, requestId, loopCounter, requests.size()); metadataRequestList.push_back(metadata); diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp index 19efd30..13c9f48 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp +++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp @@ -54,7 +54,8 @@ Camera2ClientBase<TClientBase>::Camera2ClientBase( int servicePid): TClientBase(cameraService, remoteCallback, clientPackageName, cameraId, cameraFacing, clientPid, clientUid, servicePid), - 
mSharedCameraCallbacks(remoteCallback) + mSharedCameraCallbacks(remoteCallback), + mDeviceVersion(cameraService->getDeviceVersion(cameraId)) { ALOGI("Camera %d: Opened", cameraId); @@ -280,6 +281,11 @@ int Camera2ClientBase<TClientBase>::getCameraId() const { } template <typename TClientBase> +int Camera2ClientBase<TClientBase>::getCameraDeviceVersion() const { + return mDeviceVersion; +} + +template <typename TClientBase> const sp<CameraDeviceBase>& Camera2ClientBase<TClientBase>::getCameraDevice() { return mDevice; } diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h index 9feca93..f57d204 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.h +++ b/services/camera/libcameraservice/common/Camera2ClientBase.h @@ -76,6 +76,7 @@ public: int getCameraId() const; const sp<CameraDeviceBase>& getCameraDevice(); + int getCameraDeviceVersion() const; const sp<CameraService>& getCameraService(); @@ -122,6 +123,7 @@ protected: /** CameraDeviceBase instance wrapping HAL2+ entry */ + const int mDeviceVersion; sp<CameraDeviceBase> mDevice; /** Utility members */ diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h index 87b2807..925b645 100644 --- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h +++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h @@ -92,8 +92,22 @@ public: status_t initialize(hw_module_t *module) { ALOGI("Opening camera %s", mName.string()); - int rc = module->methods->open(module, mName.string(), - (hw_device_t **)&mDevice); + camera_module_t *cameraModule = reinterpret_cast<camera_module_t *>(module); + camera_info info; + status_t res = cameraModule->get_camera_info(atoi(mName.string()), &info); + if (res != OK) return res; + + int rc = OK; + if (module->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 && + 
info.device_version > CAMERA_DEVICE_API_VERSION_1_0) { + // Open higher version camera device as HAL1.0 device. + rc = cameraModule->open_legacy(module, mName.string(), + CAMERA_DEVICE_API_VERSION_1_0, + (hw_device_t **)&mDevice); + } else { + rc = module->methods->open(module, mName.string(), + (hw_device_t **)&mDevice); + } if (rc != OK) { ALOGE("Could not open camera %s: %d", mName.string(), rc); return rc; diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp index 16d6f42..24df819 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.cpp +++ b/services/camera/libcameraservice/device3/Camera3Device.cpp @@ -113,7 +113,6 @@ status_t Camera3Device::initialize(camera_module_t *module) } /** Cross-check device version */ - if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) { SET_ERR_L("Could not open camera: " "Camera device should be at least %x, reports %x instead", @@ -173,6 +172,7 @@ status_t Camera3Device::initialize(camera_module_t *module) /** Everything is good to go */ + mDeviceVersion = device->common.version; mDeviceInfo = info.static_camera_characteristics; mHal3Device = device; mStatus = STATUS_UNCONFIGURED; @@ -284,42 +284,74 @@ bool Camera3Device::tryLockSpinRightRound(Mutex& lock) { return gotLock; } -ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const { - // TODO: replace below with availableStreamConfiguration for HAL3.2+. - camera_metadata_ro_entry availableJpegSizes = - mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); - if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) { - ALOGE("%s: Camera %d: Can't find find valid available jpeg sizes in static metadata!", - __FUNCTION__, mId); - return BAD_VALUE; - } - - // Get max jpeg size (area-wise). 
+Camera3Device::Size Camera3Device::getMaxJpegResolution() const { int32_t maxJpegWidth = 0, maxJpegHeight = 0; - bool foundMax = false; - for (size_t i = 0; i < availableJpegSizes.count; i += 2) { - if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1]) - > (maxJpegWidth * maxJpegHeight)) { - maxJpegWidth = availableJpegSizes.data.i32[i]; - maxJpegHeight = availableJpegSizes.data.i32[i + 1]; - foundMax = true; + if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) { + const int STREAM_CONFIGURATION_SIZE = 4; + const int STREAM_FORMAT_OFFSET = 0; + const int STREAM_WIDTH_OFFSET = 1; + const int STREAM_HEIGHT_OFFSET = 2; + const int STREAM_IS_INPUT_OFFSET = 3; + camera_metadata_ro_entry_t availableStreamConfigs = + mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS); + if (availableStreamConfigs.count == 0 || + availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) { + return Size(0, 0); + } + + // Get max jpeg size (area-wise). + for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) { + int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET]; + int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET]; + int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET]; + int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET]; + if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT + && format == HAL_PIXEL_FORMAT_BLOB && + (width * height > maxJpegWidth * maxJpegHeight)) { + maxJpegWidth = width; + maxJpegHeight = height; + } + } + } else { + camera_metadata_ro_entry availableJpegSizes = + mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES); + if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) { + return Size(0, 0); + } + + // Get max jpeg size (area-wise). 
+ for (size_t i = 0; i < availableJpegSizes.count; i += 2) { + if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1]) + > (maxJpegWidth * maxJpegHeight)) { + maxJpegWidth = availableJpegSizes.data.i32[i]; + maxJpegHeight = availableJpegSizes.data.i32[i + 1]; + } } } - if (!foundMax) { + return Size(maxJpegWidth, maxJpegHeight); +} + +ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const { + // Get max jpeg size (area-wise). + Size maxJpegResolution = getMaxJpegResolution(); + if (maxJpegResolution.width == 0) { + ALOGE("%s: Camera %d: Can't find find valid available jpeg sizes in static metadata!", + __FUNCTION__, mId); return BAD_VALUE; } // Get max jpeg buffer size ssize_t maxJpegBufferSize = 0; - camera_metadata_ro_entry jpegMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE); - if (jpegMaxSize.count == 0) { + camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE); + if (jpegBufMaxSize.count == 0) { ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId); return BAD_VALUE; } - maxJpegBufferSize = jpegMaxSize.data.i32[0]; + maxJpegBufferSize = jpegBufMaxSize.data.i32[0]; // Calculate final jpeg buffer size for the given resolution. - float scaleFactor = ((float) (width * height)) / (maxJpegWidth * maxJpegHeight); + float scaleFactor = ((float) (width * height)) / + (maxJpegResolution.width * maxJpegResolution.height); ssize_t jpegBufferSize = scaleFactor * maxJpegBufferSize; // Bound the buffer size to [MIN_JPEG_BUFFER_SIZE, maxJpegBufferSize]. 
if (jpegBufferSize > maxJpegBufferSize) { @@ -2126,6 +2158,17 @@ status_t Camera3Device::RequestThread::setRepeatingRequests( return OK; } +bool Camera3Device::RequestThread::isRepeatingRequestLocked(const sp<CaptureRequest> requestIn) { + if (mRepeatingRequests.empty()) { + return false; + } + int32_t requestId = requestIn->mResultExtras.requestId; + const RequestList &repeatRequests = mRepeatingRequests; + // All repeating requests are guaranteed to have same id so only check first request + const sp<CaptureRequest> firstRequest = *repeatRequests.begin(); + return (firstRequest->mResultExtras.requestId == requestId); +} + status_t Camera3Device::RequestThread::clearRepeatingRequests(/*out*/int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); mRepeatingRequests.clear(); @@ -2140,6 +2183,18 @@ status_t Camera3Device::RequestThread::clear(/*out*/int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); ALOGV("RequestThread::%s:", __FUNCTION__); mRepeatingRequests.clear(); + + // Decrement repeating frame count for those requests never sent to device + // TODO: Remove this after we have proper error handling so these requests + // will generate an error callback. This might be the only place calling + // isRepeatingRequestLocked. If so, isRepeatingRequestLocked should also be removed.
+ const RequestList &requests = mRequestQueue; + for (RequestList::const_iterator it = requests.begin(); + it != requests.end(); ++it) { + if (isRepeatingRequestLocked(*it)) { + mRepeatingLastFrameNumber--; + } + } mRequestQueue.clear(); mTriggerMap.clear(); if (lastFrameNumber != NULL) { diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h index 00ae771..61e6572 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.h +++ b/services/camera/libcameraservice/device3/Camera3Device.h @@ -168,6 +168,8 @@ class Camera3Device : CameraMetadata mDeviceInfo; + int mDeviceVersion; + enum Status { STATUS_ERROR, STATUS_UNINITIALIZED, @@ -297,6 +299,18 @@ class Camera3Device : */ bool tryLockSpinRightRound(Mutex& lock); + struct Size { + int width; + int height; + Size(int w, int h) : width(w), height(h){} + }; + + /** + * Helper function to get the largest Jpeg resolution (in area) + * Return Size(0, 0) if static metadata is invalid + */ + Size getMaxJpegResolution() const; + /** * Get Jpeg buffer size for a given jpeg resolution. * Negative values are error codes. @@ -430,6 +444,9 @@ class Camera3Device : // Relay error to parent device object setErrorState void setErrorState(const char *fmt, ...); + // If the input request is in mRepeatingRequests. Must be called with mRequestLock held + bool isRepeatingRequestLocked(const sp<CaptureRequest>); + wp<Camera3Device> mParent; wp<camera3::StatusTracker> mStatusTracker; camera3_device_t *mHal3Device; |