diff options
102 files changed, 8394 insertions, 2572 deletions
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp index 7765914..6b726e0 100644 --- a/camera/CameraMetadata.cpp +++ b/camera/CameraMetadata.cpp @@ -25,6 +25,9 @@ namespace android { +#define ALIGN_TO(val, alignment) \ + (((uintptr_t)(val) + ((alignment) - 1)) & ~((alignment) - 1)) + typedef Parcel::WritableBlob WritableBlob; typedef Parcel::ReadableBlob ReadableBlob; @@ -431,40 +434,70 @@ status_t CameraMetadata::readFromParcel(const Parcel& data, *out = NULL; } - // arg0 = metadataSize (int32) - int32_t metadataSizeTmp = -1; - if ((err = data.readInt32(&metadataSizeTmp)) != OK) { + // See CameraMetadata::writeToParcel for parcel data layout diagram and explanation. + // arg0 = blobSize (int32) + int32_t blobSizeTmp = -1; + if ((err = data.readInt32(&blobSizeTmp)) != OK) { ALOGE("%s: Failed to read metadata size (error %d %s)", __FUNCTION__, err, strerror(-err)); return err; } - const size_t metadataSize = static_cast<size_t>(metadataSizeTmp); + const size_t blobSize = static_cast<size_t>(blobSizeTmp); + const size_t alignment = get_camera_metadata_alignment(); - if (metadataSize == 0) { + // Special case: zero blob size means zero sized (NULL) metadata. + if (blobSize == 0) { ALOGV("%s: Read 0-sized metadata", __FUNCTION__); return OK; } - // NOTE: this doesn't make sense to me. shouldnt the blob + if (blobSize <= alignment) { + ALOGE("%s: metadata blob is malformed, blobSize(%zu) should be larger than alignment(%zu)", + __FUNCTION__, blobSize, alignment); + return BAD_VALUE; + } + + const size_t metadataSize = blobSize - alignment; + + // NOTE: this doesn't make sense to me. shouldn't the blob // know how big it is? why do we have to specify the size // to Parcel::readBlob ? - ReadableBlob blob; // arg1 = metadata (blob) do { - if ((err = data.readBlob(metadataSize, &blob)) != OK) { - ALOGE("%s: Failed to read metadata blob (sized %d). 
Possible " + if ((err = data.readBlob(blobSize, &blob)) != OK) { + ALOGE("%s: Failed to read metadata blob (sized %zu). Possible " " serialization bug. Error %d %s", - __FUNCTION__, metadataSize, err, strerror(-err)); + __FUNCTION__, blobSize, err, strerror(-err)); break; } - const camera_metadata_t* tmp = - reinterpret_cast<const camera_metadata_t*>(blob.data()); + // arg2 = offset (blob) + // Must be after blob since we don't know offset until after writeBlob. + int32_t offsetTmp; + if ((err = data.readInt32(&offsetTmp)) != OK) { + ALOGE("%s: Failed to read metadata offsetTmp (error %d %s)", + __FUNCTION__, err, strerror(-err)); + break; + } + const size_t offset = static_cast<size_t>(offsetTmp); + if (offset >= alignment) { + ALOGE("%s: metadata offset(%zu) should be less than alignment(%zu)", + __FUNCTION__, blobSize, alignment); + err = BAD_VALUE; + break; + } + + const uintptr_t metadataStart = reinterpret_cast<uintptr_t>(blob.data()) + offset; + const camera_metadata_t* tmp = + reinterpret_cast<const camera_metadata_t*>(metadataStart); + ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu", + __FUNCTION__, alignment, tmp, offset); metadata = allocate_copy_camera_metadata_checked(tmp, metadataSize); if (metadata == NULL) { // We consider that allocation only fails if the validation // also failed, therefore the readFromParcel was a failure. + ALOGE("%s: metadata allocation and copy failed", __FUNCTION__); err = BAD_VALUE; } } while(0); @@ -485,38 +518,79 @@ status_t CameraMetadata::writeToParcel(Parcel& data, const camera_metadata_t* metadata) { status_t res = OK; - // arg0 = metadataSize (int32) - + /** + * Below is the camera metadata parcel layout: + * + * |--------------------------------------------| + * | arg0: blobSize | + * | (length = 4) | + * |--------------------------------------------|<--Skip the rest if blobSize == 0. 
+ * | | + * | | + * | arg1: blob | + * | (length = variable, see arg1 layout below) | + * | | + * | | + * |--------------------------------------------| + * | arg2: offset | + * | (length = 4) | + * |--------------------------------------------| + */ + + // arg0 = blobSize (int32) if (metadata == NULL) { + // Write zero blobSize for null metadata. return data.writeInt32(0); } + /** + * Always make the blob size sufficiently larger, as we need put alignment + * padding and metadata into the blob. Since we don't know the alignment + * offset before writeBlob. Then write the metadata to aligned offset. + */ const size_t metadataSize = get_camera_metadata_compact_size(metadata); - res = data.writeInt32(static_cast<int32_t>(metadataSize)); + const size_t alignment = get_camera_metadata_alignment(); + const size_t blobSize = metadataSize + alignment; + res = data.writeInt32(static_cast<int32_t>(blobSize)); if (res != OK) { return res; } - // arg1 = metadata (blob) + size_t offset = 0; + /** + * arg1 = metadata (blob). + * + * The blob size is the sum of front padding size, metadata size and back padding + * size, which is equal to metadataSize + alignment. + * + * The blob layout is: + * |------------------------------------|<----Start address of the blob (unaligned). + * | front padding | + * | (size = offset) | + * |------------------------------------|<----Aligned start address of metadata. + * | | + * | | + * | metadata | + * | (size = metadataSize) | + * | | + * | | + * |------------------------------------| + * | back padding | + * | (size = alignment - offset) | + * |------------------------------------|<----End address of blob. + * (Blob start address + blob size). 
+ */ WritableBlob blob; do { - res = data.writeBlob(metadataSize, &blob); + res = data.writeBlob(blobSize, &blob); if (res != OK) { break; } - copy_camera_metadata(blob.data(), metadataSize, metadata); - - IF_ALOGV() { - if (validate_camera_metadata_structure( - (const camera_metadata_t*)blob.data(), - &metadataSize) != OK) { - ALOGV("%s: Failed to validate metadata %p after writing blob", - __FUNCTION__, blob.data()); - } else { - ALOGV("%s: Metadata written to blob. Validation success", - __FUNCTION__); - } - } + const uintptr_t metadataStart = ALIGN_TO(blob.data(), alignment); + offset = metadataStart - reinterpret_cast<uintptr_t>(blob.data()); + ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu", + __FUNCTION__, alignment, metadataStart, offset); + copy_camera_metadata(reinterpret_cast<void*>(metadataStart), metadataSize, metadata); // Not too big of a problem since receiving side does hard validation // Don't check the size since the compact size could be larger @@ -528,6 +602,9 @@ status_t CameraMetadata::writeToParcel(Parcel& data, } while(false); blob.release(); + // arg2 = offset (int32) + res = data.writeInt32(static_cast<int32_t>(offset)); + return res; } diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp index 61f83e3..b6f150c 100644 --- a/cmds/screenrecord/screenrecord.cpp +++ b/cmds/screenrecord/screenrecord.cpp @@ -45,6 +45,7 @@ #include <signal.h> #include <getopt.h> #include <sys/wait.h> +#include <termios.h> #include <assert.h> #include "screenrecord.h" @@ -61,6 +62,7 @@ static const uint32_t kFallbackHeight = 720; // Command-line parameters. static bool gVerbose = false; // chatty on stdout static bool gRotate = false; // rotate 90 degrees +static bool gRawOutput = false; // generate raw H.264 byte stream output static bool gSizeSpecified = false; // was size explicitly requested? static bool gWantInfoScreen = false; // do we want initial info screen? 
static bool gWantFrameTime = false; // do we want times on each frame? @@ -298,10 +300,12 @@ static status_t prepareVirtualDisplay(const DisplayInfo& mainDpyInfo, * input frames are coming from the virtual display as fast as SurfaceFlinger * wants to send them. * + * Exactly one of muxer or rawFp must be non-null. + * * The muxer must *not* have been started before calling. */ static status_t runEncoder(const sp<MediaCodec>& encoder, - const sp<MediaMuxer>& muxer, const sp<IBinder>& mainDpy, + const sp<MediaMuxer>& muxer, FILE* rawFp, const sp<IBinder>& mainDpy, const sp<IBinder>& virtualDpy, uint8_t orientation) { static int kTimeout = 250000; // be responsive on signal status_t err; @@ -311,6 +315,8 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec); DisplayInfo mainDpyInfo; + assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL)); + Vector<sp<ABuffer> > buffers; err = encoder->getOutputBuffers(&buffers); if (err != NO_ERROR) { @@ -342,15 +348,16 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, case NO_ERROR: // got a buffer if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) { - // ignore this -- we passed the CSD into MediaMuxer when - // we got the format change notification - ALOGV("Got codec config buffer (%u bytes); ignoring", size); - size = 0; + ALOGV("Got codec config buffer (%u bytes)", size); + if (muxer != NULL) { + // ignore this -- we passed the CSD into MediaMuxer when + // we got the format change notification + size = 0; + } } if (size != 0) { ALOGV("Got data in buffer %d, size=%d, pts=%lld", bufIndex, size, ptsUsec); - assert(trackIdx != -1); { // scope ATRACE_NAME("orientation"); @@ -379,14 +386,23 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, ptsUsec = systemTime(SYSTEM_TIME_MONOTONIC) / 1000; } - // The MediaMuxer docs are unclear, but it appears that we - // need to pass either the full set of BufferInfo 
flags, or - // (flags & BUFFER_FLAG_SYNCFRAME). - // - // If this blocks for too long we could drop frames. We may - // want to queue these up and do them on a different thread. - { // scope + if (muxer == NULL) { + fwrite(buffers[bufIndex]->data(), 1, size, rawFp); + // Flush the data immediately in case we're streaming. + // We don't want to do this if all we've written is + // the SPS/PPS data because mplayer gets confused. + if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) == 0) { + fflush(rawFp); + } + } else { + // The MediaMuxer docs are unclear, but it appears that we + // need to pass either the full set of BufferInfo flags, or + // (flags & BUFFER_FLAG_SYNCFRAME). + // + // If this blocks for too long we could drop frames. We may + // want to queue these up and do them on a different thread. ATRACE_NAME("write sample"); + assert(trackIdx != -1); err = muxer->writeSampleData(buffers[bufIndex], trackIdx, ptsUsec, flags); if (err != NO_ERROR) { @@ -418,12 +434,14 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, ALOGV("Encoder format changed"); sp<AMessage> newFormat; encoder->getOutputFormat(&newFormat); - trackIdx = muxer->addTrack(newFormat); - ALOGV("Starting muxer"); - err = muxer->start(); - if (err != NO_ERROR) { - fprintf(stderr, "Unable to start muxer (err=%d)\n", err); - return err; + if (muxer != NULL) { + trackIdx = muxer->addTrack(newFormat); + ALOGV("Starting muxer"); + err = muxer->start(); + if (err != NO_ERROR) { + fprintf(stderr, "Unable to start muxer (err=%d)\n", err); + return err; + } } } break; @@ -457,6 +475,44 @@ static status_t runEncoder(const sp<MediaCodec>& encoder, } /* + * Raw H.264 byte stream output requested. Send the output to stdout + * if desired. If the output is a tty, reconfigure it to avoid the + * CRLF line termination that we see with "adb shell" commands. 
+ */ +static FILE* prepareRawOutput(const char* fileName) { + FILE* rawFp = NULL; + + if (strcmp(fileName, "-") == 0) { + if (gVerbose) { + fprintf(stderr, "ERROR: verbose output and '-' not compatible"); + return NULL; + } + rawFp = stdout; + } else { + rawFp = fopen(fileName, "w"); + if (rawFp == NULL) { + fprintf(stderr, "fopen raw failed: %s\n", strerror(errno)); + return NULL; + } + } + + int fd = fileno(rawFp); + if (isatty(fd)) { + // best effort -- reconfigure tty for "raw" + ALOGD("raw video output to tty (fd=%d)", fd); + struct termios term; + if (tcgetattr(fd, &term) == 0) { + cfmakeraw(&term); + if (tcsetattr(fd, TCSANOW, &term) == 0) { + ALOGD("tty successfully configured for raw"); + } + } + } + + return rawFp; +} + +/* * Main "do work" method. * * Configures codec, muxer, and virtual display, then starts moving bits @@ -558,16 +614,26 @@ static status_t recordScreen(const char* fileName) { return err; } - // Configure muxer. We have to wait for the CSD blob from the encoder - // before we can start it. - sp<MediaMuxer> muxer = new MediaMuxer(fileName, - MediaMuxer::OUTPUT_FORMAT_MPEG_4); - if (gRotate) { - muxer->setOrientationHint(90); // TODO: does this do anything? + sp<MediaMuxer> muxer = NULL; + FILE* rawFp = NULL; + if (gRawOutput) { + rawFp = prepareRawOutput(fileName); + if (rawFp == NULL) { + encoder->release(); + return -1; + } + } else { + // Configure muxer. We have to wait for the CSD blob from the encoder + // before we can start it. + muxer = new MediaMuxer(fileName, MediaMuxer::OUTPUT_FORMAT_MPEG_4); + if (gRotate) { + muxer->setOrientationHint(90); // TODO: does this do anything? + } } // Main encoder loop. 
- err = runEncoder(encoder, muxer, mainDpy, dpy, mainDpyInfo.orientation); + err = runEncoder(encoder, muxer, rawFp, mainDpy, dpy, + mainDpyInfo.orientation); if (err != NO_ERROR) { fprintf(stderr, "Encoder failed (err=%d)\n", err); // fall through to cleanup @@ -584,9 +650,13 @@ static status_t recordScreen(const char* fileName) { overlay->stop(); } encoder->stop(); - // If we don't stop muxer explicitly, i.e. let the destructor run, - // it may hang (b/11050628). - muxer->stop(); + if (muxer != NULL) { + // If we don't stop muxer explicitly, i.e. let the destructor run, + // it may hang (b/11050628). + muxer->stop(); + } else if (rawFp != stdout) { + fclose(rawFp); + } encoder->release(); return err; @@ -753,6 +823,7 @@ int main(int argc, char* const argv[]) { { "show-frame-time", no_argument, NULL, 'f' }, { "bugreport", no_argument, NULL, 'u' }, { "rotate", no_argument, NULL, 'r' }, + { "raw", no_argument, NULL, 'w' }, { NULL, 0, NULL, 0 } }; @@ -818,6 +889,10 @@ int main(int argc, char* const argv[]) { // experimental feature gRotate = true; break; + case 'w': + // experimental feature + gRawOutput = true; + break; default: if (ic != '?') { fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic); @@ -831,17 +906,19 @@ int main(int argc, char* const argv[]) { return 2; } - // MediaMuxer tries to create the file in the constructor, but we don't - // learn about the failure until muxer.start(), which returns a generic - // error code without logging anything. We attempt to create the file - // now for better diagnostics. const char* fileName = argv[optind]; - int fd = open(fileName, O_CREAT | O_RDWR, 0644); - if (fd < 0) { - fprintf(stderr, "Unable to open '%s': %s\n", fileName, strerror(errno)); - return 1; + if (!gRawOutput) { + // MediaMuxer tries to create the file in the constructor, but we don't + // learn about the failure until muxer.start(), which returns a generic + // error code without logging anything. 
We attempt to create the file + // now for better diagnostics. + int fd = open(fileName, O_CREAT | O_RDWR, 0644); + if (fd < 0) { + fprintf(stderr, "Unable to open '%s': %s\n", fileName, strerror(errno)); + return 1; + } + close(fd); } - close(fd); status_t err = recordScreen(fileName); if (err == NO_ERROR) { diff --git a/cmds/screenrecord/screenrecord.h b/cmds/screenrecord/screenrecord.h index 95e8a68..9b058c2 100644 --- a/cmds/screenrecord/screenrecord.h +++ b/cmds/screenrecord/screenrecord.h @@ -18,6 +18,6 @@ #define SCREENRECORD_SCREENRECORD_H #define kVersionMajor 1 -#define kVersionMinor 1 +#define kVersionMinor 2 #endif /*SCREENRECORD_SCREENRECORD_H*/ diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp index c817443..439b6e4 100644 --- a/cmds/stagefright/sf2.cpp +++ b/cmds/stagefright/sf2.cpp @@ -18,6 +18,8 @@ #define LOG_TAG "sf2" #include <utils/Log.h> +#include <signal.h> + #include <binder/ProcessState.h> #include <media/stagefright/foundation/hexdump.h> @@ -42,6 +44,18 @@ using namespace android; +volatile static bool ctrlc = false; + +static sighandler_t oldhandler = NULL; + +static void mysighandler(int signum) { + if (signum == SIGINT) { + ctrlc = true; + return; + } + oldhandler(signum); +} + struct Controller : public AHandler { Controller(const char *uri, bool decodeAudio, const sp<Surface> &surface, bool renderToSurface) @@ -62,7 +76,29 @@ protected: virtual ~Controller() { } + virtual void printStatistics() { + int64_t delayUs = ALooper::GetNowUs() - mStartTimeUs; + if (mDecodeAudio) { + printf("%lld bytes received. %.2f KB/sec\n", + mTotalBytesReceived, + mTotalBytesReceived * 1E6 / 1024 / delayUs); + } else { + printf("%d frames decoded, %.2f fps. %lld bytes " + "received. 
%.2f KB/sec\n", + mNumOutputBuffersReceived, + mNumOutputBuffersReceived * 1E6 / delayUs, + mTotalBytesReceived, + mTotalBytesReceived * 1E6 / 1024 / delayUs); + } + } + virtual void onMessageReceived(const sp<AMessage> &msg) { + if (ctrlc) { + printf("\n"); + printStatistics(); + (new AMessage(kWhatStop, id()))->post(); + ctrlc = false; + } switch (msg->what()) { case kWhatStart: { @@ -98,7 +134,10 @@ protected: break; } } - CHECK(mSource != NULL); + if (mSource == NULL) { + printf("no %s track found\n", mDecodeAudio ? "audio" : "video"); + exit (1); + } CHECK_EQ(mSource->start(), (status_t)OK); @@ -180,21 +219,7 @@ protected: || what == ACodec::kWhatError) { printf((what == ACodec::kWhatEOS) ? "$\n" : "E\n"); - int64_t delayUs = ALooper::GetNowUs() - mStartTimeUs; - - if (mDecodeAudio) { - printf("%lld bytes received. %.2f KB/sec\n", - mTotalBytesReceived, - mTotalBytesReceived * 1E6 / 1024 / delayUs); - } else { - printf("%d frames decoded, %.2f fps. %lld bytes " - "received. %.2f KB/sec\n", - mNumOutputBuffersReceived, - mNumOutputBuffersReceived * 1E6 / delayUs, - mTotalBytesReceived, - mTotalBytesReceived * 1E6 / 1024 / delayUs); - } - + printStatistics(); (new AMessage(kWhatStop, id()))->post(); } else if (what == ACodec::kWhatFlushCompleted) { mSeekState = SEEK_FLUSH_COMPLETED; @@ -638,6 +663,8 @@ int main(int argc, char **argv) { looper->registerHandler(controller); + signal(SIGINT, mysighandler); + controller->startAsync(); CHECK_EQ(looper->start(true /* runOnCallingThread */), (status_t)OK); diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp index 234aef2..f400732 100644 --- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp +++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp @@ -316,6 +316,7 @@ String8 FwdLockEngine::onGetOriginalMimeType(int uniqueId, const String8& path, if (-1 
< fileDesc) { if (FwdLockFile_attach(fileDesc) < 0) { + close(fileDesc); return mimeString; } const char* pMimeType = FwdLockFile_GetContentType(fileDesc); diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h index ef392f0..7be449c 100644 --- a/include/media/AudioBufferProvider.h +++ b/include/media/AudioBufferProvider.h @@ -61,6 +61,17 @@ public: // buffer->frameCount 0 virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0; + // Release (a portion of) the buffer previously obtained by getNextBuffer(). + // It is permissible to call releaseBuffer() multiple times per getNextBuffer(). + // On entry: + // buffer->frameCount number of frames to release, must be <= number of frames + // obtained but not yet released + // buffer->raw unused + // On return: + // buffer->frameCount 0; implementation MUST set to zero + // buffer->raw undefined; implementation is PERMITTED to set to any value, + // so if caller needs to continue using this buffer it must + // keep track of the pointer itself virtual void releaseBuffer(Buffer* buffer) = 0; }; diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h index 052064d..3d839fc 100644 --- a/include/media/AudioRecord.h +++ b/include/media/AudioRecord.h @@ -39,8 +39,12 @@ public: * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*. */ enum event_type { - EVENT_MORE_DATA = 0, // Request to read more data from PCM buffer. - EVENT_OVERRUN = 1, // PCM buffer overrun occurred. + EVENT_MORE_DATA = 0, // Request to read available data from buffer. + // If this event is delivered but the callback handler + // does not want to read the available data, the handler must + // explicitly + // ignore the event by setting frameCount to zero. + EVENT_OVERRUN = 1, // Buffer overrun occurred. EVENT_MARKER = 2, // Record head is at the specified marker position // (See setMarkerPosition()). 
EVENT_NEW_POS = 3, // Record head is at a new position @@ -60,9 +64,10 @@ public: size_t frameCount; // number of sample frames corresponding to size; // on input it is the number of frames available, // on output is the number of frames actually drained - // (currently ignored, but will make the primary field in future) + // (currently ignored but will make the primary field in future) size_t size; // input/output in bytes == frameCount * frameSize + // on output is the number of bytes actually drained // FIXME this is redundant with respect to frameCount, // and TRANSFER_OBTAIN mode is broken for 8-bit data // since we don't define the frame format @@ -76,7 +81,7 @@ public: /* As a convenience, if a callback is supplied, a handler thread * is automatically created with the appropriate priority. This thread - * invokes the callback when a new buffer becomes ready or various conditions occur. + * invokes the callback when a new buffer becomes available or various conditions occur. * Parameters: * * event: type of event notified (see enum AudioRecord::event_type). @@ -99,6 +104,8 @@ public: * - NO_ERROR: successful operation * - NO_INIT: audio server or audio hardware not initialized * - BAD_VALUE: unsupported configuration + * frameCount is guaranteed to be non-zero if status is NO_ERROR, + * and is undefined otherwise. */ static status_t getMinFrameCount(size_t* frameCount, @@ -109,7 +116,7 @@ public: /* How data is transferred from AudioRecord */ enum transfer_type { - TRANSFER_DEFAULT, // not specified explicitly; determine from other parameters + TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters TRANSFER_CALLBACK, // callback EVENT_MORE_DATA TRANSFER_OBTAIN, // FIXME deprecated: call obtainBuffer() and releaseBuffer() TRANSFER_SYNC, // synchronous read() @@ -137,7 +144,7 @@ public: * be larger if the requested size is not compatible with current audio HAL * latency. Zero means to use a default value. * cbf: Callback function. 
If not null, this function is called periodically - * to consume new PCM data and inform of marker, position updates, etc. + * to consume new data and inform of marker, position updates, etc. * user: Context for use by the callback receiver. * notificationFrames: The callback function is called each time notificationFrames PCM * frames are ready in record track output buffer. @@ -155,7 +162,7 @@ public: callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0, + int sessionId = AUDIO_SESSION_ALLOCATE, transfer_type transferType = TRANSFER_DEFAULT, audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE); @@ -171,9 +178,10 @@ public: * Returned status (from utils/Errors.h) can be: * - NO_ERROR: successful intialization * - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use - * - BAD_VALUE: invalid parameter (channels, format, sampleRate...) + * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...) * - NO_INIT: audio server or audio hardware not initialized * - PERMISSION_DENIED: recording is not allowed for the requesting process + * If status is not equal to NO_ERROR, don't call any other APIs on this AudioRecord. * * Parameters not listed in the AudioRecord constructors above: * @@ -188,11 +196,11 @@ public: void* user = NULL, int notificationFrames = 0, bool threadCanCallJava = false, - int sessionId = 0, + int sessionId = AUDIO_SESSION_ALLOCATE, transfer_type transferType = TRANSFER_DEFAULT, audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE); - /* Result of constructing the AudioRecord. This must be checked + /* Result of constructing the AudioRecord. This must be checked for successful initialization * before using any AudioRecord API (except for set()), because using * an uninitialized AudioRecord produces undefined results. * See set() method above for possible return codes. 
@@ -221,7 +229,7 @@ public: status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, int triggerSession = 0); - /* Stop a track. If set, the callback will cease being called. Note that obtainBuffer() still + /* Stop a track. The callback will cease being called. Note that obtainBuffer() still * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK. */ void stop(); @@ -236,7 +244,7 @@ public: * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition * with marker == 0 cancels marker notification callback. * To set a marker at a position which would compute as 0, - * a workaround is to the set the marker at a nearby position such as ~0 or 1. + * a workaround is to set the marker at a nearby position such as ~0 or 1. * If the AudioRecord has been opened with no callback function associated, * the operation will fail. * @@ -378,8 +386,10 @@ public: * returning the current value by this function call. Such loss typically occurs when the * user space process is blocked longer than the capacity of audio driver buffers. * Units: the number of input audio frames. + * FIXME The side-effect of resetting the counter may be incompatible with multi-client. + * Consider making it more like AudioTrack::getUnderrunFrames which doesn't have side effects. 
*/ - unsigned int getInputFramesLost() const; + uint32_t getInputFramesLost() const; private: /* copying audio record objects is not allowed */ @@ -412,6 +422,7 @@ private: bool mPaused; // whether thread is requested to pause at next loop entry bool mPausedInt; // whether thread internally requests pause nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored + bool mIgnoreNextPausedInt; // whether to ignore next mPausedInt request }; // body of AudioRecordThread::threadLoop() @@ -422,9 +433,10 @@ private: // NS_INACTIVE inactive so don't run again until re-started // NS_NEVER never again static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3; - nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread); + nsecs_t processAudioBuffer(); // caller must hold lock on mLock for all _l methods + status_t openRecord_l(size_t epoch); // FIXME enum is faster than strcmp() for parameter 'from' @@ -446,12 +458,13 @@ private: // notification callback uint32_t mNotificationFramesAct; // actual number of frames between each // notification callback - bool mRefreshRemaining; // processAudioBuffer() should refresh next 2 + bool mRefreshRemaining; // processAudioBuffer() should refresh + // mRemainingFrames and mRetryOnPartialBuffer // These are private to processAudioBuffer(), and are not protected by a lock uint32_t mRemainingFrames; // number of frames to request in obtainBuffer() bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer() - int mObservedSequence; // last observed value of mSequence + uint32_t mObservedSequence; // last observed value of mSequence uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; @@ -473,12 +486,11 @@ private: int mSessionId; transfer_type mTransfer; - audio_io_handle_t mInput; // returned by AudioSystem::getInput() - - // may be changed if IAudioRecord object is re-created + // Next 4 fields may be changed if IAudioRecord is re-created, but always != 
0 sp<IAudioRecord> mAudioRecord; sp<IMemory> mCblkMemory; audio_track_cblk_t* mCblk; // re-load after mLock.unlock() + audio_io_handle_t mInput; // returned by AudioSystem::getInput() int mPreviousPriority; // before start() SchedPolicy mPreviousSchedulingGroup; diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index 225ef76..5abab8a 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -67,20 +67,24 @@ public: // returns true in *state if tracks are active on the specified stream or have been active // in the past inPastMs milliseconds - static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs = 0); + static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs); // returns true in *state if tracks are active for what qualifies as remote playback // on the specified stream or have been active in the past inPastMs milliseconds. Remote // playback isn't mutually exclusive with local playback. static status_t isStreamActiveRemotely(audio_stream_type_t stream, bool *state, - uint32_t inPastMs = 0); + uint32_t inPastMs); // returns true in *state if a recorder is currently recording with the specified source static status_t isSourceActive(audio_source_t source, bool *state); // set/get audio hardware parameters. The function accepts a list of parameters // key value pairs in the form: key1=value1;key2=value2;... // Some keys are reserved for standard parameters (See AudioParameter class). + // The versions with audio_io_handle_t are intended for internal media framework use only. static status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs); static String8 getParameters(audio_io_handle_t ioHandle, const String8& keys); + // The versions without audio_io_handle_t are intended for JNI. 
+ static status_t setParameters(const String8& keyValuePairs); + static String8 getParameters(const String8& keys); static void setErrorCallback(audio_error_callback cb); @@ -90,12 +94,14 @@ public: static float linearToLog(int volume); static int logToLinear(float volume); + // Returned samplingRate and frameCount output values are guaranteed + // to be non-zero if status == NO_ERROR static status_t getOutputSamplingRate(uint32_t* samplingRate, - audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + audio_stream_type_t stream); static status_t getOutputFrameCount(size_t* frameCount, - audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + audio_stream_type_t stream); static status_t getOutputLatency(uint32_t* latency, - audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); + audio_stream_type_t stream); static status_t getSamplingRate(audio_io_handle_t output, audio_stream_type_t streamType, uint32_t* samplingRate); @@ -132,7 +138,7 @@ public: audio_stream_type_t stream = AUDIO_STREAM_DEFAULT); // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid - static size_t getInputFramesLost(audio_io_handle_t ioHandle); + static uint32_t getInputFramesLost(audio_io_handle_t ioHandle); static int newAudioSessionId(); static void acquireAudioSessionId(int audioSession); @@ -155,7 +161,8 @@ public: class OutputDescriptor { public: OutputDescriptor() - : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0) {} + : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0) + {} uint32_t samplingRate; audio_format_t format; @@ -193,24 +200,32 @@ public: static status_t setPhoneState(audio_mode_t state); static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config); static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage); + + // Client must successfully hand off the handle reference to AudioFlinger via createTrack(), + // or 
release it with releaseOutput(). static audio_io_handle_t getOutput(audio_stream_type_t stream, uint32_t samplingRate = 0, audio_format_t format = AUDIO_FORMAT_DEFAULT, audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO, audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE, const audio_offload_info_t *offloadInfo = NULL); + static status_t startOutput(audio_io_handle_t output, audio_stream_type_t stream, - int session = 0); + int session); static status_t stopOutput(audio_io_handle_t output, audio_stream_type_t stream, - int session = 0); + int session); static void releaseOutput(audio_io_handle_t output); + + // Client must successfully hand off the handle reference to AudioFlinger via openRecord(), + // or release it with releaseInput(). static audio_io_handle_t getInput(audio_source_t inputSource, - uint32_t samplingRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO, - int sessionId = 0); + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + int sessionId); + static status_t startInput(audio_io_handle_t input); static status_t stopInput(audio_io_handle_t input); static void releaseInput(audio_io_handle_t input); diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h index c29c7e5..99e9c3e 100644 --- a/include/media/AudioTimestamp.h +++ b/include/media/AudioTimestamp.h @@ -19,6 +19,8 @@ #include <time.h> +namespace android { + class AudioTimestamp { public: AudioTimestamp() : mPosition(0) { @@ -30,4 +32,6 @@ public: struct timespec mTime; // corresponding CLOCK_MONOTONIC when frame is expected to present }; +} // namespace + #endif // ANDROID_AUDIO_TIMESTAMP_H diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h index f6646ab..3a60217 100644 --- a/include/media/AudioTrack.h +++ b/include/media/AudioTrack.h @@ -123,6 +123,8 @@ public: * - NO_ERROR: successful operation * - NO_INIT: audio server or audio hardware not 
initialized * - BAD_VALUE: unsupported configuration + * frameCount is guaranteed to be non-zero if status is NO_ERROR, + * and is undefined otherwise. */ static status_t getMinFrameCount(size_t* frameCount, @@ -158,7 +160,7 @@ public: * sampleRate: Data source sampling rate in Hz. * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed * 16 bits per sample). - * channelMask: Channel mask. + * channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true. * frameCount: Minimum size of track PCM buffer in frames. This defines the * application's contribution to the * latency of the track. The actual size selected by the AudioTrack could be @@ -185,7 +187,7 @@ public: callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0, + int sessionId = AUDIO_SESSION_ALLOCATE, transfer_type transferType = TRANSFER_DEFAULT, const audio_offload_info_t *offloadInfo = NULL, int uid = -1); @@ -210,7 +212,7 @@ public: callback_t cbf = NULL, void* user = NULL, int notificationFrames = 0, - int sessionId = 0, + int sessionId = AUDIO_SESSION_ALLOCATE, transfer_type transferType = TRANSFER_DEFAULT, const audio_offload_info_t *offloadInfo = NULL, int uid = -1); @@ -248,7 +250,7 @@ public: int notificationFrames = 0, const sp<IMemory>& sharedBuffer = 0, bool threadCanCallJava = false, - int sessionId = 0, + int sessionId = AUDIO_SESSION_ALLOCATE, transfer_type transferType = TRANSFER_DEFAULT, const audio_offload_info_t *offloadInfo = NULL, int uid = -1); @@ -336,7 +338,7 @@ public: */ status_t setSampleRate(uint32_t sampleRate); - /* Return current source sample rate in Hz, or 0 if unknown */ + /* Return current source sample rate in Hz */ uint32_t getSampleRate() const; /* Enables looping and sets the start and end points of looping. @@ -361,7 +363,7 @@ public: /* Sets marker position. When playback reaches the number of frames specified, a callback with * event type EVENT_MARKER is called. 
Calling setMarkerPosition with marker == 0 cancels marker * notification callback. To set a marker at a position which would compute as 0, - * a workaround is to the set the marker at a nearby position such as ~0 or 1. + * a workaround is to set the marker at a nearby position such as ~0 or 1. * If the AudioTrack has been opened with no callback function associated, the operation will * fail. * @@ -452,7 +454,7 @@ public: * Returned value: * handle on audio hardware output */ - audio_io_handle_t getOutput(); + audio_io_handle_t getOutput() const; /* Returns the unique session ID associated with this track. * @@ -566,7 +568,7 @@ public: uint32_t getUnderrunFrames() const; /* Get the flags */ - audio_output_flags_t getFlags() const { return mFlags; } + audio_output_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; } /* Set parameters - only possible when using direct output */ status_t setParameters(const String8& keyValuePairs); @@ -626,53 +628,50 @@ protected: // NS_INACTIVE inactive so don't run again until re-started // NS_NEVER never again static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3; - nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread); - status_t processStreamEnd(int32_t waitCount); + nsecs_t processAudioBuffer(); + bool isOffloaded() const; // caller must hold lock on mLock for all _l methods - status_t createTrack_l(audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - size_t frameCount, - audio_output_flags_t flags, - const sp<IMemory>& sharedBuffer, - audio_io_handle_t output, - size_t epoch); + status_t createTrack_l(size_t epoch); // can only be called when mState != STATE_ACTIVE void flush_l(); void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount); - audio_io_handle_t getOutput_l(); // FIXME enum is faster than strcmp() for parameter 'from' status_t restoreTrack_l(const char *from); - bool isOffloaded() const + bool isOffloaded_l() const { return (mFlags & 
AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; } - // Next 3 fields may be changed if IAudioTrack is re-created, but always != 0 + // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0 sp<IAudioTrack> mAudioTrack; sp<IMemory> mCblkMemory; audio_track_cblk_t* mCblk; // re-load after mLock.unlock() + audio_io_handle_t mOutput; // returned by AudioSystem::getOutput() sp<AudioTrackThread> mAudioTrackThread; + float mVolume[2]; float mSendLevel; mutable uint32_t mSampleRate; // mutable because getSampleRate() can update it. - size_t mFrameCount; // corresponds to current IAudioTrack - size_t mReqFrameCount; // frame count to request the next time a new - // IAudioTrack is needed - + size_t mFrameCount; // corresponds to current IAudioTrack, value is + // reported back by AudioFlinger to the client + size_t mReqFrameCount; // frame count to request the first or next time + // a new IAudioTrack is needed, non-decreasing // constant after constructor or set() audio_format_t mFormat; // as requested by client, not forced to 16-bit audio_stream_type_t mStreamType; uint32_t mChannelCount; audio_channel_mask_t mChannelMask; + sp<IMemory> mSharedBuffer; transfer_type mTransfer; + audio_offload_info_t mOffloadInfoCopy; + const audio_offload_info_t* mOffloadInfo; // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. For 8-bit PCM data, it's // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer. 
@@ -705,21 +704,25 @@ protected: uint32_t mNotificationFramesAct; // actual number of frames between each // notification callback, // at initial source sample rate - bool mRefreshRemaining; // processAudioBuffer() should refresh next 2 + bool mRefreshRemaining; // processAudioBuffer() should refresh + // mRemainingFrames and mRetryOnPartialBuffer // These are private to processAudioBuffer(), and are not protected by a lock uint32_t mRemainingFrames; // number of frames to request in obtainBuffer() bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer() uint32_t mObservedSequence; // last observed value of mSequence - sp<IMemory> mSharedBuffer; uint32_t mLoopPeriod; // in frames, zero means looping is disabled + uint32_t mMarkerPosition; // in wrapping (overflow) frame units bool mMarkerReached; uint32_t mNewPosition; // in frames uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS audio_output_flags_t mFlags; + // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD. + // mLock must be held to read or write those bits reliably. + int mSessionId; int mAuxEffectId; @@ -753,7 +756,6 @@ private: sp<DeathNotifier> mDeathNotifier; uint32_t mSequence; // incremented for each new IAudioTrack attempt - audio_io_handle_t mOutput; // cached output io handle int mClientUid; }; diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h index 899d79f..4bd111a 100644 --- a/include/media/IAudioFlinger.h +++ b/include/media/IAudioFlinger.h @@ -64,9 +64,12 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, track_flags_t *flags, const sp<IMemory>& sharedBuffer, + // On successful return, AudioFlinger takes over the handle + // reference and will release it when the track is destroyed. + // However on failure, the client is responsible for release. 
audio_io_handle_t output, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, @@ -78,11 +81,14 @@ public: status_t *status) = 0; virtual sp<IAudioRecord> openRecord( + // On successful return, AudioFlinger takes over the handle + // reference and will release it when the track is destroyed. + // However on failure, the client is responsible for release. audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, track_flags_t *flags, pid_t tid, // -1 means unused, otherwise must be valid non-0 int *sessionId, @@ -170,7 +176,7 @@ public: virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, audio_io_handle_t output) const = 0; - virtual size_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0; + virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0; virtual int newAudioSessionId() = 0; @@ -188,6 +194,7 @@ public: effect_descriptor_t *pDesc, const sp<IEffectClient>& client, int32_t priority, + // AudioFlinger doesn't take over handle reference from client audio_io_handle_t output, int sessionId, status_t *status, diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h index a73403b..4537679 100644 --- a/include/media/mediascanner.h +++ b/include/media/mediascanner.h @@ -21,6 +21,7 @@ #include <utils/threads.h> #include <utils/List.h> #include <utils/Errors.h> +#include <utils/String8.h> #include <pthread.h> struct dirent; @@ -29,6 +30,7 @@ namespace android { class MediaScannerClient; class StringArray; +class CharacterEncodingDetector; enum MediaScanResult { // This file or directory was scanned successfully. @@ -94,15 +96,9 @@ public: virtual status_t setMimeType(const char* mimeType) = 0; protected: - void convertValues(uint32_t encoding); - -protected: - // cached name and value strings, for native encoding support. 
- StringArray* mNames; - StringArray* mValues; - - // default encoding based on MediaScanner::mLocale string - uint32_t mLocaleEncoding; + // default encoding from MediaScanner::mLocale + String8 mLocale; + CharacterEncodingDetector *mEncodingDetector; }; }; // namespace android diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h index 7395055..f1636e6 100644 --- a/include/media/stagefright/ACodec.h +++ b/include/media/stagefright/ACodec.h @@ -249,6 +249,8 @@ private: int32_t numChannels, int32_t sampleRate, int32_t bitRate, int32_t aacProfile, bool isADTS); + status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate); + status_t selectAudioPortFormat( OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat); diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h index a829916..69cfbd0 100644 --- a/include/media/stagefright/CameraSource.h +++ b/include/media/stagefright/CameraSource.h @@ -185,6 +185,8 @@ protected: virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType, const sp<IMemory> &data); + void releaseCamera(); + private: friend class CameraSourceListener; @@ -233,7 +235,6 @@ private: int32_t frameRate); void stopCameraRecording(); - void releaseCamera(); status_t reset(); CameraSource(const CameraSource &); diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h index 85693d4..cf5beda 100644 --- a/include/media/stagefright/MediaDefs.h +++ b/include/media/stagefright/MediaDefs.h @@ -44,6 +44,7 @@ extern const char *MEDIA_MIMETYPE_AUDIO_RAW; extern const char *MEDIA_MIMETYPE_AUDIO_FLAC; extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS; extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM; +extern const char *MEDIA_MIMETYPE_AUDIO_AC3; extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4; extern const char *MEDIA_MIMETYPE_CONTAINER_WAV; diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h 
index daaf20f..5121c17 100644 --- a/include/media/stagefright/OMXCodec.h +++ b/include/media/stagefright/OMXCodec.h @@ -248,6 +248,8 @@ private: int32_t numChannels, int32_t sampleRate, int32_t bitRate, int32_t aacProfile, bool isADTS); + status_t setAC3Format(int32_t numChannels, int32_t sampleRate); + void setG711Format(int32_t numChannels); status_t setVideoPortFormatType( diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h index 2653b53..098aa69 100644 --- a/include/media/stagefright/SkipCutBuffer.h +++ b/include/media/stagefright/SkipCutBuffer.h @@ -47,6 +47,7 @@ class SkipCutBuffer: public RefBase { private: void write(const char *src, size_t num); size_t read(char *dst, size_t num); + int32_t mSkip; int32_t mFrontPadding; int32_t mBackPadding; int32_t mWriteHead; diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h index 7fd9379..b5a4c0b 100644 --- a/include/private/media/AudioTrackShared.h +++ b/include/private/media/AudioTrackShared.h @@ -48,7 +48,7 @@ namespace android { #define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client //EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation -#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 //assuming upto a maximum of 20 seconds of offloaded +#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded struct AudioTrackSharedStreaming { // similar to NBAIO MonoPipe @@ -96,11 +96,7 @@ struct audio_track_cblk_t // The value should be used "for entertainment purposes only", // which means don't make important decisions based on it. 
- size_t frameCount_; // used during creation to pass actual track buffer size - // from AudioFlinger to client, and not referenced again - // FIXME remove here and replace by createTrack() in/out - // parameter - // renamed to "_" to detect incorrect use + uint32_t mPad1; // unused volatile int32_t mFutex; // event flag: down (P) by client, // up (V) by server or binderDied() or interrupt() diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk index 56e7787..8aa54dc 100644 --- a/media/libmedia/Android.mk +++ b/media/libmedia/Android.mk @@ -44,7 +44,7 @@ LOCAL_SRC_FILES:= \ IAudioPolicyService.cpp \ MediaScanner.cpp \ MediaScannerClient.cpp \ - autodetect.cpp \ + CharacterEncodingDetector.cpp \ IMediaDeathNotifier.cpp \ MediaProfiles.cpp \ IEffect.cpp \ @@ -65,7 +65,7 @@ LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiat # Consider a separate a library for SingleStateQueueInstantiations. LOCAL_SHARED_LIBRARIES := \ - libui liblog libcutils libutils libbinder libsonivox libicuuc libexpat \ + libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \ libcamera_client libstagefright_foundation \ libgui libdl libaudioutils @@ -77,6 +77,7 @@ LOCAL_C_INCLUDES := \ $(call include-path-for, graphics corecg) \ $(TOP)/frameworks/native/include/media/openmax \ external/icu4c/common \ + external/icu4c/i18n \ $(call include-path-for, audio-effects) \ $(call include-path-for, audio-utils) diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp index 666fafa..a999e7e 100644 --- a/media/libmedia/AudioRecord.cpp +++ b/media/libmedia/AudioRecord.cpp @@ -71,7 +71,7 @@ status_t AudioRecord::getMinFrameCount( // --------------------------------------------------------------------------- AudioRecord::AudioRecord() - : mStatus(NO_INIT), mSessionId(0), + : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE), mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT) { } @@ -88,7 
+88,7 @@ AudioRecord::AudioRecord( int sessionId, transfer_type transferType, audio_input_flags_t flags) - : mStatus(NO_INIT), mSessionId(0), + : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE), mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT), mProxy(NULL) @@ -233,7 +233,7 @@ status_t AudioRecord::set( mNotificationFramesReq = notificationFrames; mNotificationFramesAct = 0; - if (sessionId == 0 ) { + if (sessionId == AUDIO_SESSION_ALLOCATE) { mSessionId = AudioSystem::newAudioSessionId(); } else { mSessionId = sessionId; @@ -244,7 +244,7 @@ status_t AudioRecord::set( // create the IAudioRecord status = openRecord_l(0 /*epoch*/); - if (status) { + if (status != NO_ERROR) { return status; } @@ -255,9 +255,6 @@ status_t AudioRecord::set( mStatus = NO_ERROR; - // Update buffer size in case it has been limited by AudioFlinger during track creation - mFrameCount = mCblk->frameCount_; - mActive = false; mCbf = cbf; mRefreshRemaining = true; @@ -289,6 +286,9 @@ status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession) // reset current position as seen by client to 0 mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition()); + // force refresh of remaining frames by processAudioBuffer() as last + // read before stop could be partial. 
+ mRefreshRemaining = true; mNewPosition = mProxy->getPosition() + mUpdatePeriod; int32_t flags = android_atomic_acquire_load(&mCblk->mFlags); @@ -352,6 +352,7 @@ bool AudioRecord::stopped() const status_t AudioRecord::setMarkerPosition(uint32_t marker) { + // The only purpose of setting marker position is to get a callback if (mCbf == NULL) { return INVALID_OPERATION; } @@ -377,6 +378,7 @@ status_t AudioRecord::getMarkerPosition(uint32_t *marker) const status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod) { + // The only purpose of setting position update period is to get a callback if (mCbf == NULL) { return INVALID_OPERATION; } @@ -412,7 +414,7 @@ status_t AudioRecord::getPosition(uint32_t *position) const return NO_ERROR; } -unsigned int AudioRecord::getInputFramesLost() const +uint32_t AudioRecord::getInputFramesLost() const { // no need to check mActive, because if inactive this will return 0, which is what we want return AudioSystem::getInputFramesLost(getInput()); @@ -461,24 +463,31 @@ status_t AudioRecord::openRecord_l(size_t epoch) ALOGE("Could not get audio input for record source %d", mInputSource); return BAD_VALUE; } + { + // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger, + // we must release it ourselves if anything goes wrong. 
+ size_t temp = mFrameCount; // temp may be replaced by a revised value of frameCount, + // but we will still need the original value also int originalSessionId = mSessionId; sp<IAudioRecord> record = audioFlinger->openRecord(input, mSampleRate, mFormat, mChannelMask, - mFrameCount, + &temp, &trackFlags, tid, &mSessionId, &status); - ALOGE_IF(originalSessionId != 0 && mSessionId != originalSessionId, + ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId, "session ID changed from %d to %d", originalSessionId, mSessionId); if (record == 0 || status != NO_ERROR) { ALOGE("AudioFlinger could not create record track, status: %d", status); - AudioSystem::releaseInput(input); - return status; + goto release; } + // AudioFlinger now owns the reference to the I/O handle, + // so we are no longer responsible for releasing it. + sp<IMemory> iMem = record->getCblk(); if (iMem == 0) { ALOGE("Could not get control block"); @@ -493,11 +502,19 @@ status_t AudioRecord::openRecord_l(size_t epoch) mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this); mDeathNotifier.clear(); } + + // We retain a copy of the I/O handle, but don't own the reference mInput = input; mAudioRecord = record; mCblkMemory = iMem; audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer); mCblk = cblk; + // note that temp is the (possibly revised) value of mFrameCount + if (temp < mFrameCount || (mFrameCount == 0 && temp == 0)) { + ALOGW("Requested frameCount %u but received frameCount %u", mFrameCount, temp); + } + mFrameCount = temp; + // FIXME missing fast track frameCount logic mAwaitBoost = false; if (mFlags & AUDIO_INPUT_FLAG_FAST) { @@ -530,6 +547,14 @@ status_t AudioRecord::openRecord_l(size_t epoch) mAudioRecord->asBinder()->linkToDeath(mDeathNotifier, this); return NO_ERROR; + } + +release: + AudioSystem::releaseInput(input); + if (status == NO_ERROR) { + status = NO_INIT; + } + return status; } status_t AudioRecord::obtainBuffer(Buffer* 
audioBuffer, int32_t waitCount) @@ -591,6 +616,9 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *r if (newSequence == oldSequence) { status = restoreRecord_l("obtainBuffer"); if (status != NO_ERROR) { + buffer.mFrameCount = 0; + buffer.mRaw = NULL; + buffer.mNonContig = 0; break; } } @@ -692,7 +720,7 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize) // ------------------------------------------------------------------------- -nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread) +nsecs_t AudioRecord::processAudioBuffer() { mLock.lock(); if (mAwaitBoost) { @@ -767,10 +795,10 @@ nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread) mRetryOnPartialBuffer = false; } size_t misalignment = mProxy->getMisalignment(); - int32_t sequence = mSequence; + uint32_t sequence = mSequence; // These fields don't need to be cached, because they are assigned only by set(): - // mTransfer, mCbf, mUserData, mSampleRate + // mTransfer, mCbf, mUserData, mSampleRate, mFrameSize mLock.unlock(); @@ -844,8 +872,8 @@ nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread) "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount); requested = &ClientProxy::kNonBlocking; size_t avail = audioBuffer.frameCount + nonContig; - ALOGV("obtainBuffer(%u) returned %u = %u + %u", - mRemainingFrames, avail, audioBuffer.frameCount, nonContig); + ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d", + mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err); if (err != NO_ERROR) { if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) { break; @@ -954,7 +982,7 @@ status_t AudioRecord::restoreRecord_l(const char *from) // ========================================================================= -void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who) +void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused) { sp<AudioRecord> audioRecord = 
mAudioRecord.promote(); if (audioRecord != 0) { @@ -966,7 +994,8 @@ void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who) // ========================================================================= AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava) - : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL) + : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL), + mIgnoreNextPausedInt(false) { } @@ -983,6 +1012,10 @@ bool AudioRecord::AudioRecordThread::threadLoop() // caller will check for exitPending() return true; } + if (mIgnoreNextPausedInt) { + mIgnoreNextPausedInt = false; + mPausedInt = false; + } if (mPausedInt) { if (mPausedNs > 0) { (void) mMyCond.waitRelative(mMyLock, mPausedNs); @@ -993,7 +1026,7 @@ bool AudioRecord::AudioRecordThread::threadLoop() return true; } } - nsecs_t ns = mReceiver.processAudioBuffer(this); + nsecs_t ns = mReceiver.processAudioBuffer(); switch (ns) { case 0: return true; @@ -1017,12 +1050,7 @@ void AudioRecord::AudioRecordThread::requestExit() { // must be in this order to avoid a race condition Thread::requestExit(); - AutoMutex _l(mMyLock); - if (mPaused || mPausedInt) { - mPaused = false; - mPausedInt = false; - mMyCond.signal(); - } + resume(); } void AudioRecord::AudioRecordThread::pause() @@ -1034,6 +1062,7 @@ void AudioRecord::AudioRecordThread::pause() void AudioRecord::AudioRecordThread::resume() { AutoMutex _l(mMyLock); + mIgnoreNextPausedInt = true; if (mPaused || mPausedInt) { mPaused = false; mPausedInt = false; diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp index 8033c2c..dcb72f8 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -40,10 +40,10 @@ audio_error_callback AudioSystem::gAudioErrorCallback = NULL; DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0); // Cached values 
for recording queries, all protected by gLock -uint32_t AudioSystem::gPrevInSamplingRate = 16000; -audio_format_t AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT; -audio_channel_mask_t AudioSystem::gPrevInChannelMask = AUDIO_CHANNEL_IN_MONO; -size_t AudioSystem::gInBuffSize = 0; +uint32_t AudioSystem::gPrevInSamplingRate; +audio_format_t AudioSystem::gPrevInFormat; +audio_channel_mask_t AudioSystem::gPrevInChannelMask; +size_t AudioSystem::gInBuffSize = 0; // zero indicates cache is invalid // establish binder interface to AudioFlinger service @@ -190,6 +190,16 @@ String8 AudioSystem::getParameters(audio_io_handle_t ioHandle, const String8& ke return result; } +status_t AudioSystem::setParameters(const String8& keyValuePairs) +{ + return setParameters((audio_io_handle_t) 0, keyValuePairs); +} + +String8 AudioSystem::getParameters(const String8& keys) +{ + return getParameters((audio_io_handle_t) 0, keys); +} + // convert volume steps to natural log scale // change this value to change volume scaling @@ -249,6 +259,11 @@ status_t AudioSystem::getSamplingRate(audio_io_handle_t output, *samplingRate = outputDesc->samplingRate; gLock.unlock(); } + if (*samplingRate == 0) { + ALOGE("AudioSystem::getSamplingRate failed for output %d stream type %d", + output, streamType); + return BAD_VALUE; + } ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %u", streamType, output, *samplingRate); @@ -289,6 +304,11 @@ status_t AudioSystem::getFrameCount(audio_io_handle_t output, *frameCount = outputDesc->frameCount; gLock.unlock(); } + if (*frameCount == 0) { + ALOGE("AudioSystem::getFrameCount failed for output %d stream type %d", + output, streamType); + return BAD_VALUE; + } ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output, *frameCount); @@ -349,6 +369,12 @@ status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t for return PERMISSION_DENIED; } inBuffSize = af->getInputBufferSize(sampleRate, format, 
channelMask); + if (inBuffSize == 0) { + ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %x channelMask %x", + sampleRate, format, channelMask); + return BAD_VALUE; + } + // A benign race is possible here: we could overwrite a fresher cache entry gLock.lock(); // save the request params gPrevInSamplingRate = sampleRate; @@ -387,9 +413,9 @@ status_t AudioSystem::getRenderPosition(audio_io_handle_t output, size_t *halFra return af->getRenderPosition(halFrames, dspFrames, output); } -size_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) { +uint32_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); - unsigned int result = 0; + uint32_t result = 0; if (af == 0) return result; if (ioHandle == 0) return result; @@ -419,7 +445,7 @@ void AudioSystem::releaseAudioSessionId(int audioSession) { // --------------------------------------------------------------------------- -void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) { +void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused) { Mutex::Autolock _l(AudioSystem::gLock); AudioSystem::gAudioFlinger.clear(); @@ -709,7 +735,8 @@ audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) audio_io_handle_t AudioSystem::getOutputForEffect(const effect_descriptor_t *desc) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); - if (aps == 0) return PERMISSION_DENIED; + // FIXME change return type to status_t, and return PERMISSION_DENIED here + if (aps == 0) return 0; return aps->getOutputForEffect(desc); } @@ -804,7 +831,7 @@ bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info) // --------------------------------------------------------------------------- -void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) { +void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who 
__unused) { Mutex::Autolock _l(AudioSystem::gLock); AudioSystem::gAudioPolicyService.clear(); diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index 0609a22..f61a265 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -44,9 +44,6 @@ status_t AudioTrack::getMinFrameCount( return BAD_VALUE; } - // default to 0 in case of error - *frameCount = 0; - // FIXME merge with similar code in createTrack_l(), except we're missing // some information here that is available in createTrack_l(): // audio_io_handle_t output @@ -54,16 +51,26 @@ status_t AudioTrack::getMinFrameCount( // audio_channel_mask_t channelMask // audio_output_flags_t flags uint32_t afSampleRate; - if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) { - return NO_INIT; + status_t status; + status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType); + if (status != NO_ERROR) { + ALOGE("Unable to query output sample rate for stream type %d; status %d", + streamType, status); + return status; } size_t afFrameCount; - if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) { - return NO_INIT; + status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType); + if (status != NO_ERROR) { + ALOGE("Unable to query output frame count for stream type %d; status %d", + streamType, status); + return status; } uint32_t afLatency; - if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) { - return NO_INIT; + status = AudioSystem::getOutputLatency(&afLatency, streamType); + if (status != NO_ERROR) { + ALOGE("Unable to query output latency for stream type %d; status %d", + streamType, status); + return status; } // Ensure that buffer depth covers at least audio hardware latency @@ -74,6 +81,13 @@ status_t AudioTrack::getMinFrameCount( *frameCount = (sampleRate == 0) ? 
afFrameCount * minBufCount : afFrameCount * minBufCount * sampleRate / afSampleRate; + // The formula above should always produce a non-zero value, but return an error + // in the unlikely event that it does not, as that's part of the API contract. + if (*frameCount == 0) { + ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d", + streamType, sampleRate); + return BAD_VALUE; + } ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d", *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency); return NO_ERROR; @@ -208,6 +222,7 @@ status_t AudioTrack::set( ALOGE("Invalid transfer type %d", transferType); return BAD_VALUE; } + mSharedBuffer = sharedBuffer; mTransfer = transferType; // FIXME "int" here is legacy and will be replaced by size_t later @@ -230,19 +245,24 @@ status_t AudioTrack::set( return INVALID_OPERATION; } - mOutput = 0; - // handle default values first. if (streamType == AUDIO_STREAM_DEFAULT) { streamType = AUDIO_STREAM_MUSIC; } + if (uint32_t(streamType) >= AUDIO_STREAM_CNT) { + ALOGE("Invalid stream type %d", streamType); + return BAD_VALUE; + } + mStreamType = streamType; + status_t status; if (sampleRate == 0) { - uint32_t afSampleRate; - if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) { - return NO_INIT; + status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType); + if (status != NO_ERROR) { + ALOGE("Could not get output sample rate for stream type %d; status %d", + streamType, status); + return status; } - sampleRate = afSampleRate; } mSampleRate = sampleRate; @@ -250,15 +270,18 @@ status_t AudioTrack::set( if (format == AUDIO_FORMAT_DEFAULT) { format = AUDIO_FORMAT_PCM_16_BIT; } - if (channelMask == 0) { - channelMask = AUDIO_CHANNEL_OUT_STEREO; - } // validate parameters if (!audio_is_valid_format(format)) { ALOGE("Invalid format %d", format); return BAD_VALUE; } + mFormat = format; + + if (!audio_is_output_channel(channelMask)) { + 
ALOGE("Invalid channel mask %#x", channelMask); + return BAD_VALUE; + } // AudioFlinger does not currently support 8-bit data in shared memory if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) { @@ -282,10 +305,6 @@ status_t AudioTrack::set( flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER); } - if (!audio_is_output_channel(channelMask)) { - ALOGE("Invalid channel mask %#x", channelMask); - return BAD_VALUE; - } mChannelMask = channelMask; uint32_t channelCount = popcount(channelMask); mChannelCount = channelCount; @@ -298,21 +317,20 @@ status_t AudioTrack::set( mFrameSizeAF = sizeof(uint8_t); } - audio_io_handle_t output = AudioSystem::getOutput( - streamType, - sampleRate, format, channelMask, - flags, - offloadInfo); - - if (output == 0) { - ALOGE("Could not get audio output for stream type %d", streamType); - return BAD_VALUE; + // Make copy of input parameter offloadInfo so that in the future: + // (a) createTrack_l doesn't need it as an input parameter + // (b) we can support re-creation of offloaded tracks + if (offloadInfo != NULL) { + mOffloadInfoCopy = *offloadInfo; + mOffloadInfo = &mOffloadInfoCopy; + } else { + mOffloadInfo = NULL; } mVolume[LEFT] = 1.0f; mVolume[RIGHT] = 1.0f; mSendLevel = 0.0f; - mFrameCount = frameCount; + // mFrameCount is initialized in createTrack_l mReqFrameCount = frameCount; mNotificationFramesReq = notificationFrames; mNotificationFramesAct = 0; @@ -332,14 +350,7 @@ status_t AudioTrack::set( } // create the IAudioTrack - status_t status = createTrack_l(streamType, - sampleRate, - format, - frameCount, - flags, - sharedBuffer, - output, - 0 /*epoch*/); + status = createTrack_l(0 /*epoch*/); if (status != NO_ERROR) { if (mAudioTrackThread != 0) { @@ -347,17 +358,20 @@ status_t AudioTrack::set( mAudioTrackThread->requestExitAndWait(); mAudioTrackThread.clear(); } + // Use of direct and offloaded output streams is ref counted by audio policy manager. 
+#if 0 // FIXME This should no longer be needed //Use of direct and offloaded output streams is ref counted by audio policy manager. // As getOutput was called above and resulted in an output stream to be opened, // we need to release it. - AudioSystem::releaseOutput(output); + if (mOutput != 0) { + AudioSystem::releaseOutput(mOutput); + mOutput = 0; + } +#endif return status; } mStatus = NO_ERROR; - mStreamType = streamType; - mFormat = format; - mSharedBuffer = sharedBuffer; mState = STATE_STOPPED; mUserData = user; mLoopPeriod = 0; @@ -369,7 +383,6 @@ status_t AudioTrack::set( mSequence = 1; mObservedSequence = mSequence; mInUnderrun = false; - mOutput = output; return NO_ERROR; } @@ -445,12 +458,11 @@ status_t AudioTrack::start() void AudioTrack::stop() { AutoMutex lock(mLock); - // FIXME pause then stop should not be a nop - if (mState != STATE_ACTIVE) { + if (mState != STATE_ACTIVE && mState != STATE_PAUSED) { return; } - if (isOffloaded()) { + if (isOffloaded_l()) { mState = STATE_STOPPING; } else { mState = STATE_STOPPED; @@ -472,7 +484,7 @@ void AudioTrack::stop() sp<AudioTrackThread> t = mAudioTrackThread; if (t != 0) { - if (!isOffloaded()) { + if (!isOffloaded_l()) { t->pause(); } } else { @@ -510,7 +522,7 @@ void AudioTrack::flush_l() mRefreshRemaining = true; mState = STATE_FLUSHED; - if (isOffloaded()) { + if (isOffloaded_l()) { mProxy->interrupt(); } mProxy->flush(); @@ -543,7 +555,7 @@ status_t AudioTrack::setVolume(float left, float right) mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000)); - if (isOffloaded()) { + if (isOffloaded_l()) { mAudioTrack->signal(); } return NO_ERROR; @@ -607,7 +619,7 @@ uint32_t AudioTrack::getSampleRate() const // sample rate can be updated during playback by the offloaded decoder so we need to // query the HAL and update if needed. 
// FIXME use Proxy return channel to update the rate from server and avoid polling here - if (isOffloaded()) { + if (isOffloaded_l()) { if (mOutput != 0) { uint32_t sampleRate = 0; status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate); @@ -691,6 +703,7 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) AutoMutex lock(mLock); mNewPosition = mProxy->getPosition() + updatePeriod; mUpdatePeriod = updatePeriod; + return NO_ERROR; } @@ -744,7 +757,7 @@ status_t AudioTrack::getPosition(uint32_t *position) const } AutoMutex lock(mLock); - if (isOffloaded()) { + if (isOffloaded_l()) { uint32_t dspFrames = 0; if (mOutput != 0) { @@ -793,23 +806,12 @@ status_t AudioTrack::reload() return NO_ERROR; } -audio_io_handle_t AudioTrack::getOutput() +audio_io_handle_t AudioTrack::getOutput() const { AutoMutex lock(mLock); return mOutput; } -// must be called with mLock held -audio_io_handle_t AudioTrack::getOutput_l() -{ - if (mOutput) { - return mOutput; - } else { - return AudioSystem::getOutput(mStreamType, - mSampleRate, mFormat, mChannelMask, mFlags); - } -} - status_t AudioTrack::attachAuxEffect(int effectId) { AutoMutex lock(mLock); @@ -823,15 +825,7 @@ status_t AudioTrack::attachAuxEffect(int effectId) // ------------------------------------------------------------------------- // must be called with mLock held -status_t AudioTrack::createTrack_l( - audio_stream_type_t streamType, - uint32_t sampleRate, - audio_format_t format, - size_t frameCount, - audio_output_flags_t flags, - const sp<IMemory>& sharedBuffer, - audio_io_handle_t output, - size_t epoch) +status_t AudioTrack::createTrack_l(size_t epoch) { status_t status; const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); @@ -840,41 +834,52 @@ status_t AudioTrack::createTrack_l( return NO_INIT; } + audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat, + mChannelMask, mFlags, mOffloadInfo); + if (output == 0) { + ALOGE("Could 
not get audio output for stream type %d, sample rate %u, format %#x, " + "channel mask %#x, flags %#x", + mStreamType, mSampleRate, mFormat, mChannelMask, mFlags); + return BAD_VALUE; + } + { + // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger, + // we must release it ourselves if anything goes wrong. + // Not all of these values are needed under all conditions, but it is easier to get them all uint32_t afLatency; - status = AudioSystem::getLatency(output, streamType, &afLatency); + status = AudioSystem::getLatency(output, mStreamType, &afLatency); if (status != NO_ERROR) { ALOGE("getLatency(%d) failed status %d", output, status); - return NO_INIT; + goto release; } size_t afFrameCount; - status = AudioSystem::getFrameCount(output, streamType, &afFrameCount); + status = AudioSystem::getFrameCount(output, mStreamType, &afFrameCount); if (status != NO_ERROR) { - ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status); - return NO_INIT; + ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, mStreamType, status); + goto release; } uint32_t afSampleRate; - status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate); + status = AudioSystem::getSamplingRate(output, mStreamType, &afSampleRate); if (status != NO_ERROR) { - ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status); - return NO_INIT; + ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, mStreamType, status); + goto release; } // Client decides whether the track is TIMED (see below), but can only express a preference // for FAST. Server will perform additional tests. 
- if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !( + if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !( // either of these use cases: // use case 1: shared buffer - (sharedBuffer != 0) || + (mSharedBuffer != 0) || // use case 2: callback handler (mCbf != NULL))) { ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client"); // once denied, do not request again if IAudioTrack is re-created - flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST); - mFlags = flags; + mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST); } ALOGV("createTrack_l() output %d afLatency %d", output, afLatency); @@ -885,43 +890,45 @@ status_t AudioTrack::createTrack_l( // n = 3 normal track, with sample rate conversion // (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering) // n > 3 very high latency or very small notification interval; nBuffering is ignored - const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3; + const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3; mNotificationFramesAct = mNotificationFramesReq; - if (!audio_is_linear_pcm(format)) { + size_t frameCount = mReqFrameCount; + if (!audio_is_linear_pcm(mFormat)) { - if (sharedBuffer != 0) { + if (mSharedBuffer != 0) { // Same comment as below about ignoring frameCount parameter for set() - frameCount = sharedBuffer->size(); + frameCount = mSharedBuffer->size(); } else if (frameCount == 0) { frameCount = afFrameCount; } if (mNotificationFramesAct != frameCount) { mNotificationFramesAct = frameCount; } - } else if (sharedBuffer != 0) { + } else if (mSharedBuffer != 0) { // Ensure that buffer alignment matches channel count // 8-bit data in shared memory is not currently supported by AudioFlinger - size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2; + size_t alignment = /* mFormat == AUDIO_FORMAT_PCM_8_BIT ? 
1 : */ 2; if (mChannelCount > 1) { // More than 2 channels does not require stronger alignment than stereo alignment <<= 1; } - if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) { + if (((size_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) { ALOGE("Invalid buffer alignment: address %p, channel count %u", - sharedBuffer->pointer(), mChannelCount); - return BAD_VALUE; + mSharedBuffer->pointer(), mChannelCount); + status = BAD_VALUE; + goto release; } // When initializing a shared buffer AudioTrack via constructors, // there's no frameCount parameter. // But when initializing a shared buffer AudioTrack via set(), // there _is_ a frameCount parameter. We silently ignore it. - frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t); + frameCount = mSharedBuffer->size()/mChannelCount/sizeof(int16_t); - } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) { + } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) { // FIXME move these calculations and associated checks to server @@ -933,10 +940,10 @@ status_t AudioTrack::createTrack_l( minBufCount = nBuffering; } - size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate; + size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate; ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u" ", afLatency=%d", - minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency); + minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency); if (frameCount == 0) { frameCount = minFrameCount; @@ -961,26 +968,28 @@ status_t AudioTrack::createTrack_l( } pid_t tid = -1; - if (flags & AUDIO_OUTPUT_FLAG_FAST) { + if (mFlags & AUDIO_OUTPUT_FLAG_FAST) { trackFlags |= IAudioFlinger::TRACK_FAST; if (mAudioTrackThread != 0) { tid = mAudioTrackThread->getTid(); } } - if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { + if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { trackFlags |= IAudioFlinger::TRACK_OFFLOAD; } - sp<IAudioTrack> 
track = audioFlinger->createTrack(streamType, - sampleRate, + size_t temp = frameCount; // temp may be replaced by a revised value of frameCount, + // but we will still need the original value also + sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType, + mSampleRate, // AudioFlinger only sees 16-bit PCM - format == AUDIO_FORMAT_PCM_8_BIT ? - AUDIO_FORMAT_PCM_16_BIT : format, + mFormat == AUDIO_FORMAT_PCM_8_BIT ? + AUDIO_FORMAT_PCM_16_BIT : mFormat, mChannelMask, - frameCount, + &temp, &trackFlags, - sharedBuffer, + mSharedBuffer, output, tid, &mSessionId, @@ -990,13 +999,21 @@ status_t AudioTrack::createTrack_l( if (track == 0) { ALOGE("AudioFlinger could not create track, status: %d", status); - return status; + goto release; } + // AudioFlinger now owns the reference to the I/O handle, + // so we are no longer responsible for releasing it. + sp<IMemory> iMem = track->getCblk(); if (iMem == 0) { ALOGE("Could not get control block"); return NO_INIT; } + void *iMemPointer = iMem->pointer(); + if (iMemPointer == NULL) { + ALOGE("Could not get control block pointer"); + return NO_INIT; + } // invariant that mAudioTrack != 0 is true only after set() returns successfully if (mAudioTrack != 0) { mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this); @@ -1004,9 +1021,9 @@ status_t AudioTrack::createTrack_l( } mAudioTrack = track; mCblkMemory = iMem; - audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer()); + audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer); mCblk = cblk; - size_t temp = cblk->frameCount_; + // note that temp is the (possibly revised) value of frameCount if (temp < frameCount || (frameCount == 0 && temp == 0)) { // In current design, AudioTrack client checks and ensures frame count validity before // passing it to AudioFlinger so AudioFlinger should not return a different value except @@ -1015,11 +1032,11 @@ status_t AudioTrack::createTrack_l( } frameCount = temp; mAwaitBoost = false; - if 
(flags & AUDIO_OUTPUT_FLAG_FAST) { + if (mFlags & AUDIO_OUTPUT_FLAG_FAST) { if (trackFlags & IAudioFlinger::TRACK_FAST) { ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount); mAwaitBoost = true; - if (sharedBuffer == 0) { + if (mSharedBuffer == 0) { // Theoretically double-buffering is not required for fast tracks, // due to tighter scheduling. But in practice, to accommodate kernels with // scheduling jitter, and apps with computation jitter, we use double-buffering. @@ -1030,26 +1047,27 @@ status_t AudioTrack::createTrack_l( } else { ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount); // once denied, do not request again if IAudioTrack is re-created - flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST); - mFlags = flags; - if (sharedBuffer == 0) { + mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST); + if (mSharedBuffer == 0) { if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) { mNotificationFramesAct = frameCount/nBuffering; } } } } - if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { + if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) { ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful"); } else { ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server"); - flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD); - mFlags = flags; - return NO_INIT; + mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD); + // FIXME This is a warning, not an error, so don't return error status + //return NO_INIT; } } + // We retain a copy of the I/O handle, but don't own the reference + mOutput = output; mRefreshRemaining = true; // Starting address of buffers in shared memory. If there is a shared buffer, buffers @@ -1057,15 +1075,15 @@ status_t AudioTrack::createTrack_l( // immediately after the control block. This address is for the mapping within client // address space. 
AudioFlinger::TrackBase::mBuffer is for the server address space. void* buffers; - if (sharedBuffer == 0) { + if (mSharedBuffer == 0) { buffers = (char*)cblk + sizeof(audio_track_cblk_t); } else { - buffers = sharedBuffer->pointer(); + buffers = mSharedBuffer->pointer(); } mAudioTrack->attachAuxEffect(mAuxEffectId); // FIXME don't believe this lie - mLatency = afLatency + (1000*frameCount) / sampleRate; + mLatency = afLatency + (1000*frameCount) / mSampleRate; mFrameCount = frameCount; // If IAudioTrack is re-created, don't let the requested frameCount // decrease. This can confuse clients that cache frameCount(). @@ -1074,7 +1092,7 @@ status_t AudioTrack::createTrack_l( } // update proxy - if (sharedBuffer == 0) { + if (mSharedBuffer == 0) { mStaticProxy.clear(); mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF); } else { @@ -1092,6 +1110,14 @@ status_t AudioTrack::createTrack_l( mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this); return NO_ERROR; + } + +release: + AudioSystem::releaseOutput(output); + if (status == NO_ERROR) { + status = NO_INIT; + } + return status; } status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) @@ -1344,7 +1370,7 @@ status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform, // ------------------------------------------------------------------------- -nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) +nsecs_t AudioTrack::processAudioBuffer() { // Currently the AudioTrack thread is not created if there are no callbacks. // Would it ever make sense to run the thread, even without callbacks? @@ -1382,7 +1408,7 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) // for offloaded tracks restoreTrack_l() will just update the sequence and clear // AudioSystem cache. 
We should not exit here but after calling the callback so // that the upper layers can recreate the track - if (!isOffloaded() || (mSequence == mObservedSequence)) { + if (!isOffloaded_l() || (mSequence == mObservedSequence)) { status_t status = restoreTrack_l("processAudioBuffer"); mLock.unlock(); // Run again immediately, but with a new IAudioTrack @@ -1603,7 +1629,6 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) size_t reqSize = audioBuffer.size; mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer); size_t writtenSize = audioBuffer.size; - size_t writtenFrames = writtenSize / mFrameSize; // Sanity check on returned size if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) { @@ -1669,22 +1694,19 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) status_t AudioTrack::restoreTrack_l(const char *from) { ALOGW("dead IAudioTrack, %s, creating a new one from %s()", - isOffloaded() ? "Offloaded" : "PCM", from); + isOffloaded_l() ? "Offloaded" : "PCM", from); ++mSequence; status_t result; // refresh the audio configuration cache in this process to make sure we get new - // output parameters in getOutput_l() and createTrack_l() + // output parameters in createTrack_l() AudioSystem::clearAudioConfigCache(); - if (isOffloaded()) { + if (isOffloaded_l()) { + // FIXME re-creation of offloaded tracks is not yet implemented return DEAD_OBJECT; } - // force new output query from audio policy manager; - mOutput = 0; - audio_io_handle_t output = getOutput_l(); - // if the new IAudioTrack is created, createTrack_l() will modify the // following member variables: mAudioTrack, mCblkMemory and mCblk. 
// It will also delete the strong references on previous IAudioTrack and IMemory @@ -1692,14 +1714,7 @@ status_t AudioTrack::restoreTrack_l(const char *from) // take the frames that will be lost by track recreation into account in saved position size_t position = mProxy->getPosition() + mProxy->getFramesFilled(); size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0; - result = createTrack_l(mStreamType, - mSampleRate, - mFormat, - mReqFrameCount, // so that frame count never goes down - mFlags, - mSharedBuffer, - output, - position /*epoch*/); + result = createTrack_l(position /*epoch*/); if (result == NO_ERROR) { // continue playback from last known position, but @@ -1727,10 +1742,16 @@ status_t AudioTrack::restoreTrack_l(const char *from) } } if (result != NO_ERROR) { + // Use of direct and offloaded output streams is ref counted by audio policy manager. +#if 0 // FIXME This should no longer be needed //Use of direct and offloaded output streams is ref counted by audio policy manager. // As getOutput was called above and resulted in an output stream to be opened, // we need to release it. 
- AudioSystem::releaseOutput(output); + if (mOutput != 0) { + AudioSystem::releaseOutput(mOutput); + mOutput = 0; + } +#endif ALOGW("restoreTrack_l() failed status %d", result); mState = STATE_STOPPED; } @@ -1763,14 +1784,21 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp) String8 AudioTrack::getParameters(const String8& keys) { - if (mOutput) { - return AudioSystem::getParameters(mOutput, keys); + audio_io_handle_t output = getOutput(); + if (output != 0) { + return AudioSystem::getParameters(output, keys); } else { return String8::empty(); } } -status_t AudioTrack::dump(int fd, const Vector<String16>& args) const +bool AudioTrack::isOffloaded() const +{ + AutoMutex lock(mLock); + return isOffloaded_l(); +} + +status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const { const size_t SIZE = 256; @@ -1800,7 +1828,7 @@ uint32_t AudioTrack::getUnderrunFrames() const // ========================================================================= -void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who) +void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused) { sp<AudioTrack> audioTrack = mAudioTrack.promote(); if (audioTrack != 0) { @@ -1844,7 +1872,7 @@ bool AudioTrack::AudioTrackThread::threadLoop() return true; } } - nsecs_t ns = mReceiver.processAudioBuffer(this); + nsecs_t ns = mReceiver.processAudioBuffer(); switch (ns) { case 0: return true; diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp index caa7900..21018a0 100644 --- a/media/libmedia/AudioTrackShared.cpp +++ b/media/libmedia/AudioTrackShared.cpp @@ -26,7 +26,7 @@ extern "C" { namespace android { audio_track_cblk_t::audio_track_cblk_t() - : mServer(0), frameCount_(0), mFutex(0), mMinimum(0), + : mServer(0), mFutex(0), mMinimum(0), mVolumeLR(0x10001000), mSampleRate(0), mSendLevel(0), mFlags(0) { memset(&u, 0, sizeof(u)); @@ -765,7 +765,7 @@ ssize_t StaticAudioTrackServerProxy::pollPosition() return (ssize_t) 
position; } -status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush) +status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush __unused) { if (mIsShutdown) { buffer->mFrameCount = 0; @@ -847,7 +847,7 @@ void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer) buffer->mNonContig = 0; } -void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount) +void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount __unused) { // Unlike AudioTrackServerProxy::tallyUnderrunFrames() used for streaming tracks, // we don't have a location to count underrun frames. The underrun frame counter diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp new file mode 100644 index 0000000..eb091ac --- /dev/null +++ b/media/libmedia/CharacterEncodingDetector.cpp @@ -0,0 +1,364 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "CharacterEncodingDector" +#include <utils/Log.h> + +#include "CharacterEncodingDetector.h" +#include "CharacterEncodingDetectorTables.h" + +#include "utils/Vector.h" +#include "StringArray.h" + +#include "unicode/ucnv.h" +#include "unicode/ucsdet.h" +#include "unicode/ustring.h" + +namespace android { + +CharacterEncodingDetector::CharacterEncodingDetector() { + + UErrorCode status = U_ZERO_ERROR; + mUtf8Conv = ucnv_open("UTF-8", &status); + if (U_FAILURE(status)) { + ALOGE("could not create UConverter for UTF-8"); + mUtf8Conv = NULL; + } +} + +CharacterEncodingDetector::~CharacterEncodingDetector() { + ucnv_close(mUtf8Conv); +} + +void CharacterEncodingDetector::addTag(const char *name, const char *value) { + mNames.push_back(name); + mValues.push_back(value); +} + +size_t CharacterEncodingDetector::size() { + return mNames.size(); +} + +status_t CharacterEncodingDetector::getTag(int index, const char **name, const char**value) { + if (index >= mNames.size()) { + return BAD_VALUE; + } + + *name = mNames.getEntry(index); + *value = mValues.getEntry(index); + return OK; +} + +static bool isPrintableAscii(const char *value, size_t len) { + for (size_t i = 0; i < len; i++) { + if ((value[i] & 0x80) || value[i] < 0x20 || value[i] == 0x7f) { + return false; + } + } + return true; +} + +void CharacterEncodingDetector::detectAndConvert() { + + int size = mNames.size(); + ALOGV("%d tags before conversion", size); + for (int i = 0; i < size; i++) { + ALOGV("%s: %s", mNames.getEntry(i), mValues.getEntry(i)); + } + + if (size && mUtf8Conv) { + + UErrorCode status = U_ZERO_ERROR; + UCharsetDetector *csd = ucsdet_open(&status); + const UCharsetMatch *ucm; + + // try combined detection of artist/album/title etc. 
+ char buf[1024]; + buf[0] = 0; + int idx; + for (int i = 0; i < size; i++) { + const char *name = mNames.getEntry(i); + const char *value = mValues.getEntry(i); + if (!isPrintableAscii(value, strlen(value)) && ( + !strcmp(name, "artist") || + !strcmp(name, "albumartist") || + !strcmp(name, "composer") || + !strcmp(name, "genre") || + !strcmp(name, "album") || + !strcmp(name, "title"))) { + strlcat(buf, value, sizeof(buf)); + // separate tags by space so ICU's ngram detector can do its job + strlcat(buf, " ", sizeof(buf)); + } + } + ucsdet_setText(csd, buf, strlen(buf), &status); + + int32_t matches; + const UCharsetMatch** ucma = ucsdet_detectAll(csd, &matches, &status); + const char *combinedenc = "???"; + + const UCharsetMatch* bestCombinedMatch = getPreferred(buf, strlen(buf), ucma, matches); + + if (bestCombinedMatch != NULL) { + combinedenc = ucsdet_getName(bestCombinedMatch, &status); + } + + for (int i = 0; i < size; i++) { + const char *name = mNames.getEntry(i); + uint8_t* src = (uint8_t *)mValues.getEntry(i); + int len = strlen((char *)src); + uint8_t* dest = src; + + ALOGV("@@@ checking %s", name); + const char *s = mValues.getEntry(i); + int32_t inputLength = strlen(s); + const char *enc; + + if (!strcmp(name, "artist") || + !strcmp(name, "albumartist") || + !strcmp(name, "composer") || + !strcmp(name, "genre") || + !strcmp(name, "album") || + !strcmp(name, "title")) { + // use encoding determined from the combination of artist/album/title etc. 
+ enc = combinedenc; + } else { + ucsdet_setText(csd, s, inputLength, &status); + ucm = ucsdet_detect(csd, &status); + if (!ucm) { + mValues.setEntry(i, "???"); + continue; + } + enc = ucsdet_getName(ucm, &status); + ALOGV("@@@@ recognized charset: %s for %s confidence %d", + enc, mNames.getEntry(i), ucsdet_getConfidence(ucm, &status)); + } + + if (strcmp(enc,"UTF-8") != 0) { + // only convert if the source encoding isn't already UTF-8 + ALOGV("@@@ using converter %s for %s", enc, mNames.getEntry(i)); + UConverter *conv = ucnv_open(enc, &status); + if (U_FAILURE(status)) { + ALOGE("could not create UConverter for %s", enc); + continue; + } + + // convert from native encoding to UTF-8 + const char* source = mValues.getEntry(i); + int targetLength = len * 3 + 1; + char* buffer = new char[targetLength]; + // don't normally check for NULL, but in this case targetLength may be large + if (!buffer) + break; + char* target = buffer; + + ucnv_convertEx(mUtf8Conv, conv, &target, target + targetLength, + &source, source + strlen(source), + NULL, NULL, NULL, NULL, TRUE, TRUE, &status); + + if (U_FAILURE(status)) { + ALOGE("ucnv_convertEx failed: %d", status); + mValues.setEntry(i, "???"); + } else { + // zero terminate + *target = 0; + mValues.setEntry(i, buffer); + } + + delete[] buffer; + + ucnv_close(conv); + } + } + + for (int i = size - 1; i >= 0; --i) { + if (strlen(mValues.getEntry(i)) == 0) { + ALOGV("erasing %s because entry is empty", mNames.getEntry(i)); + mNames.erase(i); + mValues.erase(i); + } + } + + ucsdet_close(csd); + } +} + +/* + * When ICU detects multiple encoding matches, apply additional heuristics to determine + * which one is the best match, since ICU can't always be trusted to make the right choice. 
+ * + * What this method does is: + * - decode the input using each of the matches found + * - recalculate the starting confidence level for multibyte encodings using a different + * algorithm and larger frequent character lists than ICU + * - devalue encoding where the conversion contains unlikely characters (symbols, reserved, etc) + * - pick the highest match + */ +const UCharsetMatch *CharacterEncodingDetector::getPreferred( + const char *input, size_t len, const UCharsetMatch** ucma, size_t nummatches) { + + Vector<const UCharsetMatch*> matches; + UErrorCode status = U_ZERO_ERROR; + + ALOGV("%d matches", nummatches); + for (size_t i = 0; i < nummatches; i++) { + const char *encname = ucsdet_getName(ucma[i], &status); + int confidence = ucsdet_getConfidence(ucma[i], &status); + ALOGV("%d: %s %d", i, encname, confidence); + matches.push_back(ucma[i]); + } + + size_t num = matches.size(); + if (num == 0) { + return NULL; + } + if (num == 1) { + return matches[0]; + } + + ALOGV("considering %d matches", num); + + // keep track of how many "special" characters result when converting the input using each + // encoding + Vector<int> newconfidence; + for (size_t i = 0; i < num; i++) { + const uint16_t *freqdata = NULL; + float freqcoverage = 0; + status = U_ZERO_ERROR; + const char *encname = ucsdet_getName(matches[i], &status); + int confidence = ucsdet_getConfidence(matches[i], &status); + if (!strcmp("GB18030", encname)) { + freqdata = frequent_zhCN; + freqcoverage = frequent_zhCN_coverage; + } else if (!strcmp("Big5", encname)) { + freqdata = frequent_zhTW; + freqcoverage = frequent_zhTW_coverage; + } else if (!strcmp("EUC-KR", encname)) { + freqdata = frequent_ko; + freqcoverage = frequent_ko_coverage; + } else if (!strcmp("EUC-JP", encname)) { + freqdata = frequent_ja; + freqcoverage = frequent_ja_coverage; + } else if (!strcmp("Shift_JIS", encname)) { + freqdata = frequent_ja; + freqcoverage = frequent_ja_coverage; + } + + ALOGV("%d: %s %d", i, encname, 
confidence); + UConverter *conv = ucnv_open(encname, &status); + const char *source = input; + const char *sourceLimit = input + len; + status = U_ZERO_ERROR; + int demerit = 0; + int frequentchars = 0; + int totalchars = 0; + while (true) { + // demerit the current encoding for each "special" character found after conversion. + // The amount of demerit is somewhat arbitrarily chosen. + int inchar; + if (source != sourceLimit) { + inchar = (source[0] << 8) + source[1]; + } + UChar32 c = ucnv_getNextUChar(conv, &source, sourceLimit, &status); + if (!U_SUCCESS(status)) { + break; + } + if (c < 0x20 || (c >= 0x7f && c <= 0x009f)) { + ALOGV("control character %x", c); + demerit += 100; + } else if ((c >= 0xa0 && c <= 0xbe) // symbols, superscripts + || (c == 0xd7) || (c == 0xf7) // multiplication and division signs + || (c >= 0x2000 && c <= 0x209f)) { // punctuation, superscripts + ALOGV("unlikely character %x", c); + demerit += 10; + } else if (c >= 0xe000 && c <= 0xf8ff) { + ALOGV("private use character %x", c); + demerit += 30; + } else if (c >= 0x2190 && c <= 0x2bff) { + // this range comprises various symbol ranges that are unlikely to appear in + // music file metadata. 
+ ALOGV("symbol %x", c); + demerit += 10; + } else if (c == 0xfffd) { + ALOGV("replacement character"); + demerit += 50; + } else if (c >= 0xfff0 && c <= 0xfffc) { + ALOGV("unicode special %x", c); + demerit += 50; + } else if (freqdata != NULL) { + totalchars++; + if (isFrequent(freqdata, c)) { + frequentchars++; + } + } + } + if (freqdata != NULL && totalchars != 0) { + int myconfidence = 10 + float((100 * frequentchars) / totalchars) / freqcoverage; + ALOGV("ICU confidence: %d, my confidence: %d (%d %d)", confidence, myconfidence, + totalchars, frequentchars); + if (myconfidence > 100) myconfidence = 100; + if (myconfidence < 0) myconfidence = 0; + confidence = myconfidence; + } + ALOGV("%d-%d=%d", confidence, demerit, confidence - demerit); + newconfidence.push_back(confidence - demerit); + ucnv_close(conv); + if (i == 0 && (confidence - demerit) == 100) { + // no need to check any further, we'll end up using this match anyway + break; + } + } + + // find match with highest confidence after adjusting for unlikely characters + int highest = newconfidence[0]; + size_t highestidx = 0; + num = newconfidence.size(); + for (size_t i = 1; i < num; i++) { + if (newconfidence[i] > highest) { + highest = newconfidence[i]; + highestidx = i; + } + } + status = U_ZERO_ERROR; + ALOGV("selecting '%s' w/ %d confidence", ucsdet_getName(matches[highestidx], &status), highest); + return matches[highestidx]; +} + + +bool CharacterEncodingDetector::isFrequent(const uint16_t *values, uint32_t c) { + + int start = 0; + int end = 511; // All the tables have 512 entries + int mid = (start+end)/2; + + while(start <= end) { + if(c == values[mid]) { + return true; + } else if (c > values[mid]) { + start = mid + 1; + } else { + end = mid - 1; + } + + mid = (start + end) / 2; + } + + return false; +} + + +} // namespace android diff --git a/media/libmedia/CharacterEncodingDetector.h b/media/libmedia/CharacterEncodingDetector.h new file mode 100644 index 0000000..3655a91 --- /dev/null +++ 
b/media/libmedia/CharacterEncodingDetector.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _CHARACTER_ENCODING_DETECTOR_H +#define _CHARACTER_ENCODING_DETECTOR_H + +#include <media/mediascanner.h> + +#include "StringArray.h" + +#include "unicode/ucnv.h" +#include "unicode/ucsdet.h" +#include "unicode/ustring.h" + +namespace android { + +class CharacterEncodingDetector { + + public: + CharacterEncodingDetector(); + ~CharacterEncodingDetector(); + + void addTag(const char *name, const char *value); + size_t size(); + + void detectAndConvert(); + status_t getTag(int index, const char **name, const char**value); + + private: + const UCharsetMatch *getPreferred( + const char *input, size_t len, const UCharsetMatch** ucma, size_t matches); + + bool isFrequent(const uint16_t *values, uint32_t c); + + // cached name and value strings, for native encoding support. 
+ // TODO: replace these with byte blob arrays that don't require the data to be + // singlenullbyte-terminated + StringArray mNames; + StringArray mValues; + + UConverter* mUtf8Conv; +}; + + + +}; // namespace android + +#endif diff --git a/media/libmedia/CharacterEncodingDetectorTables.h b/media/libmedia/CharacterEncodingDetectorTables.h new file mode 100644 index 0000000..1fe1137 --- /dev/null +++ b/media/libmedia/CharacterEncodingDetectorTables.h @@ -0,0 +1,2092 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// The 512 most frequently occuring characters for the zhCN language in a sample of the Internet. 
+// Ordered by codepoint, comment shows character and ranking by frequency +const uint16_t frequent_zhCN[] = { + 0x4E00, // 一, #2 + 0x4E07, // 万, #306 + 0x4E09, // 三, #138 + 0x4E0A, // 上, #16 + 0x4E0B, // 下, #25 + 0x4E0D, // 不, #7 + 0x4E0E, // 与, #133 + 0x4E13, // 专, #151 + 0x4E16, // 世, #346 + 0x4E1A, // 业, #39 + 0x4E1C, // 东, #197 + 0x4E24, // 两, #376 + 0x4E2A, // 个, #23 + 0x4E2D, // 中, #4 + 0x4E3A, // 为, #31 + 0x4E3B, // 主, #95 + 0x4E3E, // 举, #418 + 0x4E48, // 么, #93 + 0x4E4B, // 之, #131 + 0x4E50, // 乐, #130 + 0x4E5F, // 也, #145 + 0x4E66, // 书, #283 + 0x4E70, // 买, #483 + 0x4E86, // 了, #13 + 0x4E8B, // 事, #168 + 0x4E8C, // 二, #218 + 0x4E8E, // 于, #64 + 0x4E94, // 五, #430 + 0x4E9A, // 亚, #468 + 0x4E9B, // 些, #366 + 0x4EA4, // 交, #243 + 0x4EA7, // 产, #86 + 0x4EAB, // 享, #345 + 0x4EAC, // 京, #206 + 0x4EBA, // 人, #3 + 0x4EC0, // 什, #287 + 0x4ECB, // 介, #478 + 0x4ECE, // 从, #381 + 0x4ED6, // 他, #129 + 0x4EE3, // 代, #241 + 0x4EE5, // 以, #51 + 0x4EEC, // 们, #83 + 0x4EF6, // 件, #141 + 0x4EF7, // 价, #140 + 0x4EFB, // 任, #383 + 0x4F01, // 企, #439 + 0x4F18, // 优, #374 + 0x4F1A, // 会, #29 + 0x4F20, // 传, #222 + 0x4F46, // 但, #451 + 0x4F4D, // 位, #208 + 0x4F53, // 体, #98 + 0x4F55, // 何, #339 + 0x4F5C, // 作, #44 + 0x4F60, // 你, #76 + 0x4F7F, // 使, #272 + 0x4F9B, // 供, #375 + 0x4FDD, // 保, #180 + 0x4FE1, // 信, #84 + 0x4FEE, // 修, #437 + 0x503C, // 值, #450 + 0x505A, // 做, #368 + 0x5065, // 健, #484 + 0x50CF, // 像, #487 + 0x513F, // 儿, #326 + 0x5143, // 元, #202 + 0x5148, // 先, #485 + 0x5149, // 光, #254 + 0x514B, // 克, #503 + 0x514D, // 免, #349 + 0x5165, // 入, #156 + 0x5168, // 全, #47 + 0x516C, // 公, #35 + 0x5171, // 共, #448 + 0x5173, // 关, #49 + 0x5176, // 其, #195 + 0x5177, // 具, #329 + 0x5185, // 内, #109 + 0x518C, // 册, #225 + 0x519B, // 军, #466 + 0x51FA, // 出, #53 + 0x51FB, // 击, #359 + 0x5206, // 分, #22 + 0x5217, // 列, #410 + 0x521B, // 创, #399 + 0x5229, // 利, #296 + 0x522B, // 别, #372 + 0x5230, // 到, #33 + 0x5236, // 制, #192 + 0x524D, // 前, #117 + 0x529B, // 力, #173 + 
0x529E, // 办, #436 + 0x529F, // 功, #455 + 0x52A0, // 加, #97 + 0x52A1, // 务, #100 + 0x52A8, // 动, #46 + 0x52A9, // 助, #365 + 0x5305, // 包, #331 + 0x5316, // 化, #155 + 0x5317, // 北, #194 + 0x533A, // 区, #105 + 0x533B, // 医, #234 + 0x5341, // 十, #294 + 0x534E, // 华, #205 + 0x5355, // 单, #259 + 0x5357, // 南, #182 + 0x535A, // 博, #153 + 0x5361, // 卡, #332 + 0x539F, // 原, #271 + 0x53BB, // 去, #282 + 0x53C2, // 参, #500 + 0x53CA, // 及, #255 + 0x53CB, // 友, #186 + 0x53CD, // 反, #422 + 0x53D1, // 发, #15 + 0x53D7, // 受, #507 + 0x53D8, // 变, #395 + 0x53E3, // 口, #293 + 0x53EA, // 只, #340 + 0x53EF, // 可, #45 + 0x53F0, // 台, #267 + 0x53F7, // 号, #121 + 0x53F8, // 司, #150 + 0x5404, // 各, #491 + 0x5408, // 合, #115 + 0x540C, // 同, #189 + 0x540D, // 名, #127 + 0x540E, // 后, #75 + 0x5411, // 向, #459 + 0x5427, // 吧, #353 + 0x544A, // 告, #318 + 0x5458, // 员, #232 + 0x5468, // 周, #347 + 0x548C, // 和, #43 + 0x54C1, // 品, #36 + 0x5546, // 商, #148 + 0x5668, // 器, #228 + 0x56DB, // 四, #352 + 0x56DE, // 回, #38 + 0x56E0, // 因, #355 + 0x56E2, // 团, #412 + 0x56ED, // 园, #470 + 0x56FD, // 国, #12 + 0x56FE, // 图, #32 + 0x5728, // 在, #10 + 0x5730, // 地, #30 + 0x573A, // 场, #177 + 0x575B, // 坛, #364 + 0x578B, // 型, #274 + 0x57CE, // 城, #172 + 0x57FA, // 基, #315 + 0x58EB, // 士, #434 + 0x58F0, // 声, #397 + 0x5904, // 处, #416 + 0x5907, // 备, #270 + 0x590D, // 复, #122 + 0x5916, // 外, #190 + 0x591A, // 多, #40 + 0x5927, // 大, #8 + 0x5929, // 天, #52 + 0x592A, // 太, #456 + 0x5934, // 头, #258 + 0x5973, // 女, #65 + 0x597D, // 好, #62 + 0x5982, // 如, #135 + 0x5A31, // 娱, #452 + 0x5B50, // 子, #37 + 0x5B57, // 字, #285 + 0x5B66, // 学, #19 + 0x5B89, // 安, #144 + 0x5B8C, // 完, #469 + 0x5B9A, // 定, #179 + 0x5B9D, // 宝, #188 + 0x5B9E, // 实, #154 + 0x5BA2, // 客, #174 + 0x5BB6, // 家, #26 + 0x5BB9, // 容, #307 + 0x5BC6, // 密, #471 + 0x5BF9, // 对, #90 + 0x5BFC, // 导, #348 + 0x5C06, // 将, #265 + 0x5C0F, // 小, #28 + 0x5C11, // 少, #379 + 0x5C14, // 尔, #490 + 0x5C31, // 就, #101 + 0x5C55, // 展, #291 + 0x5C71, // 山, #239 + 
0x5DDE, // 州, #227 + 0x5DE5, // 工, #73 + 0x5DF1, // 己, #480 + 0x5DF2, // 已, #310 + 0x5E02, // 市, #78 + 0x5E03, // 布, #350 + 0x5E08, // 师, #277 + 0x5E16, // 帖, #396 + 0x5E26, // 带, #449 + 0x5E2E, // 帮, #461 + 0x5E38, // 常, #319 + 0x5E73, // 平, #217 + 0x5E74, // 年, #20 + 0x5E76, // 并, #440 + 0x5E7F, // 广, #166 + 0x5E93, // 库, #446 + 0x5E94, // 应, #187 + 0x5E97, // 店, #320 + 0x5EA6, // 度, #114 + 0x5EB7, // 康, #499 + 0x5EFA, // 建, #211 + 0x5F00, // 开, #72 + 0x5F0F, // 式, #207 + 0x5F15, // 引, #495 + 0x5F20, // 张, #385 + 0x5F3A, // 强, #404 + 0x5F53, // 当, #233 + 0x5F55, // 录, #146 + 0x5F62, // 形, #494 + 0x5F69, // 彩, #356 + 0x5F71, // 影, #214 + 0x5F88, // 很, #300 + 0x5F97, // 得, #193 + 0x5FAE, // 微, #245 + 0x5FC3, // 心, #70 + 0x5FEB, // 快, #324 + 0x6001, // 态, #508 + 0x600E, // 怎, #370 + 0x6027, // 性, #99 + 0x603B, // 总, #398 + 0x606F, // 息, #176 + 0x60A8, // 您, #251 + 0x60C5, // 情, #87 + 0x60F3, // 想, #290 + 0x610F, // 意, #184 + 0x611F, // 感, #253 + 0x620F, // 戏, #237 + 0x6210, // 成, #71 + 0x6211, // 我, #11 + 0x6216, // 或, #321 + 0x6218, // 战, #369 + 0x6237, // 户, #215 + 0x623F, // 房, #236 + 0x6240, // 所, #147 + 0x624B, // 手, #55 + 0x624D, // 才, #407 + 0x6253, // 打, #281 + 0x6280, // 技, #203 + 0x6295, // 投, #408 + 0x62A4, // 护, #502 + 0x62A5, // 报, #113 + 0x62DB, // 招, #363 + 0x6301, // 持, #403 + 0x6307, // 指, #414 + 0x636E, // 据, #409 + 0x6392, // 排, #377 + 0x63A5, // 接, #266 + 0x63A8, // 推, #244 + 0x63D0, // 提, #181 + 0x641C, // 搜, #301 + 0x64AD, // 播, #401 + 0x652F, // 支, #400 + 0x6536, // 收, #158 + 0x653E, // 放, #317 + 0x653F, // 政, #380 + 0x6548, // 效, #496 + 0x6559, // 教, #170 + 0x6570, // 数, #136 + 0x6587, // 文, #21 + 0x6599, // 料, #295 + 0x65AF, // 斯, #473 + 0x65B0, // 新, #14 + 0x65B9, // 方, #68 + 0x65C5, // 旅, #457 + 0x65E0, // 无, #164 + 0x65E5, // 日, #50 + 0x65F6, // 时, #18 + 0x660E, // 明, #132 + 0x6613, // 易, #428 + 0x661F, // 星, #240 + 0x662F, // 是, #6 + 0x663E, // 显, #486 + 0x66F4, // 更, #103 + 0x6700, // 最, #61 + 0x6708, // 月, #80 + 0x6709, // 有, #5 + 
0x670D, // 服, #94 + 0x671F, // 期, #139 + 0x672C, // 本, #56 + 0x672F, // 术, #216 + 0x673A, // 机, #27 + 0x6743, // 权, #250 + 0x6761, // 条, #309 + 0x6765, // 来, #42 + 0x677F, // 板, #505 + 0x6797, // 林, #475 + 0x679C, // 果, #212 + 0x67E5, // 查, #165 + 0x6807, // 标, #269 + 0x6821, // 校, #462 + 0x6837, // 样, #314 + 0x683C, // 格, #238 + 0x6848, // 案, #378 + 0x697C, // 楼, #342 + 0x6A21, // 模, #413 + 0x6B21, // 次, #263 + 0x6B22, // 欢, #443 + 0x6B3E, // 款, #358 + 0x6B63, // 正, #219 + 0x6B64, // 此, #362 + 0x6BD4, // 比, #298 + 0x6C11, // 民, #279 + 0x6C14, // 气, #303 + 0x6C34, // 水, #163 + 0x6C42, // 求, #373 + 0x6C5F, // 江, #336 + 0x6CA1, // 没, #229 + 0x6CBB, // 治, #425 + 0x6CD5, // 法, #85 + 0x6CE8, // 注, #119 + 0x6D3B, // 活, #231 + 0x6D41, // 流, #280 + 0x6D4B, // 测, #460 + 0x6D77, // 海, #124 + 0x6D88, // 消, #415 + 0x6DF1, // 深, #477 + 0x6E05, // 清, #311 + 0x6E38, // 游, #81 + 0x6E90, // 源, #325 + 0x706B, // 火, #498 + 0x70B9, // 点, #58 + 0x70ED, // 热, #183 + 0x7136, // 然, #308 + 0x7167, // 照, #431 + 0x7231, // 爱, #223 + 0x7247, // 片, #128 + 0x7248, // 版, #91 + 0x724C, // 牌, #429 + 0x7269, // 物, #169 + 0x7279, // 特, #224 + 0x738B, // 王, #351 + 0x73A9, // 玩, #476 + 0x73B0, // 现, #125 + 0x7403, // 球, #367 + 0x7406, // 理, #69 + 0x751F, // 生, #24 + 0x7528, // 用, #17 + 0x7531, // 由, #441 + 0x7535, // 电, #34 + 0x7537, // 男, #275 + 0x754C, // 界, #419 + 0x75C5, // 病, #371 + 0x767B, // 登, #204 + 0x767D, // 白, #338 + 0x767E, // 百, #157 + 0x7684, // 的, #1 + 0x76D8, // 盘, #493 + 0x76EE, // 目, #261 + 0x76F4, // 直, #391 + 0x76F8, // 相, #143 + 0x7701, // 省, #464 + 0x770B, // 看, #54 + 0x771F, // 真, #249 + 0x7740, // 着, #302 + 0x77E5, // 知, #142 + 0x7801, // 码, #257 + 0x7814, // 研, #387 + 0x793A, // 示, #334 + 0x793E, // 社, #343 + 0x795E, // 神, #330 + 0x798F, // 福, #509 + 0x79BB, // 离, #454 + 0x79CD, // 种, #278 + 0x79D1, // 科, #126 + 0x79EF, // 积, #390 + 0x7A0B, // 程, #209 + 0x7A76, // 究, #504 + 0x7A7A, // 空, #312 + 0x7ACB, // 立, #393 + 0x7AD9, // 站, #107 + 0x7AE0, // 章, #304 + 0x7B2C, // 第, #96 + 
0x7B49, // 等, #210 + 0x7B54, // 答, #256 + 0x7B80, // 简, #474 + 0x7BA1, // 管, #221 + 0x7C7B, // 类, #246 + 0x7CBE, // 精, #226 + 0x7CFB, // 系, #89 + 0x7D22, // 索, #354 + 0x7EA2, // 红, #417 + 0x7EA7, // 级, #178 + 0x7EBF, // 线, #108 + 0x7EC4, // 组, #389 + 0x7EC6, // 细, #442 + 0x7ECF, // 经, #74 + 0x7ED3, // 结, #333 + 0x7ED9, // 给, #384 + 0x7EDC, // 络, #472 + 0x7EDF, // 统, #344 + 0x7F16, // 编, #424 + 0x7F51, // 网, #9 + 0x7F6E, // 置, #411 + 0x7F8E, // 美, #60 + 0x8001, // 老, #292 + 0x8003, // 考, #288 + 0x8005, // 者, #106 + 0x800C, // 而, #297 + 0x8054, // 联, #159 + 0x80B2, // 育, #327 + 0x80FD, // 能, #59 + 0x81EA, // 自, #77 + 0x8272, // 色, #198 + 0x8282, // 节, #361 + 0x82B1, // 花, #299 + 0x82F1, // 英, #316 + 0x8350, // 荐, #402 + 0x836F, // 药, #481 + 0x8425, // 营, #394 + 0x85CF, // 藏, #337 + 0x884C, // 行, #41 + 0x8868, // 表, #104 + 0x88AB, // 被, #289 + 0x88C5, // 装, #161 + 0x897F, // 西, #199 + 0x8981, // 要, #48 + 0x89C1, // 见, #360 + 0x89C2, // 观, #423 + 0x89C4, // 规, #453 + 0x89C6, // 视, #120 + 0x89E3, // 解, #264 + 0x8A00, // 言, #433 + 0x8BA1, // 计, #191 + 0x8BA4, // 认, #482 + 0x8BA9, // 让, #421 + 0x8BAE, // 议, #427 + 0x8BAF, // 讯, #388 + 0x8BB0, // 记, #273 + 0x8BBA, // 论, #66 + 0x8BBE, // 设, #162 + 0x8BC1, // 证, #201 + 0x8BC4, // 评, #111 + 0x8BC6, // 识, #463 + 0x8BD5, // 试, #323 + 0x8BDD, // 话, #247 + 0x8BE2, // 询, #432 + 0x8BE5, // 该, #447 + 0x8BE6, // 详, #497 + 0x8BED, // 语, #268 + 0x8BF4, // 说, #112 + 0x8BF7, // 请, #213 + 0x8BFB, // 读, #341 + 0x8C03, // 调, #438 + 0x8D22, // 财, #488 + 0x8D28, // 质, #386 + 0x8D2D, // 购, #260 + 0x8D34, // 贴, #510 + 0x8D39, // 费, #242 + 0x8D44, // 资, #116 + 0x8D77, // 起, #220 + 0x8D85, // 超, #406 + 0x8DEF, // 路, #235 + 0x8EAB, // 身, #262 + 0x8F66, // 车, #82 + 0x8F6C, // 转, #322 + 0x8F7D, // 载, #175 + 0x8FBE, // 达, #435 + 0x8FC7, // 过, #118 + 0x8FD0, // 运, #357 + 0x8FD1, // 近, #492 + 0x8FD8, // 还, #171 + 0x8FD9, // 这, #57 + 0x8FDB, // 进, #160 + 0x8FDE, // 连, #489 + 0x9009, // 选, #328 + 0x901A, // 通, #137 + 0x901F, // 速, #458 + 0x9020, // 造, 
#511 + 0x9053, // 道, #79 + 0x90A3, // 那, #305 + 0x90E8, // 部, #102 + 0x90FD, // 都, #167 + 0x914D, // 配, #479 + 0x9152, // 酒, #444 + 0x91CC, // 里, #196 + 0x91CD, // 重, #230 + 0x91CF, // 量, #248 + 0x91D1, // 金, #134 + 0x9500, // 销, #465 + 0x957F, // 长, #152 + 0x95E8, // 门, #185 + 0x95EE, // 问, #92 + 0x95F4, // 间, #88 + 0x95FB, // 闻, #313 + 0x9605, // 阅, #467 + 0x9633, // 阳, #420 + 0x9645, // 际, #501 + 0x9650, // 限, #286 + 0x9662, // 院, #276 + 0x96C6, // 集, #284 + 0x9700, // 需, #405 + 0x9762, // 面, #123 + 0x97F3, // 音, #335 + 0x9875, // 页, #63 + 0x9879, // 项, #506 + 0x9891, // 频, #200 + 0x9898, // 题, #110 + 0x98CE, // 风, #252 + 0x98DF, // 食, #445 + 0x9996, // 首, #149 + 0x9999, // 香, #512 + 0x9A6C, // 马, #392 + 0x9A8C, // 验, #382 + 0x9AD8, // 高, #67 + 0x9F99, // 龙, #426 +}; +// the percentage of the sample covered by the above characters +static const float frequent_zhCN_coverage=0.718950369339973; + +// The 512 most frequently occuring characters for the zhTW language in a sample of the Internet. 
+// Ordered by codepoint, comment shows character and ranking by frequency +const uint16_t frequent_zhTW[] = { + 0x4E00, // 一, #2 + 0x4E09, // 三, #131 + 0x4E0A, // 上, #12 + 0x4E0B, // 下, #37 + 0x4E0D, // 不, #6 + 0x4E16, // 世, #312 + 0x4E26, // 並, #434 + 0x4E2D, // 中, #9 + 0x4E3B, // 主, #97 + 0x4E4B, // 之, #55 + 0x4E5F, // 也, #95 + 0x4E86, // 了, #19 + 0x4E8B, // 事, #128 + 0x4E8C, // 二, #187 + 0x4E94, // 五, #339 + 0x4E9B, // 些, #435 + 0x4E9E, // 亞, #432 + 0x4EA4, // 交, #264 + 0x4EAB, // 享, #160 + 0x4EBA, // 人, #3 + 0x4EC0, // 什, #483 + 0x4ECA, // 今, #380 + 0x4ECB, // 介, #468 + 0x4ED6, // 他, #65 + 0x4EE3, // 代, #284 + 0x4EE5, // 以, #26 + 0x4EF6, // 件, #234 + 0x4EFB, // 任, #381 + 0x4EFD, // 份, #447 + 0x4F46, // 但, #281 + 0x4F4D, // 位, #202 + 0x4F4F, // 住, #471 + 0x4F55, // 何, #334 + 0x4F5C, // 作, #56 + 0x4F60, // 你, #64 + 0x4F7F, // 使, #236 + 0x4F86, // 來, #38 + 0x4F9B, // 供, #397 + 0x4FBF, // 便, #440 + 0x4FC2, // 係, #506 + 0x4FDD, // 保, #161 + 0x4FE1, // 信, #268 + 0x4FEE, // 修, #473 + 0x500B, // 個, #27 + 0x5011, // 們, #109 + 0x505A, // 做, #383 + 0x5065, // 健, #415 + 0x5099, // 備, #461 + 0x50B3, // 傳, #277 + 0x50CF, // 像, #403 + 0x50F9, // 價, #93 + 0x512A, // 優, #396 + 0x5143, // 元, #158 + 0x5148, // 先, #382 + 0x5149, // 光, #216 + 0x514D, // 免, #321 + 0x5152, // 兒, #374 + 0x5165, // 入, #58 + 0x5167, // 內, #106 + 0x5168, // 全, #67 + 0x5169, // 兩, #322 + 0x516C, // 公, #53 + 0x516D, // 六, #493 + 0x5171, // 共, #456 + 0x5176, // 其, #148 + 0x5177, // 具, #328 + 0x518A, // 冊, #360 + 0x518D, // 再, #311 + 0x51FA, // 出, #44 + 0x5206, // 分, #15 + 0x5217, // 列, #259 + 0x5225, // 別, #361 + 0x5229, // 利, #251 + 0x5230, // 到, #29 + 0x5247, // 則, #511 + 0x524D, // 前, #82 + 0x5275, // 創, #409 + 0x529B, // 力, #176 + 0x529F, // 功, #430 + 0x52A0, // 加, #87 + 0x52A9, // 助, #465 + 0x52D5, // 動, #48 + 0x52D9, // 務, #102 + 0x5305, // 包, #248 + 0x5316, // 化, #223 + 0x5317, // 北, #145 + 0x5340, // 區, #60 + 0x5341, // 十, #242 + 0x5357, // 南, #261 + 0x535A, // 博, #484 + 0x5361, // 卡, #327 + 
0x5370, // 印, #498 + 0x5373, // 即, #351 + 0x539F, // 原, #237 + 0x53BB, // 去, #190 + 0x53C3, // 參, #444 + 0x53C8, // 又, #426 + 0x53CA, // 及, #136 + 0x53CB, // 友, #142 + 0x53D6, // 取, #422 + 0x53D7, // 受, #410 + 0x53E3, // 口, #357 + 0x53EA, // 只, #250 + 0x53EF, // 可, #35 + 0x53F0, // 台, #34 + 0x53F8, // 司, #226 + 0x5403, // 吃, #362 + 0x5404, // 各, #454 + 0x5408, // 合, #147 + 0x540C, // 同, #173 + 0x540D, // 名, #108 + 0x544A, // 告, #186 + 0x548C, // 和, #130 + 0x54C1, // 品, #23 + 0x54E1, // 員, #150 + 0x5546, // 商, #75 + 0x554F, // 問, #120 + 0x559C, // 喜, #502 + 0x55AE, // 單, #210 + 0x55CE, // 嗎, #443 + 0x5668, // 器, #305 + 0x56DB, // 四, #318 + 0x56DE, // 回, #59 + 0x56E0, // 因, #253 + 0x570B, // 國, #21 + 0x5712, // 園, #345 + 0x5716, // 圖, #73 + 0x5718, // 團, #338 + 0x5728, // 在, #11 + 0x5730, // 地, #50 + 0x578B, // 型, #270 + 0x57CE, // 城, #466 + 0x57FA, // 基, #349 + 0x5831, // 報, #127 + 0x5834, // 場, #165 + 0x58EB, // 士, #372 + 0x5916, // 外, #152 + 0x591A, // 多, #54 + 0x5927, // 大, #8 + 0x5929, // 天, #43 + 0x592A, // 太, #343 + 0x5947, // 奇, #325 + 0x5973, // 女, #85 + 0x5979, // 她, #420 + 0x597D, // 好, #22 + 0x5982, // 如, #144 + 0x5B50, // 子, #46 + 0x5B57, // 字, #275 + 0x5B78, // 學, #49 + 0x5B89, // 安, #239 + 0x5B8C, // 完, #320 + 0x5B9A, // 定, #159 + 0x5BA2, // 客, #188 + 0x5BB6, // 家, #31 + 0x5BB9, // 容, #244 + 0x5BE6, // 實, #198 + 0x5BF6, // 寶, #367 + 0x5C07, // 將, #232 + 0x5C08, // 專, #133 + 0x5C0B, // 尋, #352 + 0x5C0D, // 對, #126 + 0x5C0E, // 導, #418 + 0x5C0F, // 小, #20 + 0x5C11, // 少, #368 + 0x5C31, // 就, #63 + 0x5C55, // 展, #341 + 0x5C71, // 山, #273 + 0x5DE5, // 工, #121 + 0x5DF1, // 己, #402 + 0x5DF2, // 已, #299 + 0x5E02, // 市, #81 + 0x5E2B, // 師, #262 + 0x5E36, // 帶, #470 + 0x5E38, // 常, #303 + 0x5E73, // 平, #297 + 0x5E74, // 年, #30 + 0x5E97, // 店, #171 + 0x5EA6, // 度, #220 + 0x5EB7, // 康, #441 + 0x5EE3, // 廣, #279 + 0x5EFA, // 建, #254 + 0x5F0F, // 式, #155 + 0x5F15, // 引, #346 + 0x5F35, // 張, #366 + 0x5F37, // 強, #437 + 0x5F71, // 影, #94 + 0x5F88, // 很, #177 + 
0x5F8C, // 後, #66 + 0x5F97, // 得, #113 + 0x5F9E, // 從, #436 + 0x5FC3, // 心, #57 + 0x5FEB, // 快, #292 + 0x6027, // 性, #175 + 0x606F, // 息, #378 + 0x60A8, // 您, #252 + 0x60C5, // 情, #123 + 0x60F3, // 想, #178 + 0x610F, // 意, #168 + 0x611B, // 愛, #125 + 0x611F, // 感, #211 + 0x61C9, // 應, #164 + 0x6210, // 成, #86 + 0x6211, // 我, #7 + 0x6216, // 或, #199 + 0x6230, // 戰, #438 + 0x6232, // 戲, #309 + 0x6236, // 戶, #497 + 0x623F, // 房, #274 + 0x6240, // 所, #79 + 0x624B, // 手, #68 + 0x624D, // 才, #400 + 0x6253, // 打, #278 + 0x627E, // 找, #449 + 0x6280, // 技, #332 + 0x6295, // 投, #425 + 0x62C9, // 拉, #500 + 0x62CD, // 拍, #398 + 0x6307, // 指, #407 + 0x6392, // 排, #458 + 0x63A5, // 接, #326 + 0x63A8, // 推, #153 + 0x63D0, // 提, #235 + 0x641C, // 搜, #314 + 0x6469, // 摩, #472 + 0x6536, // 收, #249 + 0x6539, // 改, #508 + 0x653E, // 放, #331 + 0x653F, // 政, #295 + 0x6559, // 教, #184 + 0x6574, // 整, #394 + 0x6578, // 數, #134 + 0x6587, // 文, #16 + 0x6599, // 料, #167 + 0x65AF, // 斯, #476 + 0x65B0, // 新, #10 + 0x65B9, // 方, #96 + 0x65BC, // 於, #70 + 0x65C5, // 旅, #289 + 0x65E5, // 日, #18 + 0x660E, // 明, #118 + 0x6613, // 易, #482 + 0x661F, // 星, #205 + 0x662F, // 是, #5 + 0x6642, // 時, #13 + 0x66F4, // 更, #149 + 0x66F8, // 書, #209 + 0x6700, // 最, #51 + 0x6703, // 會, #14 + 0x6708, // 月, #25 + 0x6709, // 有, #4 + 0x670D, // 服, #99 + 0x671F, // 期, #139 + 0x672A, // 未, #404 + 0x672C, // 本, #45 + 0x6771, // 東, #221 + 0x677F, // 板, #364 + 0x6797, // 林, #330 + 0x679C, // 果, #179 + 0x67E5, // 查, #283 + 0x683C, // 格, #157 + 0x6848, // 案, #392 + 0x689D, // 條, #406 + 0x696D, // 業, #103 + 0x6A02, // 樂, #116 + 0x6A13, // 樓, #411 + 0x6A19, // 標, #384 + 0x6A23, // 樣, #306 + 0x6A5F, // 機, #40 + 0x6AA2, // 檢, #359 + 0x6B0A, // 權, #228 + 0x6B21, // 次, #227 + 0x6B3E, // 款, #276 + 0x6B4C, // 歌, #496 + 0x6B61, // 歡, #427 + 0x6B63, // 正, #206 + 0x6B64, // 此, #247 + 0x6BCF, // 每, #391 + 0x6BD4, // 比, #257 + 0x6C11, // 民, #230 + 0x6C23, // 氣, #200 + 0x6C34, // 水, #140 + 0x6C42, // 求, #501 + 0x6C92, // 沒, #162 + 
0x6CD5, // 法, #89 + 0x6D3B, // 活, #124 + 0x6D41, // 流, #315 + 0x6D77, // 海, #258 + 0x6D88, // 消, #342 + 0x6E05, // 清, #329 + 0x6E2F, // 港, #293 + 0x6F14, // 演, #491 + 0x7063, // 灣, #195 + 0x70BA, // 為, #39 + 0x7121, // 無, #107 + 0x7136, // 然, #215 + 0x7167, // 照, #376 + 0x71B1, // 熱, #245 + 0x7247, // 片, #90 + 0x7248, // 版, #112 + 0x724C, // 牌, #467 + 0x7269, // 物, #110 + 0x7279, // 特, #183 + 0x738B, // 王, #287 + 0x73A9, // 玩, #354 + 0x73FE, // 現, #143 + 0x7403, // 球, #350 + 0x7406, // 理, #105 + 0x751F, // 生, #24 + 0x7522, // 產, #201 + 0x7528, // 用, #17 + 0x7531, // 由, #288 + 0x7537, // 男, #298 + 0x754C, // 界, #399 + 0x7559, // 留, #218 + 0x756B, // 畫, #412 + 0x7576, // 當, #185 + 0x767B, // 登, #138 + 0x767C, // 發, #28 + 0x767D, // 白, #377 + 0x767E, // 百, #393 + 0x7684, // 的, #1 + 0x76EE, // 目, #271 + 0x76F4, // 直, #379 + 0x76F8, // 相, #98 + 0x770B, // 看, #52 + 0x771F, // 真, #180 + 0x773C, // 眼, #433 + 0x77E5, // 知, #170 + 0x78BC, // 碼, #481 + 0x793A, // 示, #353 + 0x793E, // 社, #333 + 0x795E, // 神, #304 + 0x7968, // 票, #477 + 0x798F, // 福, #494 + 0x79C1, // 私, #507 + 0x79D1, // 科, #280 + 0x7A0B, // 程, #272 + 0x7A2E, // 種, #337 + 0x7A4D, // 積, #385 + 0x7A7A, // 空, #324 + 0x7ACB, // 立, #286 + 0x7AD9, // 站, #117 + 0x7AE0, // 章, #141 + 0x7B2C, // 第, #135 + 0x7B49, // 等, #240 + 0x7BA1, // 管, #340 + 0x7BC0, // 節, #431 + 0x7BC7, // 篇, #479 + 0x7C21, // 簡, #499 + 0x7CBE, // 精, #213 + 0x7CFB, // 系, #212 + 0x7D04, // 約, #462 + 0x7D05, // 紅, #452 + 0x7D1A, // 級, #267 + 0x7D30, // 細, #486 + 0x7D44, // 組, #335 + 0x7D50, // 結, #243 + 0x7D66, // 給, #355 + 0x7D71, // 統, #375 + 0x7D93, // 經, #111 + 0x7DB2, // 網, #32 + 0x7DDA, // 線, #151 + 0x7E23, // 縣, #439 + 0x7E3D, // 總, #370 + 0x7F8E, // 美, #41 + 0x7FA9, // 義, #504 + 0x8001, // 老, #290 + 0x8003, // 考, #428 + 0x8005, // 者, #92 + 0x800C, // 而, #217 + 0x805E, // 聞, #181 + 0x806F, // 聯, #310 + 0x8072, // 聲, #413 + 0x80A1, // 股, #390 + 0x80B2, // 育, #453 + 0x80FD, // 能, #71 + 0x8166, // 腦, #408 + 0x81EA, // 自, #61 + 0x81F3, // 至, #344 
+ 0x8207, // 與, #84 + 0x8209, // 舉, #463 + 0x8272, // 色, #192 + 0x82B1, // 花, #255 + 0x82F1, // 英, #348 + 0x83EF, // 華, #196 + 0x842C, // 萬, #316 + 0x843D, // 落, #308 + 0x8457, // 著, #233 + 0x85A6, // 薦, #401 + 0x85CF, // 藏, #503 + 0x85DD, // 藝, #488 + 0x8655, // 處, #419 + 0x865F, // 號, #191 + 0x884C, // 行, #47 + 0x8853, // 術, #395 + 0x8868, // 表, #77 + 0x88AB, // 被, #291 + 0x88DD, // 裝, #256 + 0x88E1, // 裡, #369 + 0x88FD, // 製, #510 + 0x897F, // 西, #300 + 0x8981, // 要, #36 + 0x898B, // 見, #307 + 0x8996, // 視, #204 + 0x89BA, // 覺, #450 + 0x89BD, // 覽, #387 + 0x89C0, // 觀, #365 + 0x89E3, // 解, #323 + 0x8A00, // 言, #169 + 0x8A02, // 訂, #423 + 0x8A08, // 計, #225 + 0x8A0A, // 訊, #156 + 0x8A0E, // 討, #373 + 0x8A18, // 記, #222 + 0x8A2D, // 設, #174 + 0x8A3B, // 註, #356 + 0x8A55, // 評, #246 + 0x8A66, // 試, #448 + 0x8A71, // 話, #229 + 0x8A72, // 該, #446 + 0x8A8D, // 認, #464 + 0x8A9E, // 語, #371 + 0x8AAA, // 說, #91 + 0x8ABF, // 調, #509 + 0x8ACB, // 請, #119 + 0x8AD6, // 論, #114 + 0x8B1D, // 謝, #389 + 0x8B49, // 證, #429 + 0x8B58, // 識, #416 + 0x8B70, // 議, #485 + 0x8B77, // 護, #475 + 0x8B80, // 讀, #386 + 0x8B8A, // 變, #388 + 0x8B93, // 讓, #336 + 0x8CA8, // 貨, #313 + 0x8CB7, // 買, #260 + 0x8CBB, // 費, #203 + 0x8CC7, // 資, #62 + 0x8CE3, // 賣, #294 + 0x8CEA, // 質, #457 + 0x8CFC, // 購, #189 + 0x8D77, // 起, #214 + 0x8D85, // 超, #296 + 0x8DDF, // 跟, #489 + 0x8DEF, // 路, #137 + 0x8EAB, // 身, #197 + 0x8ECA, // 車, #76 + 0x8F09, // 載, #301 + 0x8F49, // 轉, #282 + 0x8FD1, // 近, #414 + 0x9001, // 送, #363 + 0x9019, // 這, #42 + 0x901A, // 通, #207 + 0x901F, // 速, #495 + 0x9020, // 造, #455 + 0x9023, // 連, #285 + 0x9032, // 進, #231 + 0x904A, // 遊, #132 + 0x904B, // 運, #219 + 0x904E, // 過, #101 + 0x9053, // 道, #146 + 0x9054, // 達, #417 + 0x9078, // 選, #182 + 0x9084, // 還, #154 + 0x908A, // 邊, #487 + 0x90A3, // 那, #269 + 0x90E8, // 部, #78 + 0x90FD, // 都, #104 + 0x914D, // 配, #421 + 0x9152, // 酒, #512 + 0x91AB, // 醫, #358 + 0x91CD, // 重, #224 + 0x91CF, // 量, #319 + 0x91D1, // 金, #115 + 0x9304, // 
錄, #302 + 0x9577, // 長, #172 + 0x9580, // 門, #193 + 0x958B, // 開, #72 + 0x9593, // 間, #80 + 0x95B1, // 閱, #405 + 0x95DC, // 關, #74 + 0x963F, // 阿, #460 + 0x9650, // 限, #265 + 0x9662, // 院, #474 + 0x9664, // 除, #478 + 0x969B, // 際, #459 + 0x96C6, // 集, #347 + 0x96E2, // 離, #442 + 0x96FB, // 電, #33 + 0x9700, // 需, #445 + 0x975E, // 非, #451 + 0x9762, // 面, #129 + 0x97F3, // 音, #194 + 0x9801, // 頁, #83 + 0x982D, // 頭, #238 + 0x984C, // 題, #122 + 0x985E, // 類, #163 + 0x98A8, // 風, #266 + 0x98DF, // 食, #208 + 0x9910, // 餐, #469 + 0x9928, // 館, #424 + 0x9996, // 首, #166 + 0x9999, // 香, #263 + 0x99AC, // 馬, #317 + 0x9A57, // 驗, #492 + 0x9AD4, // 體, #100 + 0x9AD8, // 高, #88 + 0x9EBC, // 麼, #241 + 0x9EC3, // 黃, #480 + 0x9ED1, // 黑, #490 + 0x9EDE, // 點, #69 + 0x9F8D, // 龍, #505 +}; +// the percentage of the sample covered by the above characters +static const float frequent_zhTW_coverage=0.704841200026877; + +// The 512 most frequently occuring characters for the ja language in a sample of the Internet. 
+// Ordered by codepoint, comment shows character and ranking by frequency +const uint16_t frequent_ja[] = { + 0x3005, // 々, #352 + 0x3041, // ぁ, #486 + 0x3042, // あ, #50 + 0x3044, // い, #2 + 0x3046, // う, #33 + 0x3048, // え, #83 + 0x304A, // お, #37 + 0x304B, // か, #21 + 0x304C, // が, #17 + 0x304D, // き, #51 + 0x304E, // ぎ, #324 + 0x304F, // く, #38 + 0x3050, // ぐ, #334 + 0x3051, // け, #60 + 0x3052, // げ, #296 + 0x3053, // こ, #34 + 0x3054, // ご, #100 + 0x3055, // さ, #31 + 0x3056, // ざ, #378 + 0x3057, // し, #4 + 0x3058, // じ, #121 + 0x3059, // す, #12 + 0x305A, // ず, #215 + 0x305B, // せ, #86 + 0x305D, // そ, #68 + 0x305F, // た, #11 + 0x3060, // だ, #42 + 0x3061, // ち, #67 + 0x3063, // っ, #23 + 0x3064, // つ, #73 + 0x3066, // て, #7 + 0x3067, // で, #6 + 0x3068, // と, #14 + 0x3069, // ど, #75 + 0x306A, // な, #8 + 0x306B, // に, #5 + 0x306D, // ね, #123 + 0x306E, // の, #1 + 0x306F, // は, #16 + 0x3070, // ば, #150 + 0x3071, // ぱ, #259 + 0x3072, // ひ, #364 + 0x3073, // び, #266 + 0x3075, // ふ, #484 + 0x3076, // ぶ, #330 + 0x3078, // へ, #146 + 0x3079, // べ, #207 + 0x307B, // ほ, #254 + 0x307E, // ま, #18 + 0x307F, // み, #74 + 0x3080, // む, #285 + 0x3081, // め, #78 + 0x3082, // も, #32 + 0x3083, // ゃ, #111 + 0x3084, // や, #85 + 0x3086, // ゆ, #392 + 0x3087, // ょ, #224 + 0x3088, // よ, #63 + 0x3089, // ら, #29 + 0x308A, // り, #28 + 0x308B, // る, #9 + 0x308C, // れ, #35 + 0x308D, // ろ, #127 + 0x308F, // わ, #88 + 0x3092, // を, #19 + 0x3093, // ん, #22 + 0x30A1, // ァ, #193 + 0x30A2, // ア, #27 + 0x30A3, // ィ, #70 + 0x30A4, // イ, #15 + 0x30A6, // ウ, #89 + 0x30A7, // ェ, #134 + 0x30A8, // エ, #81 + 0x30A9, // ォ, #225 + 0x30AA, // オ, #76 + 0x30AB, // カ, #52 + 0x30AC, // ガ, #147 + 0x30AD, // キ, #66 + 0x30AE, // ギ, #246 + 0x30AF, // ク, #25 + 0x30B0, // グ, #39 + 0x30B1, // ケ, #137 + 0x30B2, // ゲ, #200 + 0x30B3, // コ, #46 + 0x30B4, // ゴ, #183 + 0x30B5, // サ, #64 + 0x30B6, // ザ, #221 + 0x30B7, // シ, #48 + 0x30B8, // ジ, #55 + 0x30B9, // ス, #13 + 0x30BA, // ズ, #103 + 0x30BB, // セ, #109 + 0x30BC, // ゼ, #499 + 
0x30BD, // ソ, #175 + 0x30BF, // タ, #45 + 0x30C0, // ダ, #104 + 0x30C1, // チ, #71 + 0x30C3, // ッ, #20 + 0x30C4, // ツ, #119 + 0x30C6, // テ, #59 + 0x30C7, // デ, #82 + 0x30C8, // ト, #10 + 0x30C9, // ド, #44 + 0x30CA, // ナ, #102 + 0x30CB, // ニ, #72 + 0x30CD, // ネ, #117 + 0x30CE, // ノ, #192 + 0x30CF, // ハ, #164 + 0x30D0, // バ, #62 + 0x30D1, // パ, #90 + 0x30D2, // ヒ, #398 + 0x30D3, // ビ, #77 + 0x30D4, // ピ, #135 + 0x30D5, // フ, #47 + 0x30D6, // ブ, #56 + 0x30D7, // プ, #43 + 0x30D8, // ヘ, #268 + 0x30D9, // ベ, #157 + 0x30DA, // ペ, #125 + 0x30DB, // ホ, #155 + 0x30DC, // ボ, #168 + 0x30DD, // ポ, #114 + 0x30DE, // マ, #57 + 0x30DF, // ミ, #97 + 0x30E0, // ム, #69 + 0x30E1, // メ, #53 + 0x30E2, // モ, #142 + 0x30E3, // ャ, #93 + 0x30E4, // ヤ, #258 + 0x30E5, // ュ, #79 + 0x30E6, // ユ, #405 + 0x30E7, // ョ, #98 + 0x30E9, // ラ, #26 + 0x30EA, // リ, #30 + 0x30EB, // ル, #24 + 0x30EC, // レ, #41 + 0x30ED, // ロ, #40 + 0x30EF, // ワ, #144 + 0x30F3, // ン, #3 + 0x30F4, // ヴ, #483 + 0x30FD, // ヽ, #501 + 0x4E00, // 一, #84 + 0x4E07, // 万, #337 + 0x4E09, // 三, #323 + 0x4E0A, // 上, #133 + 0x4E0B, // 下, #180 + 0x4E0D, // 不, #277 + 0x4E16, // 世, #385 + 0x4E2D, // 中, #87 + 0x4E3B, // 主, #432 + 0x4E88, // 予, #326 + 0x4E8B, // 事, #95 + 0x4E8C, // 二, #394 + 0x4E95, // 井, #468 + 0x4EA4, // 交, #410 + 0x4EAC, // 京, #260 + 0x4EBA, // 人, #61 + 0x4ECA, // 今, #184 + 0x4ECB, // 介, #358 + 0x4ED5, // 仕, #391 + 0x4ED6, // 他, #256 + 0x4ED8, // 付, #243 + 0x4EE3, // 代, #280 + 0x4EE5, // 以, #216 + 0x4EF6, // 件, #190 + 0x4F1A, // 会, #105 + 0x4F4D, // 位, #177 + 0x4F4F, // 住, #376 + 0x4F53, // 体, #223 + 0x4F55, // 何, #294 + 0x4F5C, // 作, #154 + 0x4F7F, // 使, #233 + 0x4F9B, // 供, #503 + 0x4FA1, // 価, #217 + 0x4FBF, // 便, #511 + 0x4FDD, // 保, #279 + 0x4FE1, // 信, #271 + 0x500B, // 個, #415 + 0x50CF, // 像, #178 + 0x512A, // 優, #403 + 0x5143, // 元, #384 + 0x5148, // 先, #311 + 0x5149, // 光, #488 + 0x5165, // 入, #115 + 0x5168, // 全, #173 + 0x516C, // 公, #287 + 0x5177, // 具, #447 + 0x5185, // 内, #169 + 0x5186, // 円, #131 + 0x5199, // 写, 
#275 + 0x51FA, // 出, #110 + 0x5206, // 分, #130 + 0x5207, // 切, #401 + 0x521D, // 初, #319 + 0x5225, // 別, #290 + 0x5229, // 利, #226 + 0x5236, // 制, #507 + 0x524D, // 前, #124 + 0x529B, // 力, #272 + 0x52A0, // 加, #249 + 0x52D5, // 動, #120 + 0x52D9, // 務, #421 + 0x52DF, // 募, #476 + 0x5316, // 化, #308 + 0x5317, // 北, #341 + 0x533A, // 区, #348 + 0x539F, // 原, #321 + 0x53C2, // 参, #452 + 0x53CB, // 友, #451 + 0x53D6, // 取, #237 + 0x53D7, // 受, #354 + 0x53E3, // 口, #289 + 0x53E4, // 古, #339 + 0x53EF, // 可, #298 + 0x53F0, // 台, #439 + 0x53F7, // 号, #361 + 0x5408, // 合, #118 + 0x540C, // 同, #263 + 0x540D, // 名, #65 + 0x5411, // 向, #434 + 0x544A, // 告, #386 + 0x5468, // 周, #393 + 0x5473, // 味, #299 + 0x548C, // 和, #350 + 0x54C1, // 品, #96 + 0x54E1, // 員, #293 + 0x5546, // 商, #198 + 0x554F, // 問, #158 + 0x55B6, // 営, #438 + 0x5668, // 器, #366 + 0x56DE, // 回, #143 + 0x56F3, // 図, #444 + 0x56FD, // 国, #153 + 0x5712, // 園, #435 + 0x571F, // 土, #239 + 0x5728, // 在, #351 + 0x5730, // 地, #163 + 0x578B, // 型, #430 + 0x5831, // 報, #112 + 0x5834, // 場, #139 + 0x58F2, // 売, #232 + 0x5909, // 変, #306 + 0x5916, // 外, #222 + 0x591A, // 多, #336 + 0x5927, // 大, #80 + 0x5929, // 天, #278 + 0x5973, // 女, #161 + 0x597D, // 好, #349 + 0x5A5A, // 婚, #479 + 0x5B50, // 子, #113 + 0x5B57, // 字, #492 + 0x5B66, // 学, #132 + 0x5B89, // 安, #295 + 0x5B9A, // 定, #145 + 0x5B9F, // 実, #220 + 0x5BA4, // 室, #482 + 0x5BAE, // 宮, #487 + 0x5BB6, // 家, #211 + 0x5BB9, // 容, #333 + 0x5BFE, // 対, #252 + 0x5C02, // 専, #474 + 0x5C0F, // 小, #212 + 0x5C11, // 少, #377 + 0x5C4B, // 屋, #284 + 0x5C71, // 山, #206 + 0x5CA1, // 岡, #429 + 0x5CF6, // 島, #297 + 0x5DDD, // 川, #253 + 0x5DE5, // 工, #374 + 0x5E02, // 市, #159 + 0x5E2F, // 帯, #416 + 0x5E38, // 常, #437 + 0x5E73, // 平, #390 + 0x5E74, // 年, #54 + 0x5E83, // 広, #367 + 0x5E97, // 店, #149 + 0x5EA6, // 度, #269 + 0x5EAB, // 庫, #380 + 0x5F0F, // 式, #265 + 0x5F15, // 引, #345 + 0x5F37, // 強, #446 + 0x5F53, // 当, #240 + 0x5F62, // 形, #502 + 0x5F8C, // 後, #230 + 0x5F97, // 得, #490 + 
0x5FC3, // 心, #307 + 0x5FC5, // 必, #422 + 0x5FDC, // 応, #356 + 0x601D, // 思, #189 + 0x6027, // 性, #201 + 0x6075, // 恵, #400 + 0x60C5, // 情, #140 + 0x60F3, // 想, #477 + 0x610F, // 意, #305 + 0x611B, // 愛, #273 + 0x611F, // 感, #257 + 0x6210, // 成, #262 + 0x6226, // 戦, #365 + 0x6240, // 所, #236 + 0x624B, // 手, #160 + 0x6295, // 投, #129 + 0x6301, // 持, #355 + 0x6307, // 指, #425 + 0x63A2, // 探, #369 + 0x63B2, // 掲, #399 + 0x643A, // 携, #459 + 0x652F, // 支, #512 + 0x653E, // 放, #469 + 0x6559, // 教, #270 + 0x6570, // 数, #181 + 0x6587, // 文, #202 + 0x6599, // 料, #106 + 0x65B0, // 新, #99 + 0x65B9, // 方, #126 + 0x65C5, // 旅, #445 + 0x65E5, // 日, #36 + 0x660E, // 明, #300 + 0x6620, // 映, #418 + 0x6642, // 時, #107 + 0x66F4, // 更, #359 + 0x66F8, // 書, #174 + 0x6700, // 最, #152 + 0x6708, // 月, #49 + 0x6709, // 有, #302 + 0x671F, // 期, #332 + 0x6728, // 木, #203 + 0x672C, // 本, #92 + 0x6750, // 材, #489 + 0x6751, // 村, #466 + 0x6765, // 来, #267 + 0x6771, // 東, #191 + 0x677F, // 板, #411 + 0x679C, // 果, #441 + 0x6821, // 校, #327 + 0x682A, // 株, #412 + 0x683C, // 格, #228 + 0x691C, // 検, #179 + 0x696D, // 業, #166 + 0x697D, // 楽, #172 + 0x69D8, // 様, #255 + 0x6A5F, // 機, #235 + 0x6B21, // 次, #318 + 0x6B62, // 止, #475 + 0x6B63, // 正, #312 + 0x6C17, // 気, #116 + 0x6C34, // 水, #165 + 0x6C42, // 求, #465 + 0x6C7A, // 決, #370 + 0x6CBB, // 治, #505 + 0x6CC1, // 況, #462 + 0x6CD5, // 法, #227 + 0x6CE8, // 注, #372 + 0x6D3B, // 活, #303 + 0x6D41, // 流, #480 + 0x6D77, // 海, #274 + 0x6E08, // 済, #417 + 0x6F14, // 演, #504 + 0x706B, // 火, #264 + 0x70B9, // 点, #331 + 0x7121, // 無, #58 + 0x7248, // 版, #409 + 0x7269, // 物, #170 + 0x7279, // 特, #242 + 0x72B6, // 状, #458 + 0x73FE, // 現, #322 + 0x7406, // 理, #162 + 0x751F, // 生, #122 + 0x7523, // 産, #320 + 0x7528, // 用, #94 + 0x7530, // 田, #195 + 0x7537, // 男, #373 + 0x753A, // 町, #314 + 0x753B, // 画, #91 + 0x754C, // 界, #436 + 0x756A, // 番, #261 + 0x75C5, // 病, #428 + 0x767A, // 発, #194 + 0x767B, // 登, #231 + 0x767D, // 白, #419 + 0x7684, // 的, #251 + 0x76EE, // 
目, #197 + 0x76F4, // 直, #497 + 0x76F8, // 相, #286 + 0x770C, // 県, #199 + 0x771F, // 真, #219 + 0x7740, // 着, #283 + 0x77E5, // 知, #185 + 0x77F3, // 石, #500 + 0x78BA, // 確, #383 + 0x793A, // 示, #241 + 0x793E, // 社, #167 + 0x795E, // 神, #315 + 0x798F, // 福, #423 + 0x79C1, // 私, #347 + 0x79D1, // 科, #420 + 0x7A0E, // 税, #368 + 0x7A2E, // 種, #455 + 0x7A3F, // 稿, #148 + 0x7A7A, // 空, #427 + 0x7ACB, // 立, #309 + 0x7B11, // 笑, #454 + 0x7B2C, // 第, #317 + 0x7B49, // 等, #457 + 0x7B54, // 答, #426 + 0x7BA1, // 管, #481 + 0x7CFB, // 系, #408 + 0x7D04, // 約, #276 + 0x7D20, // 素, #407 + 0x7D22, // 索, #214 + 0x7D30, // 細, #381 + 0x7D39, // 紹, #471 + 0x7D42, // 終, #456 + 0x7D44, // 組, #424 + 0x7D4C, // 経, #360 + 0x7D50, // 結, #291 + 0x7D9A, // 続, #357 + 0x7DCF, // 総, #467 + 0x7DDA, // 線, #338 + 0x7DE8, // 編, #453 + 0x7F8E, // 美, #204 + 0x8003, // 考, #387 + 0x8005, // 者, #151 + 0x805E, // 聞, #463 + 0x8077, // 職, #363 + 0x80B2, // 育, #433 + 0x80FD, // 能, #250 + 0x8179, // 腹, #396 + 0x81EA, // 自, #156 + 0x826F, // 良, #329 + 0x8272, // 色, #402 + 0x82B1, // 花, #440 + 0x82B8, // 芸, #413 + 0x82F1, // 英, #485 + 0x8449, // 葉, #472 + 0x884C, // 行, #128 + 0x8853, // 術, #460 + 0x8868, // 表, #209 + 0x88FD, // 製, #431 + 0x897F, // 西, #406 + 0x8981, // 要, #313 + 0x898B, // 見, #101 + 0x898F, // 規, #375 + 0x89A7, // 覧, #171 + 0x89E3, // 解, #388 + 0x8A00, // 言, #210 + 0x8A08, // 計, #343 + 0x8A18, // 記, #136 + 0x8A2D, // 設, #292 + 0x8A71, // 話, #213 + 0x8A73, // 詳, #371 + 0x8A8D, // 認, #404 + 0x8A9E, // 語, #234 + 0x8AAC, // 説, #494 + 0x8AAD, // 読, #301 + 0x8ABF, // 調, #443 + 0x8AC7, // 談, #448 + 0x8B77, // 護, #509 + 0x8C37, // 谷, #506 + 0x8CA9, // 販, #362 + 0x8CB7, // 買, #346 + 0x8CC7, // 資, #473 + 0x8CEA, // 質, #281 + 0x8CFC, // 購, #495 + 0x8EAB, // 身, #470 + 0x8ECA, // 車, #205 + 0x8EE2, // 転, #335 + 0x8F09, // 載, #342 + 0x8FBC, // 込, #229 + 0x8FD1, // 近, #304 + 0x8FD4, // 返, #461 + 0x8FFD, // 追, #379 + 0x9001, // 送, #186 + 0x901A, // 通, #182 + 0x901F, // 速, #340 + 0x9023, // 連, #244 + 0x904B, // 運, 
#382 + 0x904E, // 過, #498 + 0x9053, // 道, #282 + 0x9054, // 達, #450 + 0x9055, // 違, #414 + 0x9078, // 選, #288 + 0x90E8, // 部, #208 + 0x90FD, // 都, #344 + 0x914D, // 配, #389 + 0x91CD, // 重, #478 + 0x91CE, // 野, #245 + 0x91D1, // 金, #138 + 0x9332, // 録, #238 + 0x9577, // 長, #247 + 0x9580, // 門, #508 + 0x958B, // 開, #248 + 0x9593, // 間, #141 + 0x95A2, // 関, #188 + 0x962A, // 阪, #496 + 0x9650, // 限, #395 + 0x9662, // 院, #449 + 0x9664, // 除, #510 + 0x969B, // 際, #493 + 0x96C6, // 集, #196 + 0x96D1, // 雑, #442 + 0x96FB, // 電, #187 + 0x9762, // 面, #328 + 0x97F3, // 音, #325 + 0x984C, // 題, #310 + 0x985E, // 類, #491 + 0x98A8, // 風, #353 + 0x98DF, // 食, #218 + 0x9928, // 館, #464 + 0x99C5, // 駅, #316 + 0x9A13, // 験, #397 + 0x9AD8, // 高, #176 + 0xFF57, // w, #108 +}; +// the percentage of the sample covered by the above characters +static const float frequent_ja_coverage=0.880569589120162; + +// The 512 most frequently occuring characters for the ko language in a sample of the Internet. +// Ordered by codepoint, comment shows character and ranking by frequency +const uint16_t frequent_ko[] = { + 0x314B, // ㅋ, #148 + 0x314E, // ㅎ, #390 + 0x3160, // ㅠ, #354 + 0x318D, // ㆍ, #439 + 0xAC00, // 가, #6 + 0xAC01, // 각, #231 + 0xAC04, // 간, #106 + 0xAC08, // 갈, #362 + 0xAC10, // 감, #122 + 0xAC11, // 갑, #493 + 0xAC15, // 강, #155 + 0xAC19, // 같, #264 + 0xAC1C, // 개, #87 + 0xAC1D, // 객, #198 + 0xAC24, // 갤, #457 + 0xAC70, // 거, #91 + 0xAC74, // 건, #161 + 0xAC78, // 걸, #338 + 0xAC80, // 검, #184 + 0xAC83, // 것, #116 + 0xAC8C, // 게, #36 + 0xACA0, // 겠, #233 + 0xACA8, // 겨, #341 + 0xACA9, // 격, #245 + 0xACAC, // 견, #413 + 0xACB0, // 결, #202 + 0xACBD, // 경, #62 + 0xACC4, // 계, #142 + 0xACE0, // 고, #12 + 0xACE1, // 곡, #444 + 0xACE8, // 골, #379 + 0xACF3, // 곳, #388 + 0xACF5, // 공, #59 + 0xACFC, // 과, #69 + 0xAD00, // 관, #95 + 0xAD11, // 광, #235 + 0xAD50, // 교, #128 + 0xAD6C, // 구, #52 + 0xAD6D, // 국, #85 + 0xAD70, // 군, #293 + 0xAD74, // 굴, #487 + 0xAD81, // 궁, #441 + 0xAD8C, // 권, #192 + 0xADC0, 
// 귀, #386 + 0xADDC, // 규, #367 + 0xADF8, // 그, #30 + 0xADF9, // 극, #424 + 0xADFC, // 근, #241 + 0xAE00, // 글, #61 + 0xAE08, // 금, #138 + 0xAE09, // 급, #269 + 0xAE30, // 기, #3 + 0xAE34, // 긴, #465 + 0xAE38, // 길, #297 + 0xAE40, // 김, #205 + 0xAE4C, // 까, #171 + 0xAED8, // 께, #273 + 0xAF43, // 꽃, #475 + 0xB05D, // 끝, #505 + 0xB07C, // 끼, #490 + 0xB098, // 나, #39 + 0xB09C, // 난, #274 + 0xB0A0, // 날, #292 + 0xB0A8, // 남, #139 + 0xB0B4, // 내, #56 + 0xB108, // 너, #272 + 0xB110, // 널, #476 + 0xB118, // 넘, #492 + 0xB124, // 네, #100 + 0xB137, // 넷, #329 + 0xB140, // 녀, #288 + 0xB144, // 년, #151 + 0xB178, // 노, #149 + 0xB17C, // 논, #491 + 0xB180, // 놀, #464 + 0xB18D, // 농, #442 + 0xB204, // 누, #319 + 0xB208, // 눈, #383 + 0xB274, // 뉴, #173 + 0xB290, // 느, #368 + 0xB294, // 는, #5 + 0xB298, // 늘, #322 + 0xB2A5, // 능, #190 + 0xB2C8, // 니, #16 + 0xB2D8, // 님, #153 + 0xB2E4, // 다, #2 + 0xB2E8, // 단, #134 + 0xB2EB, // 닫, #195 + 0xB2EC, // 달, #243 + 0xB2F4, // 담, #254 + 0xB2F5, // 답, #287 + 0xB2F9, // 당, #159 + 0xB300, // 대, #33 + 0xB313, // 댓, #303 + 0xB354, // 더, #140 + 0xB358, // 던, #252 + 0xB367, // 덧, #463 + 0xB370, // 데, #104 + 0xB378, // 델, #429 + 0xB3C4, // 도, #25 + 0xB3C5, // 독, #301 + 0xB3CC, // 돌, #309 + 0xB3D9, // 동, #58 + 0xB418, // 되, #82 + 0xB41C, // 된, #189 + 0xB420, // 될, #408 + 0xB429, // 됩, #332 + 0xB450, // 두, #199 + 0xB4A4, // 뒤, #496 + 0xB4DC, // 드, #40 + 0xB4E0, // 든, #283 + 0xB4E4, // 들, #54 + 0xB4EF, // 듯, #478 + 0xB4F1, // 등, #90 + 0xB514, // 디, #133 + 0xB529, // 딩, #462 + 0xB530, // 따, #333 + 0xB54C, // 때, #240 + 0xB610, // 또, #313 + 0xB77C, // 라, #42 + 0xB77D, // 락, #355 + 0xB780, // 란, #290 + 0xB78C, // 람, #246 + 0xB78D, // 랍, #420 + 0xB791, // 랑, #270 + 0xB798, // 래, #174 + 0xB799, // 랙, #381 + 0xB79C, // 랜, #357 + 0xB7A8, // 램, #359 + 0xB7A9, // 랩, #402 + 0xB7C9, // 량, #346 + 0xB7EC, // 러, #130 + 0xB7F0, // 런, #312 + 0xB7FC, // 럼, #327 + 0xB7FD, // 럽, #447 + 0xB807, // 렇, #412 + 0xB808, // 레, #114 + 0xB80C, // 렌, #395 + 0xB824, // 려, #158 + 0xB825, // 
력, #194 + 0xB828, // 련, #326 + 0xB839, // 령, #389 + 0xB85C, // 로, #4 + 0xB85D, // 록, #84 + 0xB860, // 론, #366 + 0xB8CC, // 료, #154 + 0xB8E8, // 루, #236 + 0xB958, // 류, #265 + 0xB974, // 르, #212 + 0xB978, // 른, #250 + 0xB97C, // 를, #35 + 0xB984, // 름, #276 + 0xB9AC, // 리, #19 + 0xB9AD, // 릭, #394 + 0xB9B0, // 린, #259 + 0xB9B4, // 릴, #485 + 0xB9BC, // 림, #305 + 0xB9BD, // 립, #217 + 0xB9C1, // 링, #351 + 0xB9C8, // 마, #67 + 0xB9C9, // 막, #310 + 0xB9CC, // 만, #65 + 0xB9CE, // 많, #257 + 0xB9D0, // 말, #188 + 0xB9DB, // 맛, #397 + 0xB9DD, // 망, #370 + 0xB9DE, // 맞, #399 + 0xB9E4, // 매, #125 + 0xB9E8, // 맨, #422 + 0xBA38, // 머, #311 + 0xBA39, // 먹, #377 + 0xBA3C, // 먼, #469 + 0xBA54, // 메, #147 + 0xBA70, // 며, #191 + 0xBA74, // 면, #72 + 0xBA85, // 명, #131 + 0xBAA8, // 모, #73 + 0xBAA9, // 목, #157 + 0xBAB0, // 몰, #401 + 0xBAB8, // 몸, #437 + 0xBABB, // 못, #336 + 0xBB34, // 무, #80 + 0xBB38, // 문, #57 + 0xBB3C, // 물, #94 + 0xBBA4, // 뮤, #431 + 0xBBF8, // 미, #76 + 0xBBFC, // 민, #200 + 0xBC00, // 밀, #308 + 0xBC0F, // 및, #249 + 0xBC14, // 바, #89 + 0xBC15, // 박, #226 + 0xBC18, // 반, #175 + 0xBC1B, // 받, #248 + 0xBC1C, // 발, #164 + 0xBC29, // 방, #92 + 0xBC30, // 배, #162 + 0xBC31, // 백, #256 + 0xBC84, // 버, #111 + 0xBC88, // 번, #167 + 0xBC8C, // 벌, #423 + 0xBC94, // 범, #427 + 0xBC95, // 법, #207 + 0xBCA0, // 베, #281 + 0xBCA4, // 벤, #378 + 0xBCA8, // 벨, #387 + 0xBCC0, // 변, #253 + 0xBCC4, // 별, #262 + 0xBCD1, // 병, #340 + 0xBCF4, // 보, #20 + 0xBCF5, // 복, #204 + 0xBCF8, // 본, #182 + 0xBCFC, // 볼, #385 + 0xBD09, // 봉, #405 + 0xBD80, // 부, #46 + 0xBD81, // 북, #261 + 0xBD84, // 분, #105 + 0xBD88, // 불, #225 + 0xBDF0, // 뷰, #350 + 0xBE0C, // 브, #214 + 0xBE14, // 블, #99 + 0xBE44, // 비, #55 + 0xBE4C, // 빌, #510 + 0xBE60, // 빠, #398 + 0xC0AC, // 사, #14 + 0xC0AD, // 삭, #342 + 0xC0B0, // 산, #121 + 0xC0B4, // 살, #279 + 0xC0BC, // 삼, #348 + 0xC0C1, // 상, #41 + 0xC0C8, // 새, #282 + 0xC0C9, // 색, #181 + 0xC0DD, // 생, #109 + 0xC11C, // 서, #21 + 0xC11D, // 석, #234 + 0xC120, // 선, #107 + 0xC124, // 설, 
#170 + 0xC131, // 성, #50 + 0xC138, // 세, #60 + 0xC139, // 섹, #456 + 0xC13C, // 센, #267 + 0xC154, // 셔, #455 + 0xC158, // 션, #237 + 0xC15C, // 셜, #448 + 0xC168, // 셨, #421 + 0xC18C, // 소, #51 + 0xC18D, // 속, #219 + 0xC190, // 손, #323 + 0xC1A1, // 송, #203 + 0xC1C4, // 쇄, #501 + 0xC1FC, // 쇼, #364 + 0xC218, // 수, #27 + 0xC219, // 숙, #467 + 0xC21C, // 순, #258 + 0xC220, // 술, #302 + 0xC26C, // 쉬, #511 + 0xC288, // 슈, #384 + 0xC2A4, // 스, #11 + 0xC2AC, // 슬, #438 + 0xC2B4, // 슴, #504 + 0xC2B5, // 습, #77 + 0xC2B9, // 승, #299 + 0xC2DC, // 시, #13 + 0xC2DD, // 식, #137 + 0xC2E0, // 신, #47 + 0xC2E4, // 실, #132 + 0xC2EC, // 심, #196 + 0xC2ED, // 십, #482 + 0xC2F6, // 싶, #352 + 0xC2F8, // 싸, #419 + 0xC4F0, // 쓰, #278 + 0xC528, // 씨, #360 + 0xC544, // 아, #23 + 0xC545, // 악, #296 + 0xC548, // 안, #71 + 0xC54A, // 않, #209 + 0xC54C, // 알, #222 + 0xC554, // 암, #460 + 0xC558, // 았, #349 + 0xC559, // 앙, #473 + 0xC55E, // 앞, #434 + 0xC560, // 애, #271 + 0xC561, // 액, #415 + 0xC571, // 앱, #477 + 0xC57C, // 야, #124 + 0xC57D, // 약, #229 + 0xC591, // 양, #177 + 0xC5B4, // 어, #24 + 0xC5B5, // 억, #407 + 0xC5B8, // 언, #294 + 0xC5BC, // 얼, #356 + 0xC5C4, // 엄, #426 + 0xC5C5, // 업, #118 + 0xC5C6, // 없, #178 + 0xC5C8, // 었, #165 + 0xC5D0, // 에, #9 + 0xC5D4, // 엔, #375 + 0xC5D8, // 엘, #506 + 0xC5EC, // 여, #66 + 0xC5ED, // 역, #186 + 0xC5EE, // 엮, #488 + 0xC5F0, // 연, #96 + 0xC5F4, // 열, #266 + 0xC5FC, // 염, #449 + 0xC600, // 였, #374 + 0xC601, // 영, #83 + 0xC608, // 예, #168 + 0xC624, // 오, #75 + 0xC628, // 온, #300 + 0xC62C, // 올, #306 + 0xC640, // 와, #119 + 0xC644, // 완, #361 + 0xC654, // 왔, #489 + 0xC655, // 왕, #418 + 0xC678, // 외, #218 + 0xC694, // 요, #43 + 0xC695, // 욕, #479 + 0xC6A9, // 용, #48 + 0xC6B0, // 우, #64 + 0xC6B1, // 욱, #503 + 0xC6B4, // 운, #108 + 0xC6B8, // 울, #223 + 0xC6C0, // 움, #317 + 0xC6C3, // 웃, #404 + 0xC6CC, // 워, #280 + 0xC6D0, // 원, #45 + 0xC6D4, // 월, #150 + 0xC6E8, // 웨, #446 + 0xC6F9, // 웹, #500 + 0xC704, // 위, #78 + 0xC720, // 유, #81 + 0xC721, // 육, #321 + 0xC724, // 윤, #416 + 
0xC73C, // 으, #49 + 0xC740, // 은, #31 + 0xC744, // 을, #17 + 0xC74C, // 음, #112 + 0xC751, // 응, #461 + 0xC758, // 의, #8 + 0xC774, // 이, #1 + 0xC775, // 익, #403 + 0xC778, // 인, #18 + 0xC77C, // 일, #28 + 0xC784, // 임, #160 + 0xC785, // 입, #93 + 0xC788, // 있, #44 + 0xC790, // 자, #22 + 0xC791, // 작, #88 + 0xC798, // 잘, #347 + 0xC7A1, // 잡, #372 + 0xC7A5, // 장, #53 + 0xC7AC, // 재, #120 + 0xC7C1, // 쟁, #483 + 0xC800, // 저, #98 + 0xC801, // 적, #97 + 0xC804, // 전, #34 + 0xC808, // 절, #320 + 0xC810, // 점, #201 + 0xC811, // 접, #331 + 0xC815, // 정, #26 + 0xC81C, // 제, #29 + 0xC838, // 져, #414 + 0xC870, // 조, #86 + 0xC871, // 족, #373 + 0xC874, // 존, #432 + 0xC880, // 좀, #470 + 0xC885, // 종, #208 + 0xC88B, // 좋, #239 + 0xC8E0, // 죠, #451 + 0xC8FC, // 주, #38 + 0xC8FD, // 죽, #471 + 0xC900, // 준, #286 + 0xC904, // 줄, #392 + 0xC911, // 중, #103 + 0xC988, // 즈, #255 + 0xC98C, // 즌, #507 + 0xC990, // 즐, #371 + 0xC99D, // 증, #260 + 0xC9C0, // 지, #10 + 0xC9C1, // 직, #216 + 0xC9C4, // 진, #79 + 0xC9C8, // 질, #238 + 0xC9D1, // 집, #206 + 0xC9DC, // 짜, #411 + 0xC9F8, // 째, #494 + 0xCABD, // 쪽, #435 + 0xCC28, // 차, #146 + 0xCC29, // 착, #443 + 0xCC2C, // 찬, #481 + 0xCC30, // 찰, #440 + 0xCC38, // 참, #343 + 0xCC3D, // 창, #304 + 0xCC3E, // 찾, #335 + 0xCC44, // 채, #284 + 0xCC45, // 책, #298 + 0xCC98, // 처, #242 + 0xCC9C, // 천, #143 + 0xCCA0, // 철, #380 + 0xCCA8, // 첨, #452 + 0xCCAB, // 첫, #484 + 0xCCAD, // 청, #197 + 0xCCB4, // 체, #126 + 0xCCD0, // 쳐, #472 + 0xCD08, // 초, #220 + 0xCD1D, // 총, #406 + 0xCD5C, // 최, #179 + 0xCD94, // 추, #136 + 0xCD95, // 축, #337 + 0xCD9C, // 출, #166 + 0xCDA9, // 충, #369 + 0xCDE8, // 취, #210 + 0xCE20, // 츠, #215 + 0xCE21, // 측, #468 + 0xCE35, // 층, #512 + 0xCE58, // 치, #102 + 0xCE5C, // 친, #325 + 0xCE68, // 침, #263 + 0xCE74, // 카, #115 + 0xCE7C, // 칼, #466 + 0xCE90, // 캐, #454 + 0xCEE4, // 커, #285 + 0xCEE8, // 컨, #328 + 0xCEF4, // 컴, #417 + 0xCF00, // 케, #339 + 0xCF13, // 켓, #509 + 0xCF1C, // 켜, #508 + 0xCF54, // 코, #193 + 0xCF58, // 콘, #391 + 0xCFE0, // 쿠, #393 + 
0xD035, // 퀵, #453 + 0xD06C, // 크, #101 + 0xD070, // 큰, #495 + 0xD074, // 클, #289 + 0xD0A4, // 키, #230 + 0xD0C0, // 타, #127 + 0xD0C1, // 탁, #314 + 0xD0C4, // 탄, #450 + 0xD0C8, // 탈, #436 + 0xD0DC, // 태, #221 + 0xD0DD, // 택, #275 + 0xD130, // 터, #70 + 0xD14C, // 테, #213 + 0xD150, // 텐, #324 + 0xD154, // 텔, #430 + 0xD15C, // 템, #382 + 0xD1A0, // 토, #145 + 0xD1B5, // 통, #156 + 0xD22C, // 투, #227 + 0xD2B8, // 트, #37 + 0xD2B9, // 특, #247 + 0xD2F0, // 티, #187 + 0xD305, // 팅, #410 + 0xD30C, // 파, #141 + 0xD310, // 판, #163 + 0xD314, // 팔, #499 + 0xD328, // 패, #307 + 0xD32C, // 팬, #459 + 0xD338, // 팸, #433 + 0xD37C, // 퍼, #344 + 0xD398, // 페, #172 + 0xD3B8, // 편, #251 + 0xD3C9, // 평, #291 + 0xD3EC, // 포, #68 + 0xD3ED, // 폭, #445 + 0xD3F0, // 폰, #318 + 0xD45C, // 표, #232 + 0xD480, // 풀, #497 + 0xD488, // 품, #113 + 0xD48D, // 풍, #425 + 0xD504, // 프, #110 + 0xD508, // 픈, #498 + 0xD50C, // 플, #211 + 0xD53C, // 피, #169 + 0xD544, // 필, #295 + 0xD551, // 핑, #376 + 0xD558, // 하, #7 + 0xD559, // 학, #129 + 0xD55C, // 한, #15 + 0xD560, // 할, #144 + 0xD568, // 함, #152 + 0xD569, // 합, #123 + 0xD56D, // 항, #268 + 0xD574, // 해, #32 + 0xD588, // 했, #180 + 0xD589, // 행, #135 + 0xD5A5, // 향, #345 + 0xD5C8, // 허, #396 + 0xD5D8, // 험, #316 + 0xD5E4, // 헤, #474 + 0xD604, // 현, #185 + 0xD611, // 협, #315 + 0xD615, // 형, #244 + 0xD61C, // 혜, #428 + 0xD638, // 호, #117 + 0xD63C, // 혼, #358 + 0xD648, // 홈, #330 + 0xD64D, // 홍, #363 + 0xD654, // 화, #63 + 0xD655, // 확, #183 + 0xD658, // 환, #224 + 0xD65C, // 활, #277 + 0xD669, // 황, #353 + 0xD68C, // 회, #74 + 0xD68D, // 획, #458 + 0xD69F, // 횟, #409 + 0xD6A8, // 효, #400 + 0xD6C4, // 후, #176 + 0xD6C8, // 훈, #486 + 0xD734, // 휴, #365 + 0xD754, // 흔, #480 + 0xD76C, // 희, #334 + 0xD788, // 히, #228 + 0xD798, // 힘, #502 +}; +// the percentage of the sample covered by the above characters +static const float frequent_ko_coverage=0.948157021464184; + diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp index acfaea0..f3f3e15 100644 --- 
a/media/libmedia/IAudioFlinger.cpp +++ b/media/libmedia/IAudioFlinger.cpp @@ -89,7 +89,7 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, @@ -106,6 +106,7 @@ public: data.writeInt32(sampleRate); data.writeInt32(format); data.writeInt32(channelMask); + size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0; data.writeInt32(frameCount); track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT; data.writeInt32(lFlags); @@ -117,7 +118,7 @@ public: } data.writeInt32((int32_t) output); data.writeInt32((int32_t) tid); - int lSessionId = 0; + int lSessionId = AUDIO_SESSION_ALLOCATE; if (sessionId != NULL) { lSessionId = *sessionId; } @@ -127,6 +128,10 @@ public: if (lStatus != NO_ERROR) { ALOGE("createTrack error: %s", strerror(-lStatus)); } else { + frameCount = reply.readInt32(); + if (pFrameCount != NULL) { + *pFrameCount = frameCount; + } lFlags = reply.readInt32(); if (flags != NULL) { *flags = lFlags; @@ -138,8 +143,19 @@ public: name = reply.readString8(); lStatus = reply.readInt32(); track = interface_cast<IAudioTrack>(reply.readStrongBinder()); + if (lStatus == NO_ERROR) { + if (track == 0) { + ALOGE("createTrack should have returned an IAudioTrack"); + lStatus = UNKNOWN_ERROR; + } + } else { + if (track != 0) { + ALOGE("createTrack returned an IAudioTrack but with status %d", lStatus); + track.clear(); + } + } } - if (status) { + if (status != NULL) { *status = lStatus; } return track; @@ -150,7 +166,7 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, track_flags_t *flags, pid_t tid, int *sessionId, @@ -163,11 +179,12 @@ public: data.writeInt32(sampleRate); data.writeInt32(format); data.writeInt32(channelMask); + size_t frameCount = pFrameCount != NULL ? 
*pFrameCount : 0; data.writeInt32(frameCount); track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT; data.writeInt32(lFlags); data.writeInt32((int32_t) tid); - int lSessionId = 0; + int lSessionId = AUDIO_SESSION_ALLOCATE; if (sessionId != NULL) { lSessionId = *sessionId; } @@ -176,6 +193,10 @@ public: if (lStatus != NO_ERROR) { ALOGE("openRecord error: %s", strerror(-lStatus)); } else { + frameCount = reply.readInt32(); + if (pFrameCount != NULL) { + *pFrameCount = frameCount; + } lFlags = reply.readInt32(); if (flags != NULL) { *flags = lFlags; @@ -198,7 +219,7 @@ public: } } } - if (status) { + if (status != NULL) { *status = lStatus; } return record; @@ -415,15 +436,25 @@ public: audio_io_handle_t output = (audio_io_handle_t) reply.readInt32(); ALOGV("openOutput() returned output, %d", output); devices = (audio_devices_t)reply.readInt32(); - if (pDevices != NULL) *pDevices = devices; + if (pDevices != NULL) { + *pDevices = devices; + } samplingRate = reply.readInt32(); - if (pSamplingRate != NULL) *pSamplingRate = samplingRate; + if (pSamplingRate != NULL) { + *pSamplingRate = samplingRate; + } format = (audio_format_t) reply.readInt32(); - if (pFormat != NULL) *pFormat = format; + if (pFormat != NULL) { + *pFormat = format; + } channelMask = (audio_channel_mask_t)reply.readInt32(); - if (pChannelMask != NULL) *pChannelMask = channelMask; + if (pChannelMask != NULL) { + *pChannelMask = channelMask; + } latency = reply.readInt32(); - if (pLatencyMs != NULL) *pLatencyMs = latency; + if (pLatencyMs != NULL) { + *pLatencyMs = latency; + } return output; } @@ -487,13 +518,21 @@ public: remote()->transact(OPEN_INPUT, data, &reply); audio_io_handle_t input = (audio_io_handle_t) reply.readInt32(); devices = (audio_devices_t)reply.readInt32(); - if (pDevices != NULL) *pDevices = devices; + if (pDevices != NULL) { + *pDevices = devices; + } samplingRate = reply.readInt32(); - if (pSamplingRate != NULL) *pSamplingRate = samplingRate; + if 
(pSamplingRate != NULL) { + *pSamplingRate = samplingRate; + } format = (audio_format_t) reply.readInt32(); - if (pFormat != NULL) *pFormat = format; + if (pFormat != NULL) { + *pFormat = format; + } channelMask = (audio_channel_mask_t)reply.readInt32(); - if (pChannelMask != NULL) *pChannelMask = channelMask; + if (pChannelMask != NULL) { + *pChannelMask = channelMask; + } return input; } @@ -535,24 +574,27 @@ public: status_t status = reply.readInt32(); if (status == NO_ERROR) { uint32_t tmp = reply.readInt32(); - if (halFrames) { + if (halFrames != NULL) { *halFrames = tmp; } tmp = reply.readInt32(); - if (dspFrames) { + if (dspFrames != NULL) { *dspFrames = tmp; } } return status; } - virtual size_t getInputFramesLost(audio_io_handle_t ioHandle) const + virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); data.writeInt32((int32_t) ioHandle); - remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply); - return reply.readInt32(); + status_t status = remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply); + if (status != NO_ERROR) { + return 0; + } + return (uint32_t) reply.readInt32(); } virtual int newAudioSessionId() @@ -657,7 +699,7 @@ public: if (pDesc == NULL) { return effect; - if (status) { + if (status != NULL) { *status = BAD_VALUE; } } @@ -675,7 +717,7 @@ public: } else { lStatus = reply.readInt32(); int tmp = reply.readInt32(); - if (id) { + if (id != NULL) { *id = tmp; } tmp = reply.readInt32(); @@ -685,7 +727,7 @@ public: effect = interface_cast<IEffect>(reply.readStrongBinder()); reply.read(pDesc, sizeof(effect_descriptor_t)); } - if (status) { + if (status != NULL) { *status = lStatus; } @@ -775,9 +817,11 @@ status_t BnAudioFlinger::onTransact( } else { track = createTrack( (audio_stream_type_t) streamType, sampleRate, format, - channelMask, frameCount, &flags, buffer, output, tid, + channelMask, &frameCount, &flags, buffer, output, tid, 
&sessionId, name, clientUid, &status); + LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR)); } + reply->writeInt32(frameCount); reply->writeInt32(flags); reply->writeInt32(sessionId); reply->writeString8(name); @@ -797,8 +841,9 @@ status_t BnAudioFlinger::onTransact( int sessionId = data.readInt32(); status_t status; sp<IAudioRecord> record = openRecord(input, - sampleRate, format, channelMask, frameCount, &flags, tid, &sessionId, &status); + sampleRate, format, channelMask, &frameCount, &flags, tid, &sessionId, &status); LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR)); + reply->writeInt32(frameCount); reply->writeInt32(flags); reply->writeInt32(sessionId); reply->writeInt32(status); @@ -1026,7 +1071,7 @@ status_t BnAudioFlinger::onTransact( case GET_INPUT_FRAMES_LOST: { CHECK_INTERFACE(IAudioFlinger, data, reply); audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32(); - reply->writeInt32(getInputFramesLost(ioHandle)); + reply->writeInt32((int32_t) getInputFramesLost(ioHandle)); return NO_ERROR; } break; case NEW_AUDIO_SESSION_ID: { diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp index 4a7de65..9866d70 100644 --- a/media/libmedia/IAudioRecord.cpp +++ b/media/libmedia/IAudioRecord.cpp @@ -50,6 +50,9 @@ public: status_t status = remote()->transact(GET_CBLK, data, &reply); if (status == NO_ERROR) { cblk = interface_cast<IMemory>(reply.readStrongBinder()); + if (cblk != 0 && cblk->pointer() == NULL) { + cblk.clear(); + } } return cblk; } diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp index 3cd9cfd..ffc21fc 100644 --- a/media/libmedia/IAudioTrack.cpp +++ b/media/libmedia/IAudioTrack.cpp @@ -60,6 +60,9 @@ public: status_t status = remote()->transact(GET_CBLK, data, &reply); if (status == NO_ERROR) { cblk = interface_cast<IMemory>(reply.readStrongBinder()); + if (cblk != 0 && cblk->pointer() == NULL) { + cblk.clear(); + } } return cblk; } @@ -122,6 +125,9 @@ public: status = 
reply.readInt32(); if (status == NO_ERROR) { *buffer = interface_cast<IMemory>(reply.readStrongBinder()); + if (*buffer != 0 && (*buffer)->pointer() == NULL) { + (*buffer).clear(); + } } } return status; diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp index a303a8f..b94012a 100644 --- a/media/libmedia/IEffect.cpp +++ b/media/libmedia/IEffect.cpp @@ -117,6 +117,9 @@ public: status_t status = remote()->transact(GET_CBLK, data, &reply); if (status == NO_ERROR) { cblk = interface_cast<IMemory>(reply.readStrongBinder()); + if (cblk != 0 && cblk->pointer() == NULL) { + cblk.clear(); + } } return cblk; } diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp index 9db5b1b..10b4934 100644 --- a/media/libmedia/IMediaDeathNotifier.cpp +++ b/media/libmedia/IMediaDeathNotifier.cpp @@ -75,7 +75,7 @@ IMediaDeathNotifier::removeObitRecipient(const wp<IMediaDeathNotifier>& recipien } void -IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& who) { +IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& who __unused) { ALOGW("media server died"); // Need to do this with the lock held diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp index 93a4a4c..1661f04 100644 --- a/media/libmedia/MediaScannerClient.cpp +++ b/media/libmedia/MediaScannerClient.cpp @@ -14,217 +14,57 @@ * limitations under the License. 
*/ +//#define LOG_NDEBUG 0 +#define LOG_TAG "MediaScannerClient" +#include <utils/Log.h> + #include <media/mediascanner.h> +#include "CharacterEncodingDetector.h" #include "StringArray.h" -#include "autodetect.h" -#include "unicode/ucnv.h" -#include "unicode/ustring.h" - namespace android { MediaScannerClient::MediaScannerClient() - : mNames(NULL), - mValues(NULL), - mLocaleEncoding(kEncodingNone) + : mEncodingDetector(NULL) { } MediaScannerClient::~MediaScannerClient() { - delete mNames; - delete mValues; + delete mEncodingDetector; } void MediaScannerClient::setLocale(const char* locale) { - if (!locale) return; - - if (!strncmp(locale, "ja", 2)) - mLocaleEncoding = kEncodingShiftJIS; - else if (!strncmp(locale, "ko", 2)) - mLocaleEncoding = kEncodingEUCKR; - else if (!strncmp(locale, "zh", 2)) { - if (!strcmp(locale, "zh_CN")) { - // simplified chinese for mainland China - mLocaleEncoding = kEncodingGBK; - } else { - // assume traditional for non-mainland Chinese locales (Taiwan, Hong Kong, Singapore) - mLocaleEncoding = kEncodingBig5; - } - } + mLocale = locale; // not currently used } void MediaScannerClient::beginFile() { - mNames = new StringArray; - mValues = new StringArray; + delete mEncodingDetector; + mEncodingDetector = new CharacterEncodingDetector(); } status_t MediaScannerClient::addStringTag(const char* name, const char* value) { - if (mLocaleEncoding != kEncodingNone) { - // don't bother caching strings that are all ASCII. - // call handleStringTag directly instead. 
- // check to see if value (which should be utf8) has any non-ASCII characters - bool nonAscii = false; - const char* chp = value; - char ch; - while ((ch = *chp++)) { - if (ch & 0x80) { - nonAscii = true; - break; - } - } - - if (nonAscii) { - // save the strings for later so they can be used for native encoding detection - mNames->push_back(name); - mValues->push_back(value); - return OK; - } - // else fall through - } - - // autodetection is not necessary, so no need to cache the values - // pass directly to the client instead - return handleStringTag(name, value); -} - -static uint32_t possibleEncodings(const char* s) -{ - uint32_t result = kEncodingAll; - // if s contains a native encoding, then it was mistakenly encoded in utf8 as if it were latin-1 - // so we need to reverse the latin-1 -> utf8 conversion to get the native chars back - uint8_t ch1, ch2; - uint8_t* chp = (uint8_t *)s; - - while ((ch1 = *chp++)) { - if (ch1 & 0x80) { - ch2 = *chp++; - ch1 = ((ch1 << 6) & 0xC0) | (ch2 & 0x3F); - // ch1 is now the first byte of the potential native char - - ch2 = *chp++; - if (ch2 & 0x80) - ch2 = ((ch2 << 6) & 0xC0) | (*chp++ & 0x3F); - // ch2 is now the second byte of the potential native char - int ch = (int)ch1 << 8 | (int)ch2; - result &= findPossibleEncodings(ch); - } - // else ASCII character, which could be anything - } - - return result; -} - -void MediaScannerClient::convertValues(uint32_t encoding) -{ - const char* enc = NULL; - switch (encoding) { - case kEncodingShiftJIS: - enc = "shift-jis"; - break; - case kEncodingGBK: - enc = "gbk"; - break; - case kEncodingBig5: - enc = "Big5"; - break; - case kEncodingEUCKR: - enc = "EUC-KR"; - break; - } - - if (enc) { - UErrorCode status = U_ZERO_ERROR; - - UConverter *conv = ucnv_open(enc, &status); - if (U_FAILURE(status)) { - ALOGE("could not create UConverter for %s", enc); - return; - } - UConverter *utf8Conv = ucnv_open("UTF-8", &status); - if (U_FAILURE(status)) { - ALOGE("could not create UConverter 
for UTF-8"); - ucnv_close(conv); - return; - } - - // for each value string, convert from native encoding to UTF-8 - for (int i = 0; i < mNames->size(); i++) { - // first we need to untangle the utf8 and convert it back to the original bytes - // since we are reducing the length of the string, we can do this in place - uint8_t* src = (uint8_t *)mValues->getEntry(i); - int len = strlen((char *)src); - uint8_t* dest = src; - - uint8_t uch; - while ((uch = *src++)) { - if (uch & 0x80) - *dest++ = ((uch << 6) & 0xC0) | (*src++ & 0x3F); - else - *dest++ = uch; - } - *dest = 0; - - // now convert from native encoding to UTF-8 - const char* source = mValues->getEntry(i); - int targetLength = len * 3 + 1; - char* buffer = new char[targetLength]; - // don't normally check for NULL, but in this case targetLength may be large - if (!buffer) - break; - char* target = buffer; - - ucnv_convertEx(utf8Conv, conv, &target, target + targetLength, - &source, (const char *)dest, NULL, NULL, NULL, NULL, TRUE, TRUE, &status); - if (U_FAILURE(status)) { - ALOGE("ucnv_convertEx failed: %d", status); - mValues->setEntry(i, "???"); - } else { - // zero terminate - *target = 0; - mValues->setEntry(i, buffer); - } - - delete[] buffer; - } - - ucnv_close(conv); - ucnv_close(utf8Conv); - } + mEncodingDetector->addTag(name, value); + return OK; } void MediaScannerClient::endFile() { - if (mLocaleEncoding != kEncodingNone) { - int size = mNames->size(); - uint32_t encoding = kEncodingAll; - - // compute a bit mask containing all possible encodings - for (int i = 0; i < mNames->size(); i++) - encoding &= possibleEncodings(mValues->getEntry(i)); - - // if the locale encoding matches, then assume we have a native encoding. 
- if (encoding & mLocaleEncoding) - convertValues(mLocaleEncoding); - - // finally, push all name/value pairs to the client - for (int i = 0; i < mNames->size(); i++) { - status_t status = handleStringTag(mNames->getEntry(i), mValues->getEntry(i)); - if (status) { - break; - } + mEncodingDetector->detectAndConvert(); + + int size = mEncodingDetector->size(); + if (size) { + for (int i = 0; i < size; i++) { + const char *name; + const char *value; + mEncodingDetector->getTag(i, &name, &value); + handleStringTag(name, value); } } - // else addStringTag() has done all the work so we have nothing to do - - delete mNames; - delete mValues; - mNames = NULL; - mValues = NULL; } } // namespace android diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp index 22e9fad..98acd1f 100644 --- a/media/libmedia/SoundPool.cpp +++ b/media/libmedia/SoundPool.cpp @@ -199,7 +199,7 @@ SoundChannel* SoundPool::findNextChannel(int channelID) return NULL; } -int SoundPool::load(const char* path, int priority) +int SoundPool::load(const char* path, int priority __unused) { ALOGV("load: path=%s, priority=%d", path, priority); Mutex::Autolock lock(&mLock); @@ -209,7 +209,7 @@ int SoundPool::load(const char* path, int priority) return sample->sampleID(); } -int SoundPool::load(int fd, int64_t offset, int64_t length, int priority) +int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused) { ALOGV("load: fd=%d, offset=%lld, length=%lld, priority=%d", fd, offset, length, priority); @@ -600,16 +600,15 @@ void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftV // wrong audio audio buffer size (mAudioBufferSize) unsigned long toggle = mToggle ^ 1; void *userData = (void *)((unsigned long)this | toggle); - uint32_t channels = (numChannels == 2) ? 
- AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO; + audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(numChannels); // do not create a new audio track if current track is compatible with sample parameters #ifdef USE_SHARED_MEM_BUFFER newTrack = new AudioTrack(streamType, sampleRate, sample->format(), - channels, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData); + channelMask, sample->getIMemory(), AUDIO_OUTPUT_FLAG_FAST, callback, userData); #else newTrack = new AudioTrack(streamType, sampleRate, sample->format(), - channels, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData, + channelMask, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData, bufferFrames); #endif oldTrack = mAudioTrack; diff --git a/media/libmedia/autodetect.cpp b/media/libmedia/autodetect.cpp deleted file mode 100644 index be5c3b2..0000000 --- a/media/libmedia/autodetect.cpp +++ /dev/null @@ -1,885 +0,0 @@ -/* - * Copyright (C) 2008 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "autodetect.h" - -struct CharRange { - uint16_t first; - uint16_t last; -}; - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*x)) - -// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT -static const CharRange kShiftJISRanges[] = { - { 0x8140, 0x817E }, - { 0x8180, 0x81AC }, - { 0x81B8, 0x81BF }, - { 0x81C8, 0x81CE }, - { 0x81DA, 0x81E8 }, - { 0x81F0, 0x81F7 }, - { 0x81FC, 0x81FC }, - { 0x824F, 0x8258 }, - { 0x8260, 0x8279 }, - { 0x8281, 0x829A }, - { 0x829F, 0x82F1 }, - { 0x8340, 0x837E }, - { 0x8380, 0x8396 }, - { 0x839F, 0x83B6 }, - { 0x83BF, 0x83D6 }, - { 0x8440, 0x8460 }, - { 0x8470, 0x847E }, - { 0x8480, 0x8491 }, - { 0x849F, 0x84BE }, - { 0x8740, 0x875D }, - { 0x875F, 0x8775 }, - { 0x877E, 0x877E }, - { 0x8780, 0x879C }, - { 0x889F, 0x88FC }, - { 0x8940, 0x897E }, - { 0x8980, 0x89FC }, - { 0x8A40, 0x8A7E }, - { 0x8A80, 0x8AFC }, - { 0x8B40, 0x8B7E }, - { 0x8B80, 0x8BFC }, - { 0x8C40, 0x8C7E }, - { 0x8C80, 0x8CFC }, - { 0x8D40, 0x8D7E }, - { 0x8D80, 0x8DFC }, - { 0x8E40, 0x8E7E }, - { 0x8E80, 0x8EFC }, - { 0x8F40, 0x8F7E }, - { 0x8F80, 0x8FFC }, - { 0x9040, 0x907E }, - { 0x9080, 0x90FC }, - { 0x9140, 0x917E }, - { 0x9180, 0x91FC }, - { 0x9240, 0x927E }, - { 0x9280, 0x92FC }, - { 0x9340, 0x937E }, - { 0x9380, 0x93FC }, - { 0x9440, 0x947E }, - { 0x9480, 0x94FC }, - { 0x9540, 0x957E }, - { 0x9580, 0x95FC }, - { 0x9640, 0x967E }, - { 0x9680, 0x96FC }, - { 0x9740, 0x977E }, - { 0x9780, 0x97FC }, - { 0x9840, 0x9872 }, - { 0x989F, 0x98FC }, - { 0x9940, 0x997E }, - { 0x9980, 0x99FC }, - { 0x9A40, 0x9A7E }, - { 0x9A80, 0x9AFC }, - { 0x9B40, 0x9B7E }, - { 0x9B80, 0x9BFC }, - { 0x9C40, 0x9C7E }, - { 0x9C80, 0x9CFC }, - { 0x9D40, 0x9D7E }, - { 0x9D80, 0x9DFC }, - { 0x9E40, 0x9E7E }, - { 0x9E80, 0x9EFC }, - { 0x9F40, 0x9F7E }, - { 0x9F80, 0x9FFC }, - { 0xE040, 0xE07E }, - { 0xE080, 0xE0FC }, - { 0xE140, 0xE17E }, - { 0xE180, 0xE1FC }, - { 0xE240, 0xE27E }, - { 0xE280, 0xE2FC }, - { 0xE340, 0xE37E }, - { 0xE380, 0xE3FC }, - { 
0xE440, 0xE47E }, - { 0xE480, 0xE4FC }, - { 0xE540, 0xE57E }, - { 0xE580, 0xE5FC }, - { 0xE640, 0xE67E }, - { 0xE680, 0xE6FC }, - { 0xE740, 0xE77E }, - { 0xE780, 0xE7FC }, - { 0xE840, 0xE87E }, - { 0xE880, 0xE8FC }, - { 0xE940, 0xE97E }, - { 0xE980, 0xE9FC }, - { 0xEA40, 0xEA7E }, - { 0xEA80, 0xEAA4 }, - { 0xED40, 0xED7E }, - { 0xED80, 0xEDFC }, - { 0xEE40, 0xEE7E }, - { 0xEE80, 0xEEEC }, - { 0xEEEF, 0xEEFC }, - { 0xFA40, 0xFA7E }, - { 0xFA80, 0xFAFC }, - { 0xFB40, 0xFB7E }, - { 0xFB80, 0xFBFC }, - { 0xFC40, 0xFC4B }, -}; - -// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP936.TXT -static const CharRange kGBKRanges[] = { - { 0x8140, 0x817E }, - { 0x8180, 0x81FE }, - { 0x8240, 0x827E }, - { 0x8280, 0x82FE }, - { 0x8340, 0x837E }, - { 0x8380, 0x83FE }, - { 0x8440, 0x847E }, - { 0x8480, 0x84FE }, - { 0x8540, 0x857E }, - { 0x8580, 0x85FE }, - { 0x8640, 0x867E }, - { 0x8680, 0x86FE }, - { 0x8740, 0x877E }, - { 0x8780, 0x87FE }, - { 0x8840, 0x887E }, - { 0x8880, 0x88FE }, - { 0x8940, 0x897E }, - { 0x8980, 0x89FE }, - { 0x8A40, 0x8A7E }, - { 0x8A80, 0x8AFE }, - { 0x8B40, 0x8B7E }, - { 0x8B80, 0x8BFE }, - { 0x8C40, 0x8C7E }, - { 0x8C80, 0x8CFE }, - { 0x8D40, 0x8D7E }, - { 0x8D80, 0x8DFE }, - { 0x8E40, 0x8E7E }, - { 0x8E80, 0x8EFE }, - { 0x8F40, 0x8F7E }, - { 0x8F80, 0x8FFE }, - { 0x9040, 0x907E }, - { 0x9080, 0x90FE }, - { 0x9140, 0x917E }, - { 0x9180, 0x91FE }, - { 0x9240, 0x927E }, - { 0x9280, 0x92FE }, - { 0x9340, 0x937E }, - { 0x9380, 0x93FE }, - { 0x9440, 0x947E }, - { 0x9480, 0x94FE }, - { 0x9540, 0x957E }, - { 0x9580, 0x95FE }, - { 0x9640, 0x967E }, - { 0x9680, 0x96FE }, - { 0x9740, 0x977E }, - { 0x9780, 0x97FE }, - { 0x9840, 0x987E }, - { 0x9880, 0x98FE }, - { 0x9940, 0x997E }, - { 0x9980, 0x99FE }, - { 0x9A40, 0x9A7E }, - { 0x9A80, 0x9AFE }, - { 0x9B40, 0x9B7E }, - { 0x9B80, 0x9BFE }, - { 0x9C40, 0x9C7E }, - { 0x9C80, 0x9CFE }, - { 0x9D40, 0x9D7E }, - { 0x9D80, 0x9DFE }, - { 0x9E40, 0x9E7E }, - { 0x9E80, 0x9EFE }, - { 0x9F40, 0x9F7E }, 
- { 0x9F80, 0x9FFE }, - { 0xA040, 0xA07E }, - { 0xA080, 0xA0FE }, - { 0xA1A1, 0xA1FE }, - { 0xA2A1, 0xA2AA }, - { 0xA2B1, 0xA2E2 }, - { 0xA2E5, 0xA2EE }, - { 0xA2F1, 0xA2FC }, - { 0xA3A1, 0xA3FE }, - { 0xA4A1, 0xA4F3 }, - { 0xA5A1, 0xA5F6 }, - { 0xA6A1, 0xA6B8 }, - { 0xA6C1, 0xA6D8 }, - { 0xA6E0, 0xA6EB }, - { 0xA6EE, 0xA6F2 }, - { 0xA6F4, 0xA6F5 }, - { 0xA7A1, 0xA7C1 }, - { 0xA7D1, 0xA7F1 }, - { 0xA840, 0xA87E }, - { 0xA880, 0xA895 }, - { 0xA8A1, 0xA8BB }, - { 0xA8BD, 0xA8BE }, - { 0xA8C0, 0xA8C0 }, - { 0xA8C5, 0xA8E9 }, - { 0xA940, 0xA957 }, - { 0xA959, 0xA95A }, - { 0xA95C, 0xA95C }, - { 0xA960, 0xA97E }, - { 0xA980, 0xA988 }, - { 0xA996, 0xA996 }, - { 0xA9A4, 0xA9EF }, - { 0xAA40, 0xAA7E }, - { 0xAA80, 0xAAA0 }, - { 0xAB40, 0xAB7E }, - { 0xAB80, 0xABA0 }, - { 0xAC40, 0xAC7E }, - { 0xAC80, 0xACA0 }, - { 0xAD40, 0xAD7E }, - { 0xAD80, 0xADA0 }, - { 0xAE40, 0xAE7E }, - { 0xAE80, 0xAEA0 }, - { 0xAF40, 0xAF7E }, - { 0xAF80, 0xAFA0 }, - { 0xB040, 0xB07E }, - { 0xB080, 0xB0FE }, - { 0xB140, 0xB17E }, - { 0xB180, 0xB1FE }, - { 0xB240, 0xB27E }, - { 0xB280, 0xB2FE }, - { 0xB340, 0xB37E }, - { 0xB380, 0xB3FE }, - { 0xB440, 0xB47E }, - { 0xB480, 0xB4FE }, - { 0xB540, 0xB57E }, - { 0xB580, 0xB5FE }, - { 0xB640, 0xB67E }, - { 0xB680, 0xB6FE }, - { 0xB740, 0xB77E }, - { 0xB780, 0xB7FE }, - { 0xB840, 0xB87E }, - { 0xB880, 0xB8FE }, - { 0xB940, 0xB97E }, - { 0xB980, 0xB9FE }, - { 0xBA40, 0xBA7E }, - { 0xBA80, 0xBAFE }, - { 0xBB40, 0xBB7E }, - { 0xBB80, 0xBBFE }, - { 0xBC40, 0xBC7E }, - { 0xBC80, 0xBCFE }, - { 0xBD40, 0xBD7E }, - { 0xBD80, 0xBDFE }, - { 0xBE40, 0xBE7E }, - { 0xBE80, 0xBEFE }, - { 0xBF40, 0xBF7E }, - { 0xBF80, 0xBFFE }, - { 0xC040, 0xC07E }, - { 0xC080, 0xC0FE }, - { 0xC140, 0xC17E }, - { 0xC180, 0xC1FE }, - { 0xC240, 0xC27E }, - { 0xC280, 0xC2FE }, - { 0xC340, 0xC37E }, - { 0xC380, 0xC3FE }, - { 0xC440, 0xC47E }, - { 0xC480, 0xC4FE }, - { 0xC540, 0xC57E }, - { 0xC580, 0xC5FE }, - { 0xC640, 0xC67E }, - { 0xC680, 0xC6FE }, - { 0xC740, 0xC77E }, - { 0xC780, 0xC7FE 
}, - { 0xC840, 0xC87E }, - { 0xC880, 0xC8FE }, - { 0xC940, 0xC97E }, - { 0xC980, 0xC9FE }, - { 0xCA40, 0xCA7E }, - { 0xCA80, 0xCAFE }, - { 0xCB40, 0xCB7E }, - { 0xCB80, 0xCBFE }, - { 0xCC40, 0xCC7E }, - { 0xCC80, 0xCCFE }, - { 0xCD40, 0xCD7E }, - { 0xCD80, 0xCDFE }, - { 0xCE40, 0xCE7E }, - { 0xCE80, 0xCEFE }, - { 0xCF40, 0xCF7E }, - { 0xCF80, 0xCFFE }, - { 0xD040, 0xD07E }, - { 0xD080, 0xD0FE }, - { 0xD140, 0xD17E }, - { 0xD180, 0xD1FE }, - { 0xD240, 0xD27E }, - { 0xD280, 0xD2FE }, - { 0xD340, 0xD37E }, - { 0xD380, 0xD3FE }, - { 0xD440, 0xD47E }, - { 0xD480, 0xD4FE }, - { 0xD540, 0xD57E }, - { 0xD580, 0xD5FE }, - { 0xD640, 0xD67E }, - { 0xD680, 0xD6FE }, - { 0xD740, 0xD77E }, - { 0xD780, 0xD7F9 }, - { 0xD840, 0xD87E }, - { 0xD880, 0xD8FE }, - { 0xD940, 0xD97E }, - { 0xD980, 0xD9FE }, - { 0xDA40, 0xDA7E }, - { 0xDA80, 0xDAFE }, - { 0xDB40, 0xDB7E }, - { 0xDB80, 0xDBFE }, - { 0xDC40, 0xDC7E }, - { 0xDC80, 0xDCFE }, - { 0xDD40, 0xDD7E }, - { 0xDD80, 0xDDFE }, - { 0xDE40, 0xDE7E }, - { 0xDE80, 0xDEFE }, - { 0xDF40, 0xDF7E }, - { 0xDF80, 0xDFFE }, - { 0xE040, 0xE07E }, - { 0xE080, 0xE0FE }, - { 0xE140, 0xE17E }, - { 0xE180, 0xE1FE }, - { 0xE240, 0xE27E }, - { 0xE280, 0xE2FE }, - { 0xE340, 0xE37E }, - { 0xE380, 0xE3FE }, - { 0xE440, 0xE47E }, - { 0xE480, 0xE4FE }, - { 0xE540, 0xE57E }, - { 0xE580, 0xE5FE }, - { 0xE640, 0xE67E }, - { 0xE680, 0xE6FE }, - { 0xE740, 0xE77E }, - { 0xE780, 0xE7FE }, - { 0xE840, 0xE87E }, - { 0xE880, 0xE8FE }, - { 0xE940, 0xE97E }, - { 0xE980, 0xE9FE }, - { 0xEA40, 0xEA7E }, - { 0xEA80, 0xEAFE }, - { 0xEB40, 0xEB7E }, - { 0xEB80, 0xEBFE }, - { 0xEC40, 0xEC7E }, - { 0xEC80, 0xECFE }, - { 0xED40, 0xED7E }, - { 0xED80, 0xEDFE }, - { 0xEE40, 0xEE7E }, - { 0xEE80, 0xEEFE }, - { 0xEF40, 0xEF7E }, - { 0xEF80, 0xEFFE }, - { 0xF040, 0xF07E }, - { 0xF080, 0xF0FE }, - { 0xF140, 0xF17E }, - { 0xF180, 0xF1FE }, - { 0xF240, 0xF27E }, - { 0xF280, 0xF2FE }, - { 0xF340, 0xF37E }, - { 0xF380, 0xF3FE }, - { 0xF440, 0xF47E }, - { 0xF480, 0xF4FE }, - { 0xF540, 
0xF57E }, - { 0xF580, 0xF5FE }, - { 0xF640, 0xF67E }, - { 0xF680, 0xF6FE }, - { 0xF740, 0xF77E }, - { 0xF780, 0xF7FE }, - { 0xF840, 0xF87E }, - { 0xF880, 0xF8A0 }, - { 0xF940, 0xF97E }, - { 0xF980, 0xF9A0 }, - { 0xFA40, 0xFA7E }, - { 0xFA80, 0xFAA0 }, - { 0xFB40, 0xFB7E }, - { 0xFB80, 0xFBA0 }, - { 0xFC40, 0xFC7E }, - { 0xFC80, 0xFCA0 }, - { 0xFD40, 0xFD7E }, - { 0xFD80, 0xFDA0 }, - { 0xFE40, 0xFE4F }, -}; - -// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP949.TXT -static const CharRange kEUCKRRanges[] = { - { 0x8141, 0x815A }, - { 0x8161, 0x817A }, - { 0x8181, 0x81FE }, - { 0x8241, 0x825A }, - { 0x8261, 0x827A }, - { 0x8281, 0x82FE }, - { 0x8341, 0x835A }, - { 0x8361, 0x837A }, - { 0x8381, 0x83FE }, - { 0x8441, 0x845A }, - { 0x8461, 0x847A }, - { 0x8481, 0x84FE }, - { 0x8541, 0x855A }, - { 0x8561, 0x857A }, - { 0x8581, 0x85FE }, - { 0x8641, 0x865A }, - { 0x8661, 0x867A }, - { 0x8681, 0x86FE }, - { 0x8741, 0x875A }, - { 0x8761, 0x877A }, - { 0x8781, 0x87FE }, - { 0x8841, 0x885A }, - { 0x8861, 0x887A }, - { 0x8881, 0x88FE }, - { 0x8941, 0x895A }, - { 0x8961, 0x897A }, - { 0x8981, 0x89FE }, - { 0x8A41, 0x8A5A }, - { 0x8A61, 0x8A7A }, - { 0x8A81, 0x8AFE }, - { 0x8B41, 0x8B5A }, - { 0x8B61, 0x8B7A }, - { 0x8B81, 0x8BFE }, - { 0x8C41, 0x8C5A }, - { 0x8C61, 0x8C7A }, - { 0x8C81, 0x8CFE }, - { 0x8D41, 0x8D5A }, - { 0x8D61, 0x8D7A }, - { 0x8D81, 0x8DFE }, - { 0x8E41, 0x8E5A }, - { 0x8E61, 0x8E7A }, - { 0x8E81, 0x8EFE }, - { 0x8F41, 0x8F5A }, - { 0x8F61, 0x8F7A }, - { 0x8F81, 0x8FFE }, - { 0x9041, 0x905A }, - { 0x9061, 0x907A }, - { 0x9081, 0x90FE }, - { 0x9141, 0x915A }, - { 0x9161, 0x917A }, - { 0x9181, 0x91FE }, - { 0x9241, 0x925A }, - { 0x9261, 0x927A }, - { 0x9281, 0x92FE }, - { 0x9341, 0x935A }, - { 0x9361, 0x937A }, - { 0x9381, 0x93FE }, - { 0x9441, 0x945A }, - { 0x9461, 0x947A }, - { 0x9481, 0x94FE }, - { 0x9541, 0x955A }, - { 0x9561, 0x957A }, - { 0x9581, 0x95FE }, - { 0x9641, 0x965A }, - { 0x9661, 0x967A }, - { 0x9681, 0x96FE }, - { 
0x9741, 0x975A }, - { 0x9761, 0x977A }, - { 0x9781, 0x97FE }, - { 0x9841, 0x985A }, - { 0x9861, 0x987A }, - { 0x9881, 0x98FE }, - { 0x9941, 0x995A }, - { 0x9961, 0x997A }, - { 0x9981, 0x99FE }, - { 0x9A41, 0x9A5A }, - { 0x9A61, 0x9A7A }, - { 0x9A81, 0x9AFE }, - { 0x9B41, 0x9B5A }, - { 0x9B61, 0x9B7A }, - { 0x9B81, 0x9BFE }, - { 0x9C41, 0x9C5A }, - { 0x9C61, 0x9C7A }, - { 0x9C81, 0x9CFE }, - { 0x9D41, 0x9D5A }, - { 0x9D61, 0x9D7A }, - { 0x9D81, 0x9DFE }, - { 0x9E41, 0x9E5A }, - { 0x9E61, 0x9E7A }, - { 0x9E81, 0x9EFE }, - { 0x9F41, 0x9F5A }, - { 0x9F61, 0x9F7A }, - { 0x9F81, 0x9FFE }, - { 0xA041, 0xA05A }, - { 0xA061, 0xA07A }, - { 0xA081, 0xA0FE }, - { 0xA141, 0xA15A }, - { 0xA161, 0xA17A }, - { 0xA181, 0xA1FE }, - { 0xA241, 0xA25A }, - { 0xA261, 0xA27A }, - { 0xA281, 0xA2E7 }, - { 0xA341, 0xA35A }, - { 0xA361, 0xA37A }, - { 0xA381, 0xA3FE }, - { 0xA441, 0xA45A }, - { 0xA461, 0xA47A }, - { 0xA481, 0xA4FE }, - { 0xA541, 0xA55A }, - { 0xA561, 0xA57A }, - { 0xA581, 0xA5AA }, - { 0xA5B0, 0xA5B9 }, - { 0xA5C1, 0xA5D8 }, - { 0xA5E1, 0xA5F8 }, - { 0xA641, 0xA65A }, - { 0xA661, 0xA67A }, - { 0xA681, 0xA6E4 }, - { 0xA741, 0xA75A }, - { 0xA761, 0xA77A }, - { 0xA781, 0xA7EF }, - { 0xA841, 0xA85A }, - { 0xA861, 0xA87A }, - { 0xA881, 0xA8A4 }, - { 0xA8A6, 0xA8A6 }, - { 0xA8A8, 0xA8AF }, - { 0xA8B1, 0xA8FE }, - { 0xA941, 0xA95A }, - { 0xA961, 0xA97A }, - { 0xA981, 0xA9FE }, - { 0xAA41, 0xAA5A }, - { 0xAA61, 0xAA7A }, - { 0xAA81, 0xAAF3 }, - { 0xAB41, 0xAB5A }, - { 0xAB61, 0xAB7A }, - { 0xAB81, 0xABF6 }, - { 0xAC41, 0xAC5A }, - { 0xAC61, 0xAC7A }, - { 0xAC81, 0xACC1 }, - { 0xACD1, 0xACF1 }, - { 0xAD41, 0xAD5A }, - { 0xAD61, 0xAD7A }, - { 0xAD81, 0xADA0 }, - { 0xAE41, 0xAE5A }, - { 0xAE61, 0xAE7A }, - { 0xAE81, 0xAEA0 }, - { 0xAF41, 0xAF5A }, - { 0xAF61, 0xAF7A }, - { 0xAF81, 0xAFA0 }, - { 0xB041, 0xB05A }, - { 0xB061, 0xB07A }, - { 0xB081, 0xB0FE }, - { 0xB141, 0xB15A }, - { 0xB161, 0xB17A }, - { 0xB181, 0xB1FE }, - { 0xB241, 0xB25A }, - { 0xB261, 0xB27A }, - { 0xB281, 0xB2FE }, - 
{ 0xB341, 0xB35A }, - { 0xB361, 0xB37A }, - { 0xB381, 0xB3FE }, - { 0xB441, 0xB45A }, - { 0xB461, 0xB47A }, - { 0xB481, 0xB4FE }, - { 0xB541, 0xB55A }, - { 0xB561, 0xB57A }, - { 0xB581, 0xB5FE }, - { 0xB641, 0xB65A }, - { 0xB661, 0xB67A }, - { 0xB681, 0xB6FE }, - { 0xB741, 0xB75A }, - { 0xB761, 0xB77A }, - { 0xB781, 0xB7FE }, - { 0xB841, 0xB85A }, - { 0xB861, 0xB87A }, - { 0xB881, 0xB8FE }, - { 0xB941, 0xB95A }, - { 0xB961, 0xB97A }, - { 0xB981, 0xB9FE }, - { 0xBA41, 0xBA5A }, - { 0xBA61, 0xBA7A }, - { 0xBA81, 0xBAFE }, - { 0xBB41, 0xBB5A }, - { 0xBB61, 0xBB7A }, - { 0xBB81, 0xBBFE }, - { 0xBC41, 0xBC5A }, - { 0xBC61, 0xBC7A }, - { 0xBC81, 0xBCFE }, - { 0xBD41, 0xBD5A }, - { 0xBD61, 0xBD7A }, - { 0xBD81, 0xBDFE }, - { 0xBE41, 0xBE5A }, - { 0xBE61, 0xBE7A }, - { 0xBE81, 0xBEFE }, - { 0xBF41, 0xBF5A }, - { 0xBF61, 0xBF7A }, - { 0xBF81, 0xBFFE }, - { 0xC041, 0xC05A }, - { 0xC061, 0xC07A }, - { 0xC081, 0xC0FE }, - { 0xC141, 0xC15A }, - { 0xC161, 0xC17A }, - { 0xC181, 0xC1FE }, - { 0xC241, 0xC25A }, - { 0xC261, 0xC27A }, - { 0xC281, 0xC2FE }, - { 0xC341, 0xC35A }, - { 0xC361, 0xC37A }, - { 0xC381, 0xC3FE }, - { 0xC441, 0xC45A }, - { 0xC461, 0xC47A }, - { 0xC481, 0xC4FE }, - { 0xC541, 0xC55A }, - { 0xC561, 0xC57A }, - { 0xC581, 0xC5FE }, - { 0xC641, 0xC652 }, - { 0xC6A1, 0xC6FE }, - { 0xC7A1, 0xC7FE }, - { 0xC8A1, 0xC8FE }, - { 0xCAA1, 0xCAFE }, - { 0xCBA1, 0xCBFE }, - { 0xCCA1, 0xCCFE }, - { 0xCDA1, 0xCDFE }, - { 0xCEA1, 0xCEFE }, - { 0xCFA1, 0xCFFE }, - { 0xD0A1, 0xD0FE }, - { 0xD1A1, 0xD1FE }, - { 0xD2A1, 0xD2FE }, - { 0xD3A1, 0xD3FE }, - { 0xD4A1, 0xD4FE }, - { 0xD5A1, 0xD5FE }, - { 0xD6A1, 0xD6FE }, - { 0xD7A1, 0xD7FE }, - { 0xD8A1, 0xD8FE }, - { 0xD9A1, 0xD9FE }, - { 0xDAA1, 0xDAFE }, - { 0xDBA1, 0xDBFE }, - { 0xDCA1, 0xDCFE }, - { 0xDDA1, 0xDDFE }, - { 0xDEA1, 0xDEFE }, - { 0xDFA1, 0xDFFE }, - { 0xE0A1, 0xE0FE }, - { 0xE1A1, 0xE1FE }, - { 0xE2A1, 0xE2FE }, - { 0xE3A1, 0xE3FE }, - { 0xE4A1, 0xE4FE }, - { 0xE5A1, 0xE5FE }, - { 0xE6A1, 0xE6FE }, - { 0xE7A1, 0xE7FE }, 
- { 0xE8A1, 0xE8FE }, - { 0xE9A1, 0xE9FE }, - { 0xEAA1, 0xEAFE }, - { 0xEBA1, 0xEBFE }, - { 0xECA1, 0xECFE }, - { 0xEDA1, 0xEDFE }, - { 0xEEA1, 0xEEFE }, - { 0xEFA1, 0xEFFE }, - { 0xF0A1, 0xF0FE }, - { 0xF1A1, 0xF1FE }, - { 0xF2A1, 0xF2FE }, - { 0xF3A1, 0xF3FE }, - { 0xF4A1, 0xF4FE }, - { 0xF5A1, 0xF5FE }, - { 0xF6A1, 0xF6FE }, - { 0xF7A1, 0xF7FE }, - { 0xF8A1, 0xF8FE }, - { 0xF9A1, 0xF9FE }, - { 0xFAA1, 0xFAFE }, - { 0xFBA1, 0xFBFE }, - { 0xFCA1, 0xFCFE }, - { 0xFDA1, 0xFDFE }, -}; - -// generated from http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP950.TXT -static const CharRange kBig5Ranges[] = { - { 0xA140, 0xA17E }, - { 0xA1A1, 0xA1FE }, - { 0xA240, 0xA27E }, - { 0xA2A1, 0xA2FE }, - { 0xA340, 0xA37E }, - { 0xA3A1, 0xA3BF }, - { 0xA3E1, 0xA3E1 }, - { 0xA440, 0xA47E }, - { 0xA4A1, 0xA4FE }, - { 0xA540, 0xA57E }, - { 0xA5A1, 0xA5FE }, - { 0xA640, 0xA67E }, - { 0xA6A1, 0xA6FE }, - { 0xA740, 0xA77E }, - { 0xA7A1, 0xA7FE }, - { 0xA840, 0xA87E }, - { 0xA8A1, 0xA8FE }, - { 0xA940, 0xA97E }, - { 0xA9A1, 0xA9FE }, - { 0xAA40, 0xAA7E }, - { 0xAAA1, 0xAAFE }, - { 0xAB40, 0xAB7E }, - { 0xABA1, 0xABFE }, - { 0xAC40, 0xAC7E }, - { 0xACA1, 0xACFE }, - { 0xAD40, 0xAD7E }, - { 0xADA1, 0xADFE }, - { 0xAE40, 0xAE7E }, - { 0xAEA1, 0xAEFE }, - { 0xAF40, 0xAF7E }, - { 0xAFA1, 0xAFFE }, - { 0xB040, 0xB07E }, - { 0xB0A1, 0xB0FE }, - { 0xB140, 0xB17E }, - { 0xB1A1, 0xB1FE }, - { 0xB240, 0xB27E }, - { 0xB2A1, 0xB2FE }, - { 0xB340, 0xB37E }, - { 0xB3A1, 0xB3FE }, - { 0xB440, 0xB47E }, - { 0xB4A1, 0xB4FE }, - { 0xB540, 0xB57E }, - { 0xB5A1, 0xB5FE }, - { 0xB640, 0xB67E }, - { 0xB6A1, 0xB6FE }, - { 0xB740, 0xB77E }, - { 0xB7A1, 0xB7FE }, - { 0xB840, 0xB87E }, - { 0xB8A1, 0xB8FE }, - { 0xB940, 0xB97E }, - { 0xB9A1, 0xB9FE }, - { 0xBA40, 0xBA7E }, - { 0xBAA1, 0xBAFE }, - { 0xBB40, 0xBB7E }, - { 0xBBA1, 0xBBFE }, - { 0xBC40, 0xBC7E }, - { 0xBCA1, 0xBCFE }, - { 0xBD40, 0xBD7E }, - { 0xBDA1, 0xBDFE }, - { 0xBE40, 0xBE7E }, - { 0xBEA1, 0xBEFE }, - { 0xBF40, 0xBF7E }, - { 0xBFA1, 
0xBFFE }, - { 0xC040, 0xC07E }, - { 0xC0A1, 0xC0FE }, - { 0xC140, 0xC17E }, - { 0xC1A1, 0xC1FE }, - { 0xC240, 0xC27E }, - { 0xC2A1, 0xC2FE }, - { 0xC340, 0xC37E }, - { 0xC3A1, 0xC3FE }, - { 0xC440, 0xC47E }, - { 0xC4A1, 0xC4FE }, - { 0xC540, 0xC57E }, - { 0xC5A1, 0xC5FE }, - { 0xC640, 0xC67E }, - { 0xC940, 0xC97E }, - { 0xC9A1, 0xC9FE }, - { 0xCA40, 0xCA7E }, - { 0xCAA1, 0xCAFE }, - { 0xCB40, 0xCB7E }, - { 0xCBA1, 0xCBFE }, - { 0xCC40, 0xCC7E }, - { 0xCCA1, 0xCCFE }, - { 0xCD40, 0xCD7E }, - { 0xCDA1, 0xCDFE }, - { 0xCE40, 0xCE7E }, - { 0xCEA1, 0xCEFE }, - { 0xCF40, 0xCF7E }, - { 0xCFA1, 0xCFFE }, - { 0xD040, 0xD07E }, - { 0xD0A1, 0xD0FE }, - { 0xD140, 0xD17E }, - { 0xD1A1, 0xD1FE }, - { 0xD240, 0xD27E }, - { 0xD2A1, 0xD2FE }, - { 0xD340, 0xD37E }, - { 0xD3A1, 0xD3FE }, - { 0xD440, 0xD47E }, - { 0xD4A1, 0xD4FE }, - { 0xD540, 0xD57E }, - { 0xD5A1, 0xD5FE }, - { 0xD640, 0xD67E }, - { 0xD6A1, 0xD6FE }, - { 0xD740, 0xD77E }, - { 0xD7A1, 0xD7FE }, - { 0xD840, 0xD87E }, - { 0xD8A1, 0xD8FE }, - { 0xD940, 0xD97E }, - { 0xD9A1, 0xD9FE }, - { 0xDA40, 0xDA7E }, - { 0xDAA1, 0xDAFE }, - { 0xDB40, 0xDB7E }, - { 0xDBA1, 0xDBFE }, - { 0xDC40, 0xDC7E }, - { 0xDCA1, 0xDCFE }, - { 0xDD40, 0xDD7E }, - { 0xDDA1, 0xDDFE }, - { 0xDE40, 0xDE7E }, - { 0xDEA1, 0xDEFE }, - { 0xDF40, 0xDF7E }, - { 0xDFA1, 0xDFFE }, - { 0xE040, 0xE07E }, - { 0xE0A1, 0xE0FE }, - { 0xE140, 0xE17E }, - { 0xE1A1, 0xE1FE }, - { 0xE240, 0xE27E }, - { 0xE2A1, 0xE2FE }, - { 0xE340, 0xE37E }, - { 0xE3A1, 0xE3FE }, - { 0xE440, 0xE47E }, - { 0xE4A1, 0xE4FE }, - { 0xE540, 0xE57E }, - { 0xE5A1, 0xE5FE }, - { 0xE640, 0xE67E }, - { 0xE6A1, 0xE6FE }, - { 0xE740, 0xE77E }, - { 0xE7A1, 0xE7FE }, - { 0xE840, 0xE87E }, - { 0xE8A1, 0xE8FE }, - { 0xE940, 0xE97E }, - { 0xE9A1, 0xE9FE }, - { 0xEA40, 0xEA7E }, - { 0xEAA1, 0xEAFE }, - { 0xEB40, 0xEB7E }, - { 0xEBA1, 0xEBFE }, - { 0xEC40, 0xEC7E }, - { 0xECA1, 0xECFE }, - { 0xED40, 0xED7E }, - { 0xEDA1, 0xEDFE }, - { 0xEE40, 0xEE7E }, - { 0xEEA1, 0xEEFE }, - { 0xEF40, 0xEF7E }, - { 
0xEFA1, 0xEFFE }, - { 0xF040, 0xF07E }, - { 0xF0A1, 0xF0FE }, - { 0xF140, 0xF17E }, - { 0xF1A1, 0xF1FE }, - { 0xF240, 0xF27E }, - { 0xF2A1, 0xF2FE }, - { 0xF340, 0xF37E }, - { 0xF3A1, 0xF3FE }, - { 0xF440, 0xF47E }, - { 0xF4A1, 0xF4FE }, - { 0xF540, 0xF57E }, - { 0xF5A1, 0xF5FE }, - { 0xF640, 0xF67E }, - { 0xF6A1, 0xF6FE }, - { 0xF740, 0xF77E }, - { 0xF7A1, 0xF7FE }, - { 0xF840, 0xF87E }, - { 0xF8A1, 0xF8FE }, - { 0xF940, 0xF97E }, - { 0xF9A1, 0xF9FE }, -}; - -static bool charMatchesEncoding(int ch, const CharRange* encodingRanges, int rangeCount) { - // Use binary search to see if the character is contained in the encoding - int low = 0; - int high = rangeCount; - - while (low < high) { - int i = (low + high) / 2; - const CharRange* range = &encodingRanges[i]; - if (ch >= range->first && ch <= range->last) - return true; - if (ch > range->last) - low = i + 1; - else - high = i; - } - - return false; -} - -extern uint32_t findPossibleEncodings(int ch) -{ - // ASCII matches everything - if (ch < 256) return kEncodingAll; - - int result = kEncodingNone; - - if (charMatchesEncoding(ch, kShiftJISRanges, ARRAY_SIZE(kShiftJISRanges))) - result |= kEncodingShiftJIS; - if (charMatchesEncoding(ch, kGBKRanges, ARRAY_SIZE(kGBKRanges))) - result |= kEncodingGBK; - if (charMatchesEncoding(ch, kBig5Ranges, ARRAY_SIZE(kBig5Ranges))) - result |= kEncodingBig5; - if (charMatchesEncoding(ch, kEUCKRRanges, ARRAY_SIZE(kEUCKRRanges))) - result |= kEncodingEUCKR; - - return result; -} diff --git a/media/libmedia/autodetect.h b/media/libmedia/autodetect.h deleted file mode 100644 index 9675db3..0000000 --- a/media/libmedia/autodetect.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2008 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef AUTODETECT_H -#define AUTODETECT_H - -#include <inttypes.h> - -// flags used for native encoding detection -enum { - kEncodingNone = 0, - kEncodingShiftJIS = (1 << 0), - kEncodingGBK = (1 << 1), - kEncodingBig5 = (1 << 2), - kEncodingEUCKR = (1 << 3), - - kEncodingAll = (kEncodingShiftJIS | kEncodingGBK | kEncodingBig5 | kEncodingEUCKR), -}; - - -// returns a bitfield containing the possible native encodings for the given character -extern uint32_t findPossibleEncodings(int ch); - -#endif // AUTODETECT_H diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp index 110b94c..bad2494 100644 --- a/media/libmedia/mediametadataretriever.cpp +++ b/media/libmedia/mediametadataretriever.cpp @@ -157,7 +157,7 @@ sp<IMemory> MediaMetadataRetriever::extractAlbumArt() return mRetriever->extractAlbumArt(); } -void MediaMetadataRetriever::DeathNotifier::binderDied(const wp<IBinder>& who) { +void MediaMetadataRetriever::DeathNotifier::binderDied(const wp<IBinder>& who __unused) { Mutex::Autolock lock(MediaMetadataRetriever::sServiceLock); MediaMetadataRetriever::sService.clear(); ALOGW("MediaMetadataRetriever server died!"); diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp index 0f6d897..7a6f31d 100644 --- a/media/libmedia/mediaplayer.cpp +++ b/media/libmedia/mediaplayer.cpp @@ -654,7 +654,7 @@ status_t MediaPlayer::setRetransmitEndpoint(const char* addrString, return BAD_VALUE; } - memset(&mRetransmitEndpoint, 0, sizeof(&mRetransmitEndpoint)); + memset(&mRetransmitEndpoint, 0, 
sizeof(mRetransmitEndpoint)); mRetransmitEndpoint.sin_family = AF_INET; mRetransmitEndpoint.sin_addr = saddr; mRetransmitEndpoint.sin_port = htons(port); diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp index c2ac1a3..afe3936 100644 --- a/media/libmediaplayerservice/HDCP.cpp +++ b/media/libmediaplayerservice/HDCP.cpp @@ -107,11 +107,7 @@ uint32_t HDCP::getCaps() { return NO_INIT; } - // TO-DO: - // Only support HDCP_CAPS_ENCRYPT (byte-array to byte-array) for now. - // use mHDCPModule->getCaps() when the HDCP libraries get updated. - //return mHDCPModule->getCaps(); - return HDCPModule::HDCP_CAPS_ENCRYPT; + return mHDCPModule->getCaps(); } status_t HDCP::encrypt( diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index f9d9020..78dad19 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -973,7 +973,7 @@ status_t StagefrightRecorder::startRTPRecording() { return err; } - err = setupVideoEncoder(mediaSource, mVideoBitRate, &source); + err = setupVideoEncoder(mediaSource, &source); if (err != OK) { return err; } @@ -1017,7 +1017,7 @@ status_t StagefrightRecorder::startMPEG2TSRecording() { } sp<MediaSource> encoder; - err = setupVideoEncoder(mediaSource, mVideoBitRate, &encoder); + err = setupVideoEncoder(mediaSource, &encoder); if (err != OK) { return err; @@ -1383,12 +1383,11 @@ status_t StagefrightRecorder::setupCameraSource( status_t StagefrightRecorder::setupVideoEncoder( sp<MediaSource> cameraSource, - int32_t videoBitRate, sp<MediaSource> *source) { source->clear(); sp<MetaData> enc_meta = new MetaData; - enc_meta->setInt32(kKeyBitRate, videoBitRate); + enc_meta->setInt32(kKeyBitRate, mVideoBitRate); enc_meta->setInt32(kKeyFrameRate, mFrameRate); switch (mVideoEncoder) { @@ -1495,16 +1494,11 @@ status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) { return 
OK; } -status_t StagefrightRecorder::setupMPEG4Recording( - int outputFd, - int32_t videoWidth, int32_t videoHeight, - int32_t videoBitRate, - int32_t *totalBitRate, - sp<MediaWriter> *mediaWriter) { - mediaWriter->clear(); +status_t StagefrightRecorder::setupMPEG4Recording(int32_t *totalBitRate) { + mWriter.clear(); *totalBitRate = 0; status_t err = OK; - sp<MediaWriter> writer = new MPEG4Writer(outputFd); + sp<MediaWriter> writer = new MPEG4Writer(mOutputFd); if (mVideoSource < VIDEO_SOURCE_LIST_END) { @@ -1515,13 +1509,13 @@ status_t StagefrightRecorder::setupMPEG4Recording( } sp<MediaSource> encoder; - err = setupVideoEncoder(mediaSource, videoBitRate, &encoder); + err = setupVideoEncoder(mediaSource, &encoder); if (err != OK) { return err; } writer->addSource(encoder); - *totalBitRate += videoBitRate; + *totalBitRate += mVideoBitRate; } // Audio source is added at the end if it exists. @@ -1555,7 +1549,7 @@ status_t StagefrightRecorder::setupMPEG4Recording( } writer->setListener(mListener); - *mediaWriter = writer; + mWriter = writer; return OK; } @@ -1578,9 +1572,7 @@ void StagefrightRecorder::setupMPEG4MetaData(int64_t startTimeUs, int32_t totalB status_t StagefrightRecorder::startMPEG4Recording() { int32_t totalBitRate; - status_t err = setupMPEG4Recording( - mOutputFd, mVideoWidth, mVideoHeight, - mVideoBitRate, &totalBitRate, &mWriter); + status_t err = setupMPEG4Recording(&totalBitRate); if (err != OK) { return err; } diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h index 31f09e0..bc43488 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.h +++ b/media/libmediaplayerservice/StagefrightRecorder.h @@ -124,12 +124,7 @@ private: // frame buffers will be queued and dequeued sp<SurfaceMediaSource> mSurfaceMediaSource; - status_t setupMPEG4Recording( - int outputFd, - int32_t videoWidth, int32_t videoHeight, - int32_t videoBitRate, - int32_t *totalBitRate, - sp<MediaWriter> 
*mediaWriter); + status_t setupMPEG4Recording(int32_t *totalBitRate); void setupMPEG4MetaData(int64_t startTimeUs, int32_t totalBitRate, sp<MetaData> *meta); status_t startMPEG4Recording(); @@ -151,10 +146,7 @@ private: status_t setupSurfaceMediaSource(); status_t setupAudioEncoder(const sp<MediaWriter>& writer); - status_t setupVideoEncoder( - sp<MediaSource> cameraSource, - int32_t videoBitRate, - sp<MediaSource> *source); + status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaSource> *source); // Encoding parameter handling utilities status_t setParameter(const String8 &key, const String8 &value); diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp index 045bf64..ba8d0b4 100644 --- a/media/libnbaio/NBLog.cpp +++ b/media/libnbaio/NBLog.cpp @@ -441,7 +441,7 @@ void NBLog::Reader::dump(int fd, size_t indent) bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const { - return iMemory.get() == mIMemory.get(); + return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer(); } } // namespace android diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp index 5d5220f..eb274a8 100644 --- a/media/libstagefright/ACodec.cpp +++ b/media/libstagefright/ACodec.cpp @@ -35,7 +35,9 @@ #include <media/hardware/HardwareAPI.h> +#include <OMX_AudioExt.h> #include <OMX_Component.h> +#include <OMX_IndexExt.h> #include "include/avc_utils.h" @@ -977,6 +979,10 @@ status_t ACodec::setComponentRole( "audio_decoder.flac", "audio_encoder.flac" }, { MEDIA_MIMETYPE_AUDIO_MSGSM, "audio_decoder.gsm", "audio_encoder.gsm" }, + { MEDIA_MIMETYPE_VIDEO_MPEG2, + "video_decoder.mpeg2", "video_encoder.mpeg2" }, + { MEDIA_MIMETYPE_AUDIO_AC3, + "audio_decoder.ac3", "audio_encoder.ac3" }, }; static const size_t kNumMimeToRole = @@ -1268,6 +1274,15 @@ status_t ACodec::configureCodec( } else { err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels); } + } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) { + int32_t 
numChannels; + int32_t sampleRate; + if (!msg->findInt32("channel-count", &numChannels) + || !msg->findInt32("sample-rate", &sampleRate)) { + err = INVALID_OPERATION; + } else { + err = setupAC3Codec(encoder, numChannels, sampleRate); + } } if (err != OK) { @@ -1464,6 +1479,44 @@ status_t ACodec::setupAACCodec( mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile)); } +status_t ACodec::setupAC3Codec( + bool encoder, int32_t numChannels, int32_t sampleRate) { + status_t err = setupRawAudioFormat( + encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels); + + if (err != OK) { + return err; + } + + if (encoder) { + ALOGW("AC3 encoding is not supported."); + return INVALID_OPERATION; + } + + OMX_AUDIO_PARAM_ANDROID_AC3TYPE def; + InitOMXParams(&def); + def.nPortIndex = kPortIndexInput; + + err = mOMX->getParameter( + mNode, + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, + &def, + sizeof(def)); + + if (err != OK) { + return err; + } + + def.nChannels = numChannels; + def.nSampleRate = sampleRate; + + return mOMX->setParameter( + mNode, + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, + &def, + sizeof(def)); +} + static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate( bool isAMRWB, int32_t bps) { if (isAMRWB) { @@ -2558,7 +2611,7 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) { { OMX_AUDIO_PORTDEFINITIONTYPE *audioDef = &def.format.audio; - switch (audioDef->eEncoding) { + switch ((int)audioDef->eEncoding) { case OMX_AUDIO_CodingPCM: { OMX_AUDIO_PARAM_PCMMODETYPE params; @@ -2664,6 +2717,24 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) { break; } + case OMX_AUDIO_CodingAndroidAC3: + { + OMX_AUDIO_PARAM_ANDROID_AC3TYPE params; + InitOMXParams(¶ms); + params.nPortIndex = kPortIndexOutput; + + CHECK_EQ((status_t)OK, mOMX->getParameter( + mNode, + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, + ¶ms, + sizeof(params))); + + notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC3); + notify->setInt32("channel-count", params.nChannels); + 
notify->setInt32("sample-rate", params.nSampleRate); + break; + } + default: TRESPASS(); } @@ -3342,7 +3413,7 @@ bool ACodec::BaseState::onOMXFillBufferDone( sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, mCodec->id()); - if (!mCodec->mSentFormat) { + if (!mCodec->mSentFormat && rangeLength > 0) { mCodec->sendFormatChange(reply); } diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp index d7223d9..cadadc8 100644 --- a/media/libstagefright/AudioSource.cpp +++ b/media/libstagefright/AudioSource.cpp @@ -278,7 +278,7 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) { // Drop retrieved and previously lost audio data. if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) { - mRecord->getInputFramesLost(); + (void) mRecord->getInputFramesLost(); ALOGV("Drop audio data at %lld/%lld us", timeUs, mStartTimeUs); return OK; } diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp index 5772316..86844b8 100644 --- a/media/libstagefright/CameraSourceTimeLapse.cpp +++ b/media/libstagefright/CameraSourceTimeLapse.cpp @@ -85,7 +85,8 @@ CameraSourceTimeLapse::CameraSourceTimeLapse( mVideoWidth = videoSize.width; mVideoHeight = videoSize.height; - if (!trySettingVideoSize(videoSize.width, videoSize.height)) { + if (OK == mInitCheck && !trySettingVideoSize(videoSize.width, videoSize.height)) { + releaseCamera(); mInitCheck = NO_INIT; } diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp index 4f1c5b3..dfb5c04 100644 --- a/media/libstagefright/MPEG4Extractor.cpp +++ b/media/libstagefright/MPEG4Extractor.cpp @@ -2441,6 +2441,58 @@ status_t MPEG4Extractor::verifyTrack(Track *track) { return OK; } +typedef enum { + //AOT_NONE = -1, + //AOT_NULL_OBJECT = 0, + //AOT_AAC_MAIN = 1, /**< Main profile */ + AOT_AAC_LC = 2, /**< Low Complexity object */ + //AOT_AAC_SSR = 3, + //AOT_AAC_LTP = 4, + AOT_SBR = 5, + 
//AOT_AAC_SCAL = 6, + //AOT_TWIN_VQ = 7, + //AOT_CELP = 8, + //AOT_HVXC = 9, + //AOT_RSVD_10 = 10, /**< (reserved) */ + //AOT_RSVD_11 = 11, /**< (reserved) */ + //AOT_TTSI = 12, /**< TTSI Object */ + //AOT_MAIN_SYNTH = 13, /**< Main Synthetic object */ + //AOT_WAV_TAB_SYNTH = 14, /**< Wavetable Synthesis object */ + //AOT_GEN_MIDI = 15, /**< General MIDI object */ + //AOT_ALG_SYNTH_AUD_FX = 16, /**< Algorithmic Synthesis and Audio FX object */ + AOT_ER_AAC_LC = 17, /**< Error Resilient(ER) AAC Low Complexity */ + //AOT_RSVD_18 = 18, /**< (reserved) */ + //AOT_ER_AAC_LTP = 19, /**< Error Resilient(ER) AAC LTP object */ + AOT_ER_AAC_SCAL = 20, /**< Error Resilient(ER) AAC Scalable object */ + //AOT_ER_TWIN_VQ = 21, /**< Error Resilient(ER) TwinVQ object */ + AOT_ER_BSAC = 22, /**< Error Resilient(ER) BSAC object */ + AOT_ER_AAC_LD = 23, /**< Error Resilient(ER) AAC LowDelay object */ + //AOT_ER_CELP = 24, /**< Error Resilient(ER) CELP object */ + //AOT_ER_HVXC = 25, /**< Error Resilient(ER) HVXC object */ + //AOT_ER_HILN = 26, /**< Error Resilient(ER) HILN object */ + //AOT_ER_PARA = 27, /**< Error Resilient(ER) Parametric object */ + //AOT_RSVD_28 = 28, /**< might become SSC */ + AOT_PS = 29, /**< PS, Parametric Stereo (includes SBR) */ + //AOT_MPEGS = 30, /**< MPEG Surround */ + + AOT_ESCAPE = 31, /**< Signal AOT uses more than 5 bits */ + + //AOT_MP3ONMP4_L1 = 32, /**< MPEG-Layer1 in mp4 */ + //AOT_MP3ONMP4_L2 = 33, /**< MPEG-Layer2 in mp4 */ + //AOT_MP3ONMP4_L3 = 34, /**< MPEG-Layer3 in mp4 */ + //AOT_RSVD_35 = 35, /**< might become DST */ + //AOT_RSVD_36 = 36, /**< might become ALS */ + //AOT_AAC_SLS = 37, /**< AAC + SLS */ + //AOT_SLS = 38, /**< SLS */ + //AOT_ER_AAC_ELD = 39, /**< AAC Enhanced Low Delay */ + + //AOT_USAC = 42, /**< USAC */ + //AOT_SAOC = 43, /**< SAOC */ + //AOT_LD_MPEGS = 44, /**< Low Delay MPEG Surround */ + + //AOT_RSVD50 = 50, /**< Interim AOT for Rsvd50 */ +} AUDIO_OBJECT_TYPE; + status_t 
MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio( const void *esds_data, size_t esds_size) { ESDS esds(esds_data, esds_size); @@ -2523,7 +2575,7 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio( sampleRate = kSamplingRate[freqIndex]; } - if (objectType == 5 || objectType == 29) { // SBR specific config per 14496-3 table 1.13 + if (objectType == AOT_SBR || objectType == AOT_PS) {//SBR specific config per 14496-3 table 1.13 uint32_t extFreqIndex = br.getBits(4); int32_t extSampleRate; if (extFreqIndex == 15) { @@ -2541,6 +2593,111 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio( // mLastTrack->meta->setInt32(kKeyExtSampleRate, extSampleRate); } + switch (numChannels) { + // values defined in 14496-3_2009 amendment-4 Table 1.19 - Channel Configuration + case 0: + case 1:// FC + case 2:// FL FR + case 3:// FC, FL FR + case 4:// FC, FL FR, RC + case 5:// FC, FL FR, SL SR + case 6:// FC, FL FR, SL SR, LFE + //numChannels already contains the right value + break; + case 11:// FC, FL FR, SL SR, RC, LFE + numChannels = 7; + break; + case 7: // FC, FCL FCR, FL FR, SL SR, LFE + case 12:// FC, FL FR, SL SR, RL RR, LFE + case 14:// FC, FL FR, SL SR, LFE, FHL FHR + numChannels = 8; + break; + default: + return ERROR_UNSUPPORTED; + } + + { + if (objectType == AOT_SBR || objectType == AOT_PS) { + const int32_t extensionSamplingFrequency = br.getBits(4); + objectType = br.getBits(5); + + if (objectType == AOT_ESCAPE) { + objectType = 32 + br.getBits(6); + } + } + if (objectType == AOT_AAC_LC || objectType == AOT_ER_AAC_LC || + objectType == AOT_ER_AAC_LD || objectType == AOT_ER_AAC_SCAL || + objectType == AOT_ER_BSAC) { + const int32_t frameLengthFlag = br.getBits(1); + + const int32_t dependsOnCoreCoder = br.getBits(1); + + if (dependsOnCoreCoder ) { + const int32_t coreCoderDelay = br.getBits(14); + } + + const int32_t extensionFlag = br.getBits(1); + + if (numChannels == 0 ) { + int32_t channelsEffectiveNum = 0; + int32_t channelsNum 
= 0; + const int32_t ElementInstanceTag = br.getBits(4); + const int32_t Profile = br.getBits(2); + const int32_t SamplingFrequencyIndex = br.getBits(4); + const int32_t NumFrontChannelElements = br.getBits(4); + const int32_t NumSideChannelElements = br.getBits(4); + const int32_t NumBackChannelElements = br.getBits(4); + const int32_t NumLfeChannelElements = br.getBits(2); + const int32_t NumAssocDataElements = br.getBits(3); + const int32_t NumValidCcElements = br.getBits(4); + + const int32_t MonoMixdownPresent = br.getBits(1); + if (MonoMixdownPresent != 0) { + const int32_t MonoMixdownElementNumber = br.getBits(4); + } + + const int32_t StereoMixdownPresent = br.getBits(1); + if (StereoMixdownPresent != 0) { + const int32_t StereoMixdownElementNumber = br.getBits(4); + } + + const int32_t MatrixMixdownIndexPresent = br.getBits(1); + if (MatrixMixdownIndexPresent != 0) { + const int32_t MatrixMixdownIndex = br.getBits(2); + const int32_t PseudoSurroundEnable = br.getBits(1); + } + + int i; + for (i=0; i < NumFrontChannelElements; i++) { + const int32_t FrontElementIsCpe = br.getBits(1); + const int32_t FrontElementTagSelect = br.getBits(4); + channelsNum += FrontElementIsCpe ? 2 : 1; + } + + for (i=0; i < NumSideChannelElements; i++) { + const int32_t SideElementIsCpe = br.getBits(1); + const int32_t SideElementTagSelect = br.getBits(4); + channelsNum += SideElementIsCpe ? 2 : 1; + } + + for (i=0; i < NumBackChannelElements; i++) { + const int32_t BackElementIsCpe = br.getBits(1); + const int32_t BackElementTagSelect = br.getBits(4); + channelsNum += BackElementIsCpe ? 
2 : 1; + } + channelsEffectiveNum = channelsNum; + + for (i=0; i < NumLfeChannelElements; i++) { + const int32_t LfeElementTagSelect = br.getBits(4); + channelsNum += 1; + } + ALOGV("mpeg4 audio channelsNum = %d", channelsNum); + ALOGV("mpeg4 audio channelsEffectiveNum = %d", channelsEffectiveNum); + numChannels = channelsNum; + } + } + } + if (numChannels == 0) { return ERROR_UNSUPPORTED; } diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp index b5d4e44..340cba7 100644 --- a/media/libstagefright/MediaDefs.cpp +++ b/media/libstagefright/MediaDefs.cpp @@ -42,6 +42,7 @@ const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw"; const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac"; const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts"; const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm"; +const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3"; const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4"; const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav"; diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp index 43736ad..625922f 100644 --- a/media/libstagefright/OMXCodec.cpp +++ b/media/libstagefright/OMXCodec.cpp @@ -40,7 +40,9 @@ #include <utils/Vector.h> #include <OMX_Audio.h> +#include <OMX_AudioExt.h> #include <OMX_Component.h> +#include <OMX_IndexExt.h> #include "include/avc_utils.h" @@ -528,6 +530,17 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) { sampleRate, numChannels); } + } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mMIME)) { + int32_t numChannels; + int32_t sampleRate; + CHECK(meta->findInt32(kKeyChannelCount, &numChannels)); + CHECK(meta->findInt32(kKeySampleRate, &sampleRate)); + + status_t err = setAC3Format(numChannels, sampleRate); + if (err != OK) { + CODEC_LOGE("setAC3Format() failed (err = %d)", err); + return err; + } } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME) || !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) { // These are PCM-like 
formats with a fixed sample rate but @@ -1394,6 +1407,10 @@ void OMXCodec::setComponentRole( "audio_decoder.flac", "audio_encoder.flac" }, { MEDIA_MIMETYPE_AUDIO_MSGSM, "audio_decoder.gsm", "audio_encoder.gsm" }, + { MEDIA_MIMETYPE_VIDEO_MPEG2, + "video_decoder.mpeg2", "video_encoder.mpeg2" }, + { MEDIA_MIMETYPE_AUDIO_AC3, + "audio_decoder.ac3", "audio_encoder.ac3" }, }; static const size_t kNumMimeToRole = @@ -3489,6 +3506,31 @@ status_t OMXCodec::setAACFormat( return OK; } +status_t OMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate) { + OMX_AUDIO_PARAM_ANDROID_AC3TYPE def; + InitOMXParams(&def); + def.nPortIndex = kPortIndexInput; + + status_t err = mOMX->getParameter( + mNode, + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, + &def, + sizeof(def)); + + if (err != OK) { + return err; + } + + def.nChannels = numChannels; + def.nSampleRate = sampleRate; + + return mOMX->setParameter( + mNode, + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, + &def, + sizeof(def)); +} + void OMXCodec::setG711Format(int32_t numChannels) { CHECK(!mIsEncoder); setRawAudioFormat(kPortIndexInput, 8000, numChannels); @@ -4422,6 +4464,17 @@ void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) { mOutputFormat->setInt32(kKeyChannelCount, numChannels); mOutputFormat->setInt32(kKeySampleRate, sampleRate); mOutputFormat->setInt32(kKeyBitRate, bitRate); + } else if (audio_def->eEncoding == + (OMX_AUDIO_CODINGTYPE)OMX_AUDIO_CodingAndroidAC3) { + mOutputFormat->setCString( + kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3); + int32_t numChannels, sampleRate, bitRate; + inputFormat->findInt32(kKeyChannelCount, &numChannels); + inputFormat->findInt32(kKeySampleRate, &sampleRate); + inputFormat->findInt32(kKeyBitRate, &bitRate); + mOutputFormat->setInt32(kKeyChannelCount, numChannels); + mOutputFormat->setInt32(kKeySampleRate, sampleRate); + mOutputFormat->setInt32(kKeyBitRate, bitRate); } else { CHECK(!"Should not be here. 
Unknown audio encoding."); } diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp index 773854f..e2e6d79 100644 --- a/media/libstagefright/SkipCutBuffer.cpp +++ b/media/libstagefright/SkipCutBuffer.cpp @@ -25,7 +25,7 @@ namespace android { SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) { - mFrontPadding = skip; + mFrontPadding = mSkip = skip; mBackPadding = cut; mWriteHead = 0; mReadHead = 0; @@ -94,6 +94,7 @@ void SkipCutBuffer::submit(const sp<ABuffer>& buffer) { void SkipCutBuffer::clear() { mWriteHead = mReadHead = 0; + mFrontPadding = mSkip; } void SkipCutBuffer::write(const char *src, size_t num) { diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp index 1b20cbb..2f5eff4 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp @@ -30,7 +30,7 @@ #define DRC_DEFAULT_MOBILE_REF_LEVEL 64 /* 64*-0.25dB = -16 dB below full scale for mobile conf */ #define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */ #define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */ -#define MAX_CHANNEL_COUNT 6 /* maximum number of audio channels that can be decoded */ +#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */ // names of properties that can be used to override the default DRC settings #define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level" #define PROP_DRC_OVERRIDE_CUT "aac_drc_cut" @@ -58,6 +58,8 @@ SoftAAC2::SoftAAC2( mIsADTS(false), mInputBufferCount(0), mSignalledError(false), + mSawInputEos(false), + mSignalledOutputEos(false), mAnchorTimeUs(0), mNumSamplesOutput(0), mOutputPortSettingsChange(NONE) { @@ -294,8 +296,11 @@ void SoftAAC2::maybeConfigureDownmix() const { if (!(property_get("media.aac_51_output_enabled", value, NULL) && (!strcmp(value, "1") || !strcasecmp(value, "true")))) 
{ ALOGI("Downmixing multichannel AAC to stereo"); - aacDecoder_SetParam(mAACDecoder, AAC_PCM_OUTPUT_CHANNELS, 2); + aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2); mStreamInfo->numChannels = 2; + // By default, the decoder creates a 5.1 channel downmix signal + // for seven and eight channel input streams. To enable 6.1 and 7.1 channel output + // use aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1) } } } @@ -350,115 +355,83 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { return; } - while (!inQueue.empty() && !outQueue.empty()) { - BufferInfo *inInfo = *inQueue.begin(); - OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) { + BufferInfo *inInfo = NULL; + OMX_BUFFERHEADERTYPE *inHeader = NULL; + if (!inQueue.empty()) { + inInfo = *inQueue.begin(); + inHeader = inInfo->mHeader; + } BufferInfo *outInfo = *outQueue.begin(); OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + outHeader->nFlags = 0; - if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { - inQueue.erase(inQueue.begin()); - inInfo->mOwnedByUs = false; - notifyEmptyBufferDone(inHeader); - - if (mDecoderHasData) { - // flush out the decoder's delayed data by calling DecodeFrame - // one more time, with the AACDEC_FLUSH flag set - INT_PCM *outBuffer = - reinterpret_cast<INT_PCM *>( - outHeader->pBuffer + outHeader->nOffset); - - AAC_DECODER_ERROR decoderErr = - aacDecoder_DecodeFrame(mAACDecoder, - outBuffer, - outHeader->nAllocLen, - AACDEC_FLUSH); - mDecoderHasData = false; - - if (decoderErr != AAC_DEC_OK) { - mSignalledError = true; - - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, - NULL); - - return; - } - - outHeader->nFilledLen = - mStreamInfo->frameSize - * sizeof(int16_t) - * mStreamInfo->numChannels; - } else { - // we never submitted any data to the decoder, so there's nothing to flush out - outHeader->nFilledLen = 0; + if (inHeader) { + if (inHeader->nFlags & 
OMX_BUFFERFLAG_EOS) { + mSawInputEos = true; } - outHeader->nFlags = OMX_BUFFERFLAG_EOS; - - outQueue.erase(outQueue.begin()); - outInfo->mOwnedByUs = false; - notifyFillBufferDone(outHeader); - return; - } - - if (inHeader->nOffset == 0) { - mAnchorTimeUs = inHeader->nTimeStamp; - mNumSamplesOutput = 0; - } + if (inHeader->nOffset == 0 && inHeader->nFilledLen) { + mAnchorTimeUs = inHeader->nTimeStamp; + mNumSamplesOutput = 0; + } - size_t adtsHeaderSize = 0; - if (mIsADTS) { - // skip 30 bits, aac_frame_length follows. - // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll????? + if (mIsADTS) { + size_t adtsHeaderSize = 0; + // skip 30 bits, aac_frame_length follows. + // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll????? - const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset; + const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset; - bool signalError = false; - if (inHeader->nFilledLen < 7) { - ALOGE("Audio data too short to contain even the ADTS header. " - "Got %ld bytes.", inHeader->nFilledLen); - hexdump(adtsHeader, inHeader->nFilledLen); - signalError = true; - } else { - bool protectionAbsent = (adtsHeader[1] & 1); - - unsigned aac_frame_length = - ((adtsHeader[3] & 3) << 11) - | (adtsHeader[4] << 3) - | (adtsHeader[5] >> 5); - - if (inHeader->nFilledLen < aac_frame_length) { - ALOGE("Not enough audio data for the complete frame. " - "Got %ld bytes, frame size according to the ADTS " - "header is %u bytes.", - inHeader->nFilledLen, aac_frame_length); + bool signalError = false; + if (inHeader->nFilledLen < 7) { + ALOGE("Audio data too short to contain even the ADTS header. " + "Got %ld bytes.", inHeader->nFilledLen); hexdump(adtsHeader, inHeader->nFilledLen); signalError = true; } else { - adtsHeaderSize = (protectionAbsent ? 
7 : 9); - - inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize; - inBufferLength[0] = aac_frame_length - adtsHeaderSize; - - inHeader->nOffset += adtsHeaderSize; - inHeader->nFilledLen -= adtsHeaderSize; + bool protectionAbsent = (adtsHeader[1] & 1); + + unsigned aac_frame_length = + ((adtsHeader[3] & 3) << 11) + | (adtsHeader[4] << 3) + | (adtsHeader[5] >> 5); + + if (inHeader->nFilledLen < aac_frame_length) { + ALOGE("Not enough audio data for the complete frame. " + "Got %ld bytes, frame size according to the ADTS " + "header is %u bytes.", + inHeader->nFilledLen, aac_frame_length); + hexdump(adtsHeader, inHeader->nFilledLen); + signalError = true; + } else { + adtsHeaderSize = (protectionAbsent ? 7 : 9); + + inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize; + inBufferLength[0] = aac_frame_length - adtsHeaderSize; + + inHeader->nOffset += adtsHeaderSize; + inHeader->nFilledLen -= adtsHeaderSize; + } } - } - if (signalError) { - mSignalledError = true; + if (signalError) { + mSignalledError = true; - notify(OMX_EventError, - OMX_ErrorStreamCorrupt, - ERROR_MALFORMED, - NULL); + notify(OMX_EventError, + OMX_ErrorStreamCorrupt, + ERROR_MALFORMED, + NULL); - return; + return; + } + } else { + inBuffer[0] = inHeader->pBuffer + inHeader->nOffset; + inBufferLength[0] = inHeader->nFilledLen; } } else { - inBuffer[0] = inHeader->pBuffer + inHeader->nOffset; - inBufferLength[0] = inHeader->nFilledLen; + inBufferLength[0] = 0; } // Fill and decode @@ -471,50 +444,66 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { int prevNumChannels = mStreamInfo->numChannels; AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS; - while (bytesValid[0] > 0 && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { + while ((bytesValid[0] > 0 || mSawInputEos) && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { + mDecoderHasData |= (bytesValid[0] > 0); aacDecoder_Fill(mAACDecoder, inBuffer, inBufferLength, bytesValid); - mDecoderHasData = true; decoderErr = aacDecoder_DecodeFrame(mAACDecoder, outBuffer, 
outHeader->nAllocLen, 0 /* flags */); - if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { - ALOGW("Not enough bits, bytesValid %d", bytesValid[0]); + if (mSawInputEos && bytesValid[0] <= 0) { + if (mDecoderHasData) { + // flush out the decoder's delayed data by calling DecodeFrame + // one more time, with the AACDEC_FLUSH flag set + decoderErr = aacDecoder_DecodeFrame(mAACDecoder, + outBuffer, + outHeader->nAllocLen, + AACDEC_FLUSH); + mDecoderHasData = false; + } + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + mSignalledOutputEos = true; + break; + } else { + ALOGW("Not enough bits, bytesValid %d", bytesValid[0]); + } } } size_t numOutBytes = mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; - if (decoderErr == AAC_DEC_OK) { - UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; - inHeader->nFilledLen -= inBufferUsedLength; - inHeader->nOffset += inBufferUsedLength; - } else { - ALOGW("AAC decoder returned error %d, substituting silence", - decoderErr); + if (inHeader) { + if (decoderErr == AAC_DEC_OK) { + UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; + inHeader->nFilledLen -= inBufferUsedLength; + inHeader->nOffset += inBufferUsedLength; + } else { + ALOGW("AAC decoder returned error %d, substituting silence", + decoderErr); - memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes); + memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes); - // Discard input buffer. - inHeader->nFilledLen = 0; + // Discard input buffer. 
+ inHeader->nFilledLen = 0; - aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); + aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); - // fall through - } + // fall through + } - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - inQueue.erase(inQueue.begin()); - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; + if (inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + inQueue.erase(inQueue.begin()); + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } } /* @@ -555,7 +544,6 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) { // we've previously decoded valid data, in the latter case // (decode failed) we'll output a silent frame. outHeader->nFilledLen = numOutBytes; - outHeader->nFlags = 0; outHeader->nTimeStamp = mAnchorTimeUs @@ -582,6 +570,12 @@ void SoftAAC2::onPortFlushCompleted(OMX_U32 portIndex) { // depend on fragments from the last one decoded. // drain all existing data drainDecoder(); + // force decoder loop to drop the first decoded buffer by resetting these state variables, + // but only if initialization has already happened. 
+ if (mInputBufferCount != 0) { + mInputBufferCount = 1; + mStreamInfo->sampleRate = 0; + } } } @@ -606,6 +600,8 @@ void SoftAAC2::onReset() { mStreamInfo->sampleRate = 0; mSignalledError = false; + mSawInputEos = false; + mSignalledOutputEos = false; mOutputPortSettingsChange = NONE; } diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h index 2d960ab..a7ea1e2 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.h +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h @@ -55,6 +55,8 @@ private: bool mDecoderHasData; size_t mInputBufferCount; bool mSignalledError; + bool mSawInputEos; + bool mSignalledOutputEos; int64_t mAnchorTimeUs; int64_t mNumSamplesOutput; diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp index 7c382fb..877e3cb 100644 --- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp +++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp @@ -49,6 +49,8 @@ SoftMP3::SoftMP3( mNumChannels(2), mSamplingRate(44100), mSignalledError(false), + mSawInputEos(false), + mSignalledOutputEos(false), mOutputPortSettingsChange(NONE) { initPorts(); initDecoder(); @@ -194,48 +196,36 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) { List<BufferInfo *> &inQueue = getPortQueue(0); List<BufferInfo *> &outQueue = getPortQueue(1); - while (!inQueue.empty() && !outQueue.empty()) { - BufferInfo *inInfo = *inQueue.begin(); - OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) { + BufferInfo *inInfo = NULL; + OMX_BUFFERHEADERTYPE *inHeader = NULL; + if (!inQueue.empty()) { + inInfo = *inQueue.begin(); + inHeader = inInfo->mHeader; + } BufferInfo *outInfo = *outQueue.begin(); OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + outHeader->nFlags = 0; - if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { - inQueue.erase(inQueue.begin()); - inInfo->mOwnedByUs = false; - 
notifyEmptyBufferDone(inHeader); - - if (!mIsFirst) { - // pad the end of the stream with 529 samples, since that many samples - // were trimmed off the beginning when decoding started - outHeader->nFilledLen = - kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t); + if (inHeader) { + if (inHeader->nOffset == 0 && inHeader->nFilledLen) { + mAnchorTimeUs = inHeader->nTimeStamp; + mNumFramesOutput = 0; + } - memset(outHeader->pBuffer, 0, outHeader->nFilledLen); - } else { - // Since we never discarded frames from the start, we won't have - // to add any padding at the end either. - outHeader->nFilledLen = 0; + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + mSawInputEos = true; } - outHeader->nFlags = OMX_BUFFERFLAG_EOS; + mConfig->pInputBuffer = + inHeader->pBuffer + inHeader->nOffset; - outQueue.erase(outQueue.begin()); - outInfo->mOwnedByUs = false; - notifyFillBufferDone(outHeader); - return; - } - - if (inHeader->nOffset == 0) { - mAnchorTimeUs = inHeader->nTimeStamp; - mNumFramesOutput = 0; + mConfig->inputBufferCurrentLength = inHeader->nFilledLen; + } else { + mConfig->pInputBuffer = NULL; + mConfig->inputBufferCurrentLength = 0; } - - mConfig->pInputBuffer = - inHeader->pBuffer + inHeader->nOffset; - - mConfig->inputBufferCurrentLength = inHeader->nFilledLen; mConfig->inputBufferMaxLength = 0; mConfig->inputBufferUsedLength = 0; @@ -262,13 +252,28 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) { mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t); } - // This is recoverable, just ignore the current frame and - // play silence instead. 
- memset(outHeader->pBuffer, - 0, - mConfig->outputFrameSize * sizeof(int16_t)); - - mConfig->inputBufferUsedLength = inHeader->nFilledLen; + if (decoderErr == NO_ENOUGH_MAIN_DATA_ERROR && mSawInputEos) { + if (!mIsFirst) { + // pad the end of the stream with 529 samples, since that many samples + // were trimmed off the beginning when decoding started + outHeader->nOffset = 0; + outHeader->nFilledLen = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t); + + memset(outHeader->pBuffer, 0, outHeader->nFilledLen); + } + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + mSignalledOutputEos = true; + } else { + // This is recoverable, just ignore the current frame and + // play silence instead. + memset(outHeader->pBuffer, + 0, + mConfig->outputFrameSize * sizeof(int16_t)); + + if (inHeader) { + mConfig->inputBufferUsedLength = inHeader->nFilledLen; + } + } } else if (mConfig->samplingRate != mSamplingRate || mConfig->num_channels != mNumChannels) { mSamplingRate = mConfig->samplingRate; @@ -289,7 +294,7 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) { outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t) - outHeader->nOffset; - } else { + } else if (!mSignalledOutputEos) { outHeader->nOffset = 0; outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t); } @@ -298,23 +303,24 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) { mAnchorTimeUs + (mNumFramesOutput * 1000000ll) / mConfig->samplingRate; - outHeader->nFlags = 0; - - CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength); + if (inHeader) { + CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength); - inHeader->nOffset += mConfig->inputBufferUsedLength; - inHeader->nFilledLen -= mConfig->inputBufferUsedLength; + inHeader->nOffset += mConfig->inputBufferUsedLength; + inHeader->nFilledLen -= mConfig->inputBufferUsedLength; - mNumFramesOutput += mConfig->outputFrameSize / mNumChannels; - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - inQueue.erase(inQueue.begin()); 
- inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; + if (inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + inQueue.erase(inQueue.begin()); + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } } + mNumFramesOutput += mConfig->outputFrameSize / mNumChannels; + outInfo->mOwnedByUs = false; outQueue.erase(outQueue.begin()); outInfo = NULL; @@ -362,6 +368,8 @@ void SoftMP3::onReset() { pvmp3_InitDecoder(mConfig, mDecoderBuf); mIsFirst = true; mSignalledError = false; + mSawInputEos = false; + mSignalledOutputEos = false; mOutputPortSettingsChange = NONE; } diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h index 4af91ea..f9e7b53 100644 --- a/media/libstagefright/codecs/mp3dec/SoftMP3.h +++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h @@ -61,6 +61,8 @@ private: bool mIsFirst; bool mSignalledError; + bool mSawInputEos; + bool mSignalledOutputEos; enum { NONE, diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp index 51bb958..515e4d3 100644 --- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp +++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp @@ -54,6 +54,8 @@ SoftVorbis::SoftVorbis( mAnchorTimeUs(0), mNumFramesOutput(0), mNumFramesLeftOnPage(-1), + mSawInputEos(false), + mSignalledOutputEos(false), mOutputPortSettingsChange(NONE) { initPorts(); CHECK_EQ(initDecoder(), (status_t)OK); @@ -290,48 +292,47 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) { return; } - while (!inQueue.empty() && !outQueue.empty()) { - BufferInfo *inInfo = *inQueue.begin(); - OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) { + BufferInfo *inInfo = NULL; + OMX_BUFFERHEADERTYPE *inHeader = NULL; + if (!inQueue.empty()) { + inInfo = *inQueue.begin(); + inHeader = inInfo->mHeader; + } BufferInfo *outInfo = 
*outQueue.begin(); OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; - if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { - inQueue.erase(inQueue.begin()); - inInfo->mOwnedByUs = false; - notifyEmptyBufferDone(inHeader); + int32_t numPageSamples = 0; - outHeader->nFilledLen = 0; - outHeader->nFlags = OMX_BUFFERFLAG_EOS; + if (inHeader) { + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + mSawInputEos = true; + } - outQueue.erase(outQueue.begin()); - outInfo->mOwnedByUs = false; - notifyFillBufferDone(outHeader); - return; - } + if (inHeader->nFilledLen || !mSawInputEos) { + CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples)); + memcpy(&numPageSamples, + inHeader->pBuffer + + inHeader->nOffset + inHeader->nFilledLen - 4, + sizeof(numPageSamples)); - int32_t numPageSamples; - CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples)); - memcpy(&numPageSamples, - inHeader->pBuffer - + inHeader->nOffset + inHeader->nFilledLen - 4, - sizeof(numPageSamples)); + if (inHeader->nOffset == 0) { + mAnchorTimeUs = inHeader->nTimeStamp; + mNumFramesOutput = 0; + } - if (numPageSamples >= 0) { - mNumFramesLeftOnPage = numPageSamples; + inHeader->nFilledLen -= sizeof(numPageSamples);; + } } - if (inHeader->nOffset == 0) { - mAnchorTimeUs = inHeader->nTimeStamp; - mNumFramesOutput = 0; + if (numPageSamples >= 0) { + mNumFramesLeftOnPage = numPageSamples; } - inHeader->nFilledLen -= sizeof(numPageSamples);; - ogg_buffer buf; - buf.data = inHeader->pBuffer + inHeader->nOffset; - buf.size = inHeader->nFilledLen; + buf.data = inHeader ? inHeader->pBuffer + inHeader->nOffset : NULL; + buf.size = inHeader ? 
inHeader->nFilledLen : 0; buf.refcount = 1; buf.ptr.owner = NULL; @@ -351,6 +352,7 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) { int numFrames = 0; + outHeader->nFlags = 0; int err = vorbis_dsp_synthesis(mState, &pack, 1); if (err != 0) { ALOGW("vorbis_dsp_synthesis returned %d", err); @@ -370,13 +372,16 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) { ALOGV("discarding %d frames at end of page", numFrames - mNumFramesLeftOnPage); numFrames = mNumFramesLeftOnPage; + if (mSawInputEos) { + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + mSignalledOutputEos = true; + } } mNumFramesLeftOnPage -= numFrames; } outHeader->nFilledLen = numFrames * sizeof(int16_t) * mVi->channels; outHeader->nOffset = 0; - outHeader->nFlags = 0; outHeader->nTimeStamp = mAnchorTimeUs @@ -384,11 +389,13 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) { mNumFramesOutput += numFrames; - inInfo->mOwnedByUs = false; - inQueue.erase(inQueue.begin()); - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; + if (inHeader) { + inInfo->mOwnedByUs = false; + inQueue.erase(inQueue.begin()); + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } outInfo->mOwnedByUs = false; outQueue.erase(outQueue.begin()); @@ -425,6 +432,8 @@ void SoftVorbis::onReset() { mVi = NULL; } + mSawInputEos = false; + mSignalledOutputEos = false; mOutputPortSettingsChange = NONE; } diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h index cb628a0..1d00816 100644 --- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h +++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h @@ -59,6 +59,8 @@ private: int64_t mAnchorTimeUs; int64_t mNumFramesOutput; int32_t mNumFramesLeftOnPage; + bool mSawInputEos; + bool mSignalledOutputEos; enum { NONE, diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp index bd12ddc..233db44 100644 --- 
a/media/libstagefright/httplive/LiveSession.cpp +++ b/media/libstagefright/httplive/LiveSession.cpp @@ -632,9 +632,6 @@ sp<M3UParser> LiveSession::fetchPlaylist( // playlist unchanged *unchanged = true; - ALOGV("Playlist unchanged, refresh state is now %d", - (int)mRefreshState); - return NULL; } diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp index 973b779..f095987 100644 --- a/media/libstagefright/httplive/PlaylistFetcher.cpp +++ b/media/libstagefright/httplive/PlaylistFetcher.cpp @@ -47,6 +47,7 @@ namespace android { // static const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll; +const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll; PlaylistFetcher::PlaylistFetcher( const sp<AMessage> ¬ify, @@ -61,6 +62,7 @@ PlaylistFetcher::PlaylistFetcher( mSeqNumber(-1), mNumRetries(0), mStartup(true), + mPrepared(false), mNextPTSTimeUs(-1ll), mMonitorQueueGeneration(0), mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY), @@ -103,10 +105,16 @@ int64_t PlaylistFetcher::getSegmentStartTimeUs(int32_t seqNumber) const { return segmentStartUs; } -bool PlaylistFetcher::timeToRefreshPlaylist(int64_t nowUs) const { - if (mPlaylist == NULL) { +int64_t PlaylistFetcher::delayUsToRefreshPlaylist() const { + int64_t nowUs = ALooper::GetNowUs(); + + if (mPlaylist == NULL || mLastPlaylistFetchTimeUs < 0ll) { CHECK_EQ((int)mRefreshState, (int)INITIAL_MINIMUM_RELOAD_DELAY); - return true; + return 0ll; + } + + if (mPlaylist->isComplete()) { + return (~0llu >> 1); } int32_t targetDurationSecs; @@ -157,7 +165,8 @@ bool PlaylistFetcher::timeToRefreshPlaylist(int64_t nowUs) const { break; } - return mLastPlaylistFetchTimeUs + minPlaylistAgeUs <= nowUs; + int64_t delayUs = mLastPlaylistFetchTimeUs + minPlaylistAgeUs - nowUs; + return delayUs > 0ll ? 
delayUs : 0ll; } status_t PlaylistFetcher::decryptBuffer( @@ -274,7 +283,15 @@ status_t PlaylistFetcher::decryptBuffer( return OK; } -void PlaylistFetcher::postMonitorQueue(int64_t delayUs) { +void PlaylistFetcher::postMonitorQueue(int64_t delayUs, int64_t minDelayUs) { + int64_t maxDelayUs = delayUsToRefreshPlaylist(); + if (maxDelayUs < minDelayUs) { + maxDelayUs = minDelayUs; + } + if (delayUs > maxDelayUs) { + ALOGV("Need to refresh playlist in %lld", maxDelayUs); + delayUs = maxDelayUs; + } sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id()); msg->setInt32("generation", mMonitorQueueGeneration); msg->post(delayUs); @@ -415,6 +432,7 @@ status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) { if (mStartTimeUs >= 0ll) { mSeqNumber = -1; mStartup = true; + mPrepared = false; } postMonitorQueue(); @@ -456,40 +474,62 @@ void PlaylistFetcher::queueDiscontinuity( void PlaylistFetcher::onMonitorQueue() { bool downloadMore = false; + refreshPlaylist(); + + int32_t targetDurationSecs; + int64_t targetDurationUs = kMinBufferedDurationUs; + if (mPlaylist != NULL) { + CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs)); + targetDurationUs = targetDurationSecs * 1000000ll; + } - status_t finalResult; + // buffer at least 3 times the target duration, or up to 10 seconds + int64_t durationToBufferUs = targetDurationUs * 3; + if (durationToBufferUs > kMinBufferedDurationUs) { + durationToBufferUs = kMinBufferedDurationUs; + } + + int64_t bufferedDurationUs = 0ll; + status_t finalResult = NOT_ENOUGH_DATA; if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) { sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES); - int64_t bufferedDurationUs = + bufferedDurationUs = packetSource->getBufferedDurationUs(&finalResult); - - downloadMore = (bufferedDurationUs < kMinBufferedDurationUs); finalResult = OK; } else { bool first = true; - int64_t minBufferedDurationUs = 0ll; for (size_t i = 0; i < 
mPacketSources.size(); ++i) { if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) { continue; } - int64_t bufferedDurationUs = + int64_t bufferedStreamDurationUs = mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult); - - if (first || bufferedDurationUs < minBufferedDurationUs) { - minBufferedDurationUs = bufferedDurationUs; + if (first || bufferedStreamDurationUs < bufferedDurationUs) { + bufferedDurationUs = bufferedStreamDurationUs; first = false; } } + } + downloadMore = (bufferedDurationUs < durationToBufferUs); - downloadMore = - !first && (minBufferedDurationUs < kMinBufferedDurationUs); + // signal start if buffered up at least the target size + if (!mPrepared && bufferedDurationUs > targetDurationUs && downloadMore) { + mPrepared = true; + + ALOGV("prepared, buffered=%lld > %lld", + bufferedDurationUs, targetDurationUs); + sp<AMessage> msg = mNotify->dup(); + msg->setInt32("what", kWhatTemporarilyDoneFetching); + msg->post(); } if (finalResult == OK && downloadMore) { + ALOGV("monitoring, buffered=%lld < %lld", + bufferedDurationUs, durationToBufferUs); onDownloadNext(); } else { // Nothing to do yet, try again in a second. @@ -498,15 +538,17 @@ void PlaylistFetcher::onMonitorQueue() { msg->setInt32("what", kWhatTemporarilyDoneFetching); msg->post(); - postMonitorQueue(1000000ll); + int64_t delayUs = mPrepared ? kMaxMonitorDelayUs : targetDurationUs / 2; + ALOGV("pausing for %lld, buffered=%lld > %lld", + delayUs, bufferedDurationUs, durationToBufferUs); + // :TRICKY: need to enforce minimum delay because the delay to + // refresh the playlist will become 0 + postMonitorQueue(delayUs, mPrepared ? 
targetDurationUs * 2 : 0); } } -void PlaylistFetcher::onDownloadNext() { - int64_t nowUs = ALooper::GetNowUs(); - - if (mLastPlaylistFetchTimeUs < 0ll - || (!mPlaylist->isComplete() && timeToRefreshPlaylist(nowUs))) { +status_t PlaylistFetcher::refreshPlaylist() { + if (delayUsToRefreshPlaylist() <= 0) { bool unchanged; sp<M3UParser> playlist = mSession->fetchPlaylist( mURI.c_str(), mPlaylistHash, &unchanged); @@ -522,7 +564,7 @@ void PlaylistFetcher::onDownloadNext() { } else { ALOGE("failed to load playlist at url '%s'", mURI.c_str()); notifyError(ERROR_IO); - return; + return ERROR_IO; } } else { mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY; @@ -535,6 +577,13 @@ void PlaylistFetcher::onDownloadNext() { mLastPlaylistFetchTimeUs = ALooper::GetNowUs(); } + return OK; +} + +void PlaylistFetcher::onDownloadNext() { + if (refreshPlaylist() != OK) { + return; + } int32_t firstSeqNumberInPlaylist; if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32( @@ -553,12 +602,18 @@ void PlaylistFetcher::onDownloadNext() { if (mPlaylist->isComplete() || mPlaylist->isEvent()) { mSeqNumber = getSeqNumberForTime(mStartTimeUs); + ALOGV("Initial sequence number for time %lld is %ld from (%ld .. %ld)", + mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist, + lastSeqNumberInPlaylist); } else { // If this is a live session, start 3 segments from the end. mSeqNumber = lastSeqNumberInPlaylist - 3; if (mSeqNumber < firstSeqNumberInPlaylist) { mSeqNumber = firstSeqNumberInPlaylist; } + ALOGV("Initial sequence number for live event %ld from (%ld .. %ld)", + mSeqNumber, firstSeqNumberInPlaylist, + lastSeqNumberInPlaylist); } mStartTimeUs = -1ll; @@ -570,16 +625,34 @@ void PlaylistFetcher::onDownloadNext() { ++mNumRetries; if (mSeqNumber > lastSeqNumberInPlaylist) { - mLastPlaylistFetchTimeUs = -1; - postMonitorQueue(3000000ll); + // refresh in increasing fraction (1/2, 1/3, ...) 
of the + // playlist's target duration or 3 seconds, whichever is less + int32_t targetDurationSecs; + CHECK(mPlaylist->meta()->findInt32( + "target-duration", &targetDurationSecs)); + int64_t delayUs = mPlaylist->size() * targetDurationSecs * + 1000000ll / (1 + mNumRetries); + if (delayUs > kMaxMonitorDelayUs) { + delayUs = kMaxMonitorDelayUs; + } + ALOGV("sequence number high: %ld from (%ld .. %ld), monitor in %lld (retry=%d)", + mSeqNumber, firstSeqNumberInPlaylist, + lastSeqNumberInPlaylist, delayUs, mNumRetries); + postMonitorQueue(delayUs); return; } // we've missed the boat, let's start from the lowest sequence // number available and signal a discontinuity. - ALOGI("We've missed the boat, restarting playback."); - mSeqNumber = lastSeqNumberInPlaylist; + ALOGI("We've missed the boat, restarting playback." + " mStartup=%d, was looking for %d in %d-%d", + mStartup, mSeqNumber, firstSeqNumberInPlaylist, + lastSeqNumberInPlaylist); + mSeqNumber = lastSeqNumberInPlaylist - 3; + if (mSeqNumber < firstSeqNumberInPlaylist) { + mSeqNumber = firstSeqNumberInPlaylist; + } explicitDiscontinuity = true; // fall through @@ -788,12 +861,13 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits( && source->dequeueAccessUnit(&accessUnit) == OK) { // Note that we do NOT dequeue any discontinuities. 
+ // for simplicity, store a reference to the format in each unit + sp<MetaData> format = source->getFormat(); + if (format != NULL) { + accessUnit->meta()->setObject("format", format); + } packetSource->queueAccessUnit(accessUnit); } - - if (packetSource->getFormat() == NULL) { - packetSource->setFormat(source->getFormat()); - } } return OK; diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h index 1648e02..78dea20 100644 --- a/media/libstagefright/httplive/PlaylistFetcher.h +++ b/media/libstagefright/httplive/PlaylistFetcher.h @@ -79,6 +79,7 @@ private: }; static const int64_t kMinBufferedDurationUs; + static const int64_t kMaxMonitorDelayUs; sp<AMessage> mNotify; sp<LiveSession> mSession; @@ -97,6 +98,7 @@ private: int32_t mSeqNumber; int32_t mNumRetries; bool mStartup; + bool mPrepared; int64_t mNextPTSTimeUs; int32_t mMonitorQueueGeneration; @@ -120,10 +122,11 @@ private: status_t decryptBuffer( size_t playlistIndex, const sp<ABuffer> &buffer); - void postMonitorQueue(int64_t delayUs = 0); + void postMonitorQueue(int64_t delayUs = 0, int64_t minDelayUs = 0); void cancelMonitorQueue(); - bool timeToRefreshPlaylist(int64_t nowUs) const; + int64_t delayUsToRefreshPlaylist() const; + status_t refreshPlaylist(); // Returns the media time in us of the segment specified by seqNumber. // This is computed by summing the durations of all segments before it. 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp index 1ec4a40..f0f203c 100644 --- a/media/libstagefright/id3/ID3.cpp +++ b/media/libstagefright/id3/ID3.cpp @@ -468,49 +468,6 @@ void ID3::Iterator::getID(String8 *id) const { } } -static void convertISO8859ToString8( - const uint8_t *data, size_t size, - String8 *s) { - size_t utf8len = 0; - for (size_t i = 0; i < size; ++i) { - if (data[i] == '\0') { - size = i; - break; - } else if (data[i] < 0x80) { - ++utf8len; - } else { - utf8len += 2; - } - } - - if (utf8len == size) { - // Only ASCII characters present. - - s->setTo((const char *)data, size); - return; - } - - char *tmp = new char[utf8len]; - char *ptr = tmp; - for (size_t i = 0; i < size; ++i) { - if (data[i] == '\0') { - break; - } else if (data[i] < 0x80) { - *ptr++ = data[i]; - } else if (data[i] < 0xc0) { - *ptr++ = 0xc2; - *ptr++ = data[i]; - } else { - *ptr++ = 0xc3; - *ptr++ = data[i] - 64; - } - } - - s->setTo(tmp, utf8len); - - delete[] tmp; - tmp = NULL; -} // the 2nd argument is used to get the data following the \0 in a comment field void ID3::Iterator::getString(String8 *id, String8 *comment) const { @@ -543,7 +500,9 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const { return; } - convertISO8859ToString8(frameData, mFrameSize, id); + // this is supposed to be ISO-8859-1, but pass it up as-is to the caller, who will figure + // out the real encoding + id->setTo((const char*)frameData, mFrameSize); return; } @@ -561,13 +520,13 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const { } if (encoding == 0x00) { - // ISO 8859-1 - convertISO8859ToString8(frameData + 1, n, id); + // supposedly ISO 8859-1 + id->setTo((const char*)frameData + 1, n); } else if (encoding == 0x03) { - // UTF-8 + // supposedly UTF-8 id->setTo((const char *)(frameData + 1), n); } else if (encoding == 0x02) { - // UTF-16 BE, no byte order mark. + // supposedly UTF-16 BE, no byte order mark. 
// API wants number of characters, not number of bytes... int len = n / 2; const char16_t *framedata = (const char16_t *) (frameData + 1); @@ -583,7 +542,7 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const { if (framedatacopy != NULL) { delete[] framedatacopy; } - } else { + } else if (encoding == 0x01) { // UCS-2 // API wants number of characters, not number of bytes... int len = n / 2; @@ -602,7 +561,27 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const { framedata++; len--; } - id->setTo(framedata, len); + + // check if the resulting data consists entirely of 8-bit values + bool eightBit = true; + for (int i = 0; i < len; i++) { + if (framedata[i] > 0xff) { + eightBit = false; + break; + } + } + if (eightBit) { + // collapse to 8 bit, then let the media scanner client figure out the real encoding + char *frame8 = new char[len]; + for (int i = 0; i < len; i++) { + frame8[i] = framedata[i]; + } + id->setTo(frame8, len); + delete [] frame8; + } else { + id->setTo(framedata, len); + } + if (framedatacopy != NULL) { delete[] framedatacopy; } diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp index 175a263..cb57a2f 100644 --- a/media/libstagefright/mpeg2ts/ATSParser.cpp +++ b/media/libstagefright/mpeg2ts/ATSParser.cpp @@ -506,6 +506,11 @@ ATSParser::Stream::Stream( ElementaryStreamQueue::PCM_AUDIO); break; + case STREAMTYPE_AC3: + mQueue = new ElementaryStreamQueue( + ElementaryStreamQueue::AC3); + break; + default: break; } @@ -614,6 +619,7 @@ bool ATSParser::Stream::isAudio() const { case STREAMTYPE_MPEG2_AUDIO: case STREAMTYPE_MPEG2_AUDIO_ADTS: case STREAMTYPE_PCM_AUDIO: + case STREAMTYPE_AC3: return true; default: diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h index a10edc9..d4e30b4 100644 --- a/media/libstagefright/mpeg2ts/ATSParser.h +++ b/media/libstagefright/mpeg2ts/ATSParser.h @@ -88,6 +88,10 @@ struct ATSParser : public 
RefBase { STREAMTYPE_MPEG2_AUDIO_ADTS = 0x0f, STREAMTYPE_MPEG4_VIDEO = 0x10, STREAMTYPE_H264 = 0x1b, + + // From ATSC A/53 Part 3:2009, 6.7.1 + STREAMTYPE_AC3 = 0x81, + STREAMTYPE_PCM_AUDIO = 0x83, }; diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp index 3153c8b..52fb2a5 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp @@ -70,7 +70,27 @@ status_t AnotherPacketSource::stop() { } sp<MetaData> AnotherPacketSource::getFormat() { - return mFormat; + Mutex::Autolock autoLock(mLock); + if (mFormat != NULL) { + return mFormat; + } + + List<sp<ABuffer> >::iterator it = mBuffers.begin(); + while (it != mBuffers.end()) { + sp<ABuffer> buffer = *it; + int32_t discontinuity; + if (buffer->meta()->findInt32("discontinuity", &discontinuity)) { + break; + } + + sp<RefBase> object; + if (buffer->meta()->findObject("format", &object)) { + return static_cast<MetaData*>(object.get()); + } + + ++it; + } + return NULL; } status_t AnotherPacketSource::dequeueAccessUnit(sp<ABuffer> *buffer) { @@ -94,6 +114,11 @@ status_t AnotherPacketSource::dequeueAccessUnit(sp<ABuffer> *buffer) { return INFO_DISCONTINUITY; } + sp<RefBase> object; + if ((*buffer)->meta()->findObject("format", &object)) { + mFormat = static_cast<MetaData*>(object.get()); + } + return OK; } @@ -120,17 +145,22 @@ status_t AnotherPacketSource::read( } return INFO_DISCONTINUITY; - } else { - int64_t timeUs; - CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); + } - MediaBuffer *mediaBuffer = new MediaBuffer(buffer); + sp<RefBase> object; + if (buffer->meta()->findObject("format", &object)) { + mFormat = static_cast<MetaData*>(object.get()); + } - mediaBuffer->meta_data()->setInt64(kKeyTime, timeUs); + int64_t timeUs; + CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); - *out = mediaBuffer; - return OK; - } + MediaBuffer *mediaBuffer = new MediaBuffer(buffer); + + 
mediaBuffer->meta_data()->setInt64(kKeyTime, timeUs); + + *out = mediaBuffer; + return OK; } return mEOSResult; diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp index 8f9c9c8..ea79885 100644 --- a/media/libstagefright/mpeg2ts/ESQueue.cpp +++ b/media/libstagefright/mpeg2ts/ESQueue.cpp @@ -56,6 +56,122 @@ void ElementaryStreamQueue::clear(bool clearFormat) { } } +// Parse AC3 header assuming the current ptr is start position of syncframe, +// update metadata only applicable, and return the payload size +static unsigned parseAC3SyncFrame( + const uint8_t *ptr, size_t size, sp<MetaData> *metaData) { + static const unsigned channelCountTable[] = {2, 1, 2, 3, 4, 4, 5, 6}; + static const unsigned samplingRateTable[] = {48000, 44100, 32000}; + static const unsigned rates[] = {32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, + 320, 384, 448, 512, 576, 640}; + + static const unsigned frameSizeTable[19][3] = { + { 64, 69, 96 }, + { 80, 87, 120 }, + { 96, 104, 144 }, + { 112, 121, 168 }, + { 128, 139, 192 }, + { 160, 174, 240 }, + { 192, 208, 288 }, + { 224, 243, 336 }, + { 256, 278, 384 }, + { 320, 348, 480 }, + { 384, 417, 576 }, + { 448, 487, 672 }, + { 512, 557, 768 }, + { 640, 696, 960 }, + { 768, 835, 1152 }, + { 896, 975, 1344 }, + { 1024, 1114, 1536 }, + { 1152, 1253, 1728 }, + { 1280, 1393, 1920 }, + }; + + ABitReader bits(ptr, size); + unsigned syncStartPos = 0; // in bytes + if (bits.numBitsLeft() < 16) { + return 0; + } + if (bits.getBits(16) != 0x0B77) { + return 0; + } + + if (bits.numBitsLeft() < 16 + 2 + 6 + 5 + 3 + 3) { + ALOGV("Not enough bits left for further parsing"); + return 0; + } + bits.skipBits(16); // crc1 + + unsigned fscod = bits.getBits(2); + if (fscod == 3) { + ALOGW("Incorrect fscod in AC3 header"); + return 0; + } + + unsigned frmsizecod = bits.getBits(6); + if (frmsizecod > 37) { + ALOGW("Incorrect frmsizecod in AC3 header"); + return 0; + } + + unsigned bsid = bits.getBits(5); + if 
(bsid > 8) { + ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?"); + return 0; + } + + unsigned bsmod = bits.getBits(3); + unsigned acmod = bits.getBits(3); + unsigned cmixlev = 0; + unsigned surmixlev = 0; + unsigned dsurmod = 0; + + if ((acmod & 1) > 0 && acmod != 1) { + if (bits.numBitsLeft() < 2) { + return 0; + } + cmixlev = bits.getBits(2); + } + if ((acmod & 4) > 0) { + if (bits.numBitsLeft() < 2) { + return 0; + } + surmixlev = bits.getBits(2); + } + if (acmod == 2) { + if (bits.numBitsLeft() < 2) { + return 0; + } + dsurmod = bits.getBits(2); + } + + if (bits.numBitsLeft() < 1) { + return 0; + } + unsigned lfeon = bits.getBits(1); + + unsigned samplingRate = samplingRateTable[fscod]; + unsigned payloadSize = frameSizeTable[frmsizecod >> 1][fscod]; + if (fscod == 1) { + payloadSize += frmsizecod & 1; + } + payloadSize <<= 1; // convert from 16-bit words to bytes + + unsigned channelCount = channelCountTable[acmod] + lfeon; + + if (metaData != NULL) { + (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3); + (*metaData)->setInt32(kKeyChannelCount, channelCount); + (*metaData)->setInt32(kKeySampleRate, samplingRate); + } + + return payloadSize; +} + +static bool IsSeeminglyValidAC3Header(const uint8_t *ptr, size_t size) { + return parseAC3SyncFrame(ptr, size, NULL) > 0; +} + static bool IsSeeminglyValidADTSHeader(const uint8_t *ptr, size_t size) { if (size < 3) { // Not enough data to verify header. 
@@ -224,6 +340,33 @@ status_t ElementaryStreamQueue::appendData( break; } + case AC3: + { + uint8_t *ptr = (uint8_t *)data; + + ssize_t startOffset = -1; + for (size_t i = 0; i < size; ++i) { + if (IsSeeminglyValidAC3Header(&ptr[i], size - i)) { + startOffset = i; + break; + } + } + + if (startOffset < 0) { + return ERROR_MALFORMED; + } + + if (startOffset > 0) { + ALOGI("found something resembling an AC3 syncword at " + "offset %d", + startOffset); + } + + data = &ptr[startOffset]; + size -= startOffset; + break; + } + case MPEG_AUDIO: { uint8_t *ptr = (uint8_t *)data; @@ -328,6 +471,8 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() { return dequeueAccessUnitH264(); case AAC: return dequeueAccessUnitAAC(); + case AC3: + return dequeueAccessUnitAC3(); case MPEG_VIDEO: return dequeueAccessUnitMPEGVideo(); case MPEG4_VIDEO: @@ -340,6 +485,51 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() { } } +sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC3() { + unsigned syncStartPos = 0; // in bytes + unsigned payloadSize = 0; + sp<MetaData> format = new MetaData; + while (true) { + if (syncStartPos + 2 >= mBuffer->size()) { + return NULL; + } + + payloadSize = parseAC3SyncFrame( + mBuffer->data() + syncStartPos, + mBuffer->size() - syncStartPos, + &format); + if (payloadSize > 0) { + break; + } + ++syncStartPos; + } + + if (mBuffer->size() < syncStartPos + payloadSize) { + ALOGV("Not enough buffer size for AC3"); + return NULL; + } + + if (mFormat == NULL) { + mFormat = format; + } + + sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize); + memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize); + + int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize); + CHECK_GE(timeUs, 0ll); + accessUnit->meta()->setInt64("timeUs", timeUs); + + memmove( + mBuffer->data(), + mBuffer->data() + syncStartPos + payloadSize, + mBuffer->size() - syncStartPos - payloadSize); + + mBuffer->setRange(0, mBuffer->size() - syncStartPos - 
payloadSize); + + return accessUnit; +} + sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() { if (mBuffer->size() < 4) { return NULL; diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h index 66a8087..a2cca77 100644 --- a/media/libstagefright/mpeg2ts/ESQueue.h +++ b/media/libstagefright/mpeg2ts/ESQueue.h @@ -32,6 +32,7 @@ struct ElementaryStreamQueue { enum Mode { H264, AAC, + AC3, MPEG_AUDIO, MPEG_VIDEO, MPEG4_VIDEO, @@ -67,6 +68,7 @@ private: sp<ABuffer> dequeueAccessUnitH264(); sp<ABuffer> dequeueAccessUnitAAC(); + sp<ABuffer> dequeueAccessUnitAC3(); sp<ABuffer> dequeueAccessUnitMPEGAudio(); sp<ABuffer> dequeueAccessUnitMPEGVideo(); sp<ABuffer> dequeueAccessUnitMPEG4Video(); diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk index a5e7ba2..9a9fde2 100644 --- a/media/libstagefright/timedtext/test/Android.mk +++ b/media/libstagefright/timedtext/test/Android.mk @@ -2,7 +2,6 @@ LOCAL_PATH:= $(call my-dir) # ================================================================ # Unit tests for libstagefright_timedtext -# See also /development/testrunner/test_defs.xml # ================================================================ # ================================================================ @@ -18,10 +17,13 @@ LOCAL_SRC_FILES := TimedTextSRTSource_test.cpp LOCAL_C_INCLUDES := \ $(TOP)/external/expat/lib \ - $(TOP)/frameworks/base/media/libstagefright/timedtext + $(TOP)/frameworks/av/media/libstagefright/timedtext LOCAL_SHARED_LIBRARIES := \ + libbinder \ libexpat \ - libstagefright + libstagefright \ + libstagefright_foundation \ + libutils include $(BUILD_NATIVE_TEST) diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk index 1ac647a..f848054 100644 --- a/media/mediaserver/Android.mk +++ b/media/mediaserver/Android.mk @@ -15,6 +15,7 @@ LOCAL_SRC_FILES:= \ LOCAL_SHARED_LIBRARIES := \ libaudioflinger \ + libcamera_metadata\ 
libcameraservice \ libmedialogservice \ libcutils \ diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk index 54377f1..4524d3c 100644 --- a/services/audioflinger/Android.mk +++ b/services/audioflinger/Android.mk @@ -23,7 +23,8 @@ LOCAL_SRC_FILES:= \ AudioPolicyService.cpp \ ServiceUtilities.cpp \ AudioResamplerCubic.cpp.arm \ - AudioResamplerSinc.cpp.arm + AudioResamplerSinc.cpp.arm \ + AudioResamplerDyn.cpp.arm LOCAL_SRC_FILES += StateQueue.cpp @@ -74,12 +75,20 @@ include $(BUILD_SHARED_LIBRARY) include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ - test-resample.cpp \ + test-resample.cpp \ AudioResampler.cpp.arm \ - AudioResamplerCubic.cpp.arm \ - AudioResamplerSinc.cpp.arm + AudioResamplerCubic.cpp.arm \ + AudioResamplerSinc.cpp.arm \ + AudioResamplerDyn.cpp.arm + +LOCAL_C_INCLUDES := \ + $(call include-path-for, audio-utils) + +LOCAL_STATIC_LIBRARIES := \ + libsndfile LOCAL_SHARED_LIBRARIES := \ + libaudioutils \ libdl \ libcutils \ libutils \ diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index 3132e54..f9cc17b 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -162,12 +162,15 @@ AudioFlinger::AudioFlinger() (void) property_get("af.tee", value, "0"); teeEnabled = atoi(value); } - if (teeEnabled & 1) + if (teeEnabled & 1) { mTeeSinkInputEnabled = true; - if (teeEnabled & 2) + } + if (teeEnabled & 2) { mTeeSinkOutputEnabled = true; - if (teeEnabled & 4) + } + if (teeEnabled & 4) { mTeeSinkTrackEnabled = true; + } #endif } @@ -210,6 +213,18 @@ AudioFlinger::~AudioFlinger() audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice()); delete mAudioHwDevs.valueAt(i); } + + // Tell media.log service about any old writers that still need to be unregistered + sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); + if (binder != 0) { + sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder)); + for (size_t count 
= mUnregisteredWriters.size(); count > 0; count--) { + sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory()); + mUnregisteredWriters.pop(); + mediaLogService->unregisterWriter(iMemory); + } + } + } static const char * const audio_interfaces[] = { @@ -403,16 +418,44 @@ sp<AudioFlinger::Client> AudioFlinger::registerPid_l(pid_t pid) sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name) { + // If there is no memory allocated for logs, return a dummy writer that does nothing if (mLogMemoryDealer == 0) { return new NBLog::Writer(); } - sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); - sp<NBLog::Writer> writer = new NBLog::Writer(size, shared); sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); - if (binder != 0) { - interface_cast<IMediaLogService>(binder)->registerWriter(shared, size, name); + // Similarly if we can't contact the media.log service, also return a dummy writer + if (binder == 0) { + return new NBLog::Writer(); } - return writer; + sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder)); + sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + // If allocation fails, consult the vector of previously unregistered writers + // and garbage-collect one or more them until an allocation succeeds + if (shared == 0) { + Mutex::Autolock _l(mUnregisteredWritersLock); + for (size_t count = mUnregisteredWriters.size(); count > 0; count--) { + { + // Pick the oldest stale writer to garbage-collect + sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory()); + mUnregisteredWriters.removeAt(0); + mediaLogService->unregisterWriter(iMemory); + // Now the media.log remote reference to IMemory is gone. When our last local + // reference to IMemory also drops to zero at end of this block, + // the IMemory destructor will deallocate the region from mLogMemoryDealer. 
+ } + // Re-attempt the allocation + shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + if (shared != 0) { + goto success; + } + } + // Even after garbage-collecting all old writers, there is still not enough memory, + // so return a dummy writer + return new NBLog::Writer(); + } +success: + mediaLogService->registerWriter(shared, size, name); + return new NBLog::Writer(size, shared); } void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) @@ -424,13 +467,10 @@ void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) if (iMemory == 0) { return; } - sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); - if (binder != 0) { - interface_cast<IMediaLogService>(binder)->unregisterWriter(iMemory); - // Now the media.log remote reference to IMemory is gone. - // When our last local reference to IMemory also drops to zero, - // the IMemory destructor will deallocate the region from mMemoryDealer. - } + // Rather than removing the writer immediately, append it to a queue of old writers to + // be garbage-collected later. This allows us to continue to view old logs for a while. 
+ Mutex::Autolock _l(mUnregisteredWritersLock); + mUnregisteredWriters.push(writer); } // IAudioFlinger interface @@ -441,7 +481,7 @@ sp<IAudioTrack> AudioFlinger::createTrack( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *frameCount, IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, @@ -473,6 +513,12 @@ sp<IAudioTrack> AudioFlinger::createTrack( goto Exit; } + if (sharedBuffer != 0 && sharedBuffer->pointer() == NULL) { + ALOGE("createTrack() sharedBuffer is non-0 but has NULL pointer()"); + lStatus = BAD_VALUE; + goto Exit; + } + { Mutex::Autolock _l(mLock); PlaybackThread *thread = checkPlaybackThread_l(output); @@ -488,7 +534,7 @@ sp<IAudioTrack> AudioFlinger::createTrack( client = registerPid_l(pid); ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId); - if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) { + if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) { // check if an effect chain with the same session ID is present on another // output thread and move it here. 
for (size_t i = 0; i < mPlaybackThreads.size(); i++) { @@ -513,10 +559,13 @@ sp<IAudioTrack> AudioFlinger::createTrack( track = thread->createTrack_l(client, streamType, sampleRate, format, channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, clientUid, &lStatus); + LOG_ALWAYS_FATAL_IF((track != 0) != (lStatus == NO_ERROR)); + // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless // move effect chain to this output thread if an effect on same session was waiting // for a track to be created if (lStatus == NO_ERROR && effectThread != NULL) { + // no risk of deadlock because AudioFlinger::mLock is held Mutex::Autolock _dl(thread->mLock); Mutex::Autolock _sl(effectThread->mLock); moveEffectChain_l(lSessionId, effectThread, thread, true); @@ -536,7 +585,9 @@ sp<IAudioTrack> AudioFlinger::createTrack( } } } + } + if (lStatus == NO_ERROR) { // s for server's pid, n for normal mixer name, f for fast index name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0, @@ -550,9 +601,7 @@ sp<IAudioTrack> AudioFlinger::createTrack( } Exit: - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return trackHandle; } @@ -1010,7 +1059,7 @@ size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t form return size; } -unsigned int AudioFlinger::getInputFramesLost(audio_io_handle_t ioHandle) const +uint32_t AudioFlinger::getInputFramesLost(audio_io_handle_t ioHandle) const { Mutex::Autolock _l(mLock); @@ -1228,7 +1277,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *frameCount, IAudioFlinger::track_flags_t *flags, pid_t tid, int *sessionId, @@ -1276,7 +1325,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( client = registerPid_l(pid); // If no audio session id is provided, create one here - if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) { + if (sessionId 
!= NULL && *sessionId != AUDIO_SESSION_ALLOCATE) { lSessionId = *sessionId; } else { lSessionId = nextUniqueId(); @@ -1293,6 +1342,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( flags, tid, &lStatus); LOG_ALWAYS_FATAL_IF((recordTrack != 0) != (lStatus == NO_ERROR)); } + if (lStatus != NO_ERROR) { // remove local strong reference to Client before deleting the RecordTrack so that the // Client destructor is called by the TrackBase destructor with mLock held @@ -1301,14 +1351,11 @@ sp<IAudioRecord> AudioFlinger::openRecord( goto Exit; } - // return to handle to client + // return handle to client recordHandle = new RecordHandle(recordTrack); - lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return recordHandle; } @@ -1449,18 +1496,15 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo) { - PlaybackThread *thread = NULL; struct audio_config config; + memset(&config, 0, sizeof(config)); config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0; config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0; config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT; - if (offloadInfo) { + if (offloadInfo != NULL) { config.offload_info = *offloadInfo; } - audio_stream_out_t *outStream = NULL; - AudioHwDevice *outHwDev; - ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x", module, (pDevices != NULL) ? *pDevices : 0, @@ -1469,7 +1513,7 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, config.channel_mask, flags); ALOGV("openOutput(), offloadInfo %p version 0x%04x", - offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version ); + offloadInfo, offloadInfo == NULL ? 
-1 : offloadInfo->version); if (pDevices == NULL || *pDevices == 0) { return 0; @@ -1477,15 +1521,17 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, Mutex::Autolock _l(mLock); - outHwDev = findSuitableHwDev_l(module, *pDevices); - if (outHwDev == NULL) + AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices); + if (outHwDev == NULL) { return 0; + } audio_hw_device_t *hwDevHal = outHwDev->hwDevice(); audio_io_handle_t id = nextUniqueId(); mHardwareStatus = AUDIO_HW_OUTPUT_OPEN; + audio_stream_out_t *outStream = NULL; status_t status = hwDevHal->open_output_stream(hwDevHal, id, *pDevices, @@ -1505,6 +1551,7 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, if (status == NO_ERROR && outStream != NULL) { AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags); + PlaybackThread *thread; if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { thread = new OffloadThread(this, output, id, *pDevices); ALOGV("openOutput() created offload output: ID %d thread %p", id, thread); @@ -1672,18 +1719,15 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, audio_format_t *pFormat, audio_channel_mask_t *pChannelMask) { - status_t status; - RecordThread *thread = NULL; struct audio_config config; + memset(&config, 0, sizeof(config)); config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0; config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0; config.format = (pFormat != NULL) ? 
*pFormat : AUDIO_FORMAT_DEFAULT; uint32_t reqSamplingRate = config.sample_rate; audio_format_t reqFormat = config.format; - audio_channel_mask_t reqChannels = config.channel_mask; - audio_stream_in_t *inStream = NULL; - AudioHwDevice *inHwDev; + audio_channel_mask_t reqChannelMask = config.channel_mask; if (pDevices == NULL || *pDevices == 0) { return 0; @@ -1691,14 +1735,16 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, Mutex::Autolock _l(mLock); - inHwDev = findSuitableHwDev_l(module, *pDevices); - if (inHwDev == NULL) + AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices); + if (inHwDev == NULL) { return 0; + } audio_hw_device_t *inHwHal = inHwDev->hwDevice(); audio_io_handle_t id = nextUniqueId(); - status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, + audio_stream_in_t *inStream = NULL; + status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream); ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, " "status %d", @@ -1714,10 +1760,12 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, if (status == BAD_VALUE && reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT && (config.sample_rate <= 2 * reqSamplingRate) && - (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) { + (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannelMask) <= FCC_2)) { + // FIXME describe the change proposed by HAL (save old values so we can log them here) ALOGV("openInput() reopening with proposed sampling rate and channel mask"); inStream = NULL; status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream); + // FIXME log this new status; HAL should not propose any further changes } if (status == NO_ERROR && inStream != NULL) { @@ -1776,10 +1824,10 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, // Start record thread // RecordThread 
requires both input and output device indication to forward to audio // pre processing modules - thread = new RecordThread(this, + RecordThread *thread = new RecordThread(this, input, reqSamplingRate, - reqChannels, + reqChannelMask, id, primaryOutputDevice_l(), *pDevices @@ -1796,7 +1844,7 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, *pFormat = config.format; } if (pChannelMask != NULL) { - *pChannelMask = reqChannels; + *pChannelMask = reqChannelMask; } // notify client processes of the new input creation @@ -1954,7 +2002,7 @@ void AudioFlinger::purgeStaleEffects_l() { } } if (!found) { - Mutex::Autolock _l (t->mLock); + Mutex::Autolock _l(t->mLock); // remove all effects from the chain while (ec->mEffects.size()) { sp<EffectModule> effect = ec->mEffects[0]; @@ -2249,9 +2297,7 @@ sp<IEffect> AudioFlinger::createEffect( } Exit: - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return handle; } diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h index 53e238e..e0d1404 100644 --- a/services/audioflinger/AudioFlinger.h +++ b/services/audioflinger/AudioFlinger.h @@ -102,7 +102,7 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, @@ -110,18 +110,18 @@ public: int *sessionId, String8& name, int clientUid, - status_t *status); + status_t *status /*non-NULL*/); virtual sp<IAudioRecord> openRecord( audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, IAudioFlinger::track_flags_t *flags, pid_t tid, int *sessionId, - status_t *status); + status_t *status /*non-NULL*/); virtual uint32_t sampleRate(audio_io_handle_t output) const; virtual int channelCount(audio_io_handle_t output) const; @@ -189,7 +189,7 @@ public: virtual 
status_t getRenderPosition(size_t *halFrames, size_t *dspFrames, audio_io_handle_t output) const; - virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const; + virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const; virtual int newAudioSessionId(); @@ -210,7 +210,7 @@ public: int32_t priority, audio_io_handle_t io, int sessionId, - status_t *status, + status_t *status /*non-NULL*/, int *id, int *enabled); @@ -235,8 +235,12 @@ public: sp<NBLog::Writer> newWriter_l(size_t size, const char *name); void unregisterWriter(const sp<NBLog::Writer>& writer); private: - static const size_t kLogMemorySize = 10 * 1024; + static const size_t kLogMemorySize = 40 * 1024; sp<MemoryDealer> mLogMemoryDealer; // == 0 when NBLog is disabled + // When a log writer is unregistered, it is done lazily so that media.log can continue to see it + // for as long as possible. The memory is only freed when it is needed for another log writer. + Vector< sp<NBLog::Writer> > mUnregisteredWriters; + Mutex mUnregisteredWritersLock; public: class SyncEvent; @@ -499,7 +503,7 @@ private: private: const char * const mModuleName; audio_hw_device_t * const mHwDevice; - Flags mFlags; + const Flags mFlags; }; // AudioStreamOut and AudioStreamIn are immutable, so their fields are const. 
@@ -509,7 +513,7 @@ private: struct AudioStreamOut { AudioHwDevice* const audioHwDev; audio_stream_out_t* const stream; - audio_output_flags_t flags; + const audio_output_flags_t flags; audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); } diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp index df4e029..8bea752 100644 --- a/services/audioflinger/AudioMixer.cpp +++ b/services/audioflinger/AudioMixer.cpp @@ -58,7 +58,7 @@ AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider() status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) { //ALOGV("DownmixerBufferProvider::getNextBuffer()"); - if (this->mTrackBufferProvider != NULL) { + if (mTrackBufferProvider != NULL) { status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts); if (res == OK) { mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount; @@ -81,7 +81,7 @@ status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider: void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) { //ALOGV("DownmixerBufferProvider::releaseBuffer()"); - if (this->mTrackBufferProvider != NULL) { + if (mTrackBufferProvider != NULL) { mTrackBufferProvider->releaseBuffer(pBuffer); } else { ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider"); @@ -90,9 +90,9 @@ void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buf // ---------------------------------------------------------------------------- -bool AudioMixer::isMultichannelCapable = false; +bool AudioMixer::sIsMultichannelCapable = false; -effect_descriptor_t AudioMixer::dwnmFxDesc; +effect_descriptor_t AudioMixer::sDwnmFxDesc; // Ensure mConfiguredNames bitmask is initialized properly on all architectures. // The value of 1 << x is undefined in C when x >= 32. 
@@ -113,8 +113,6 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr // AudioMixer is not yet capable of multi-channel output beyond stereo ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS); - LocalClock lc; - pthread_once(&sOnceControl, &sInitRoutine); mState.enabledTracks= 0; @@ -136,27 +134,6 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr t++; } - // find multichannel downmix effect if we have to play multichannel content - uint32_t numEffects = 0; - int ret = EffectQueryNumberEffects(&numEffects); - if (ret != 0) { - ALOGE("AudioMixer() error %d querying number of effects", ret); - return; - } - ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); - - for (uint32_t i = 0 ; i < numEffects ; i++) { - if (EffectQueryEffect(i, &dwnmFxDesc) == 0) { - ALOGV("effect %d is called %s", i, dwnmFxDesc.name); - if (memcmp(&dwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { - ALOGI("found effect \"%s\" from %s", - dwnmFxDesc.name, dwnmFxDesc.implementor); - isMultichannelCapable = true; - break; - } - } - } - ALOGE_IF(!isMultichannelCapable, "unable to find downmix effect"); } AudioMixer::~AudioMixer() @@ -229,7 +206,7 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) void AudioMixer::invalidateState(uint32_t mask) { - if (mask) { + if (mask != 0) { mState.needsChanged |= mask; mState.hook = process__validate; } @@ -276,13 +253,13 @@ status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName) DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(); int32_t status; - if (!isMultichannelCapable) { + if (!sIsMultichannelCapable) { ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content", trackName); goto noDownmixForActiveTrack; } - if (EffectCreate(&dwnmFxDesc.uuid, + if (EffectCreate(&sDwnmFxDesc.uuid, pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using 
random value*/, &pDbp->mDownmixHandle/*pHandle*/) != 0) { ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName); @@ -566,7 +543,7 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate) resampler = AudioResampler::create( format, // the resampler sees the number of channels after the downmixer, if any - downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount, + (int) (downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount), devSampleRate, quality); resampler->setLocalTimeFreq(sLocalTimeFreq); } @@ -667,27 +644,29 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) countActiveTracks++; track_t& t = state->tracks[i]; uint32_t n = 0; + // FIXME can overflow (mask is only 3 bits) n |= NEEDS_CHANNEL_1 + t.channelCount - 1; - n |= NEEDS_FORMAT_16; - n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED; + if (t.doesResample()) { + n |= NEEDS_RESAMPLE; + } if (t.auxLevel != 0 && t.auxBuffer != NULL) { - n |= NEEDS_AUX_ENABLED; + n |= NEEDS_AUX; } if (t.volumeInc[0]|t.volumeInc[1]) { volumeRamp = true; } else if (!t.doesResample() && t.volumeRL == 0) { - n |= NEEDS_MUTE_ENABLED; + n |= NEEDS_MUTE; } t.needs = n; - if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) { + if (n & NEEDS_MUTE) { t.hook = track__nop; } else { - if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) { + if (n & NEEDS_AUX) { all16BitsStereoNoResample = false; } - if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) { + if (n & NEEDS_RESAMPLE) { all16BitsStereoNoResample = false; resampling = true; t.hook = track__genericResample; @@ -709,7 +688,7 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) // select the processing hooks state->hook = process__nop; - if (countActiveTracks) { + if (countActiveTracks > 0) { if (resampling) { if (!state->outputTemp) { state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount]; @@ -745,16 +724,15 @@ void 
AudioMixer::process__validate(state_t* state, int64_t pts) // Now that the volume ramp has been done, set optimal state and // track hooks for subsequent mixer process - if (countActiveTracks) { + if (countActiveTracks > 0) { bool allMuted = true; uint32_t en = state->enabledTracks; while (en) { const int i = 31 - __builtin_clz(en); en &= ~(1<<i); track_t& t = state->tracks[i]; - if (!t.doesResample() && t.volumeRL == 0) - { - t.needs |= NEEDS_MUTE_ENABLED; + if (!t.doesResample() && t.volumeRL == 0) { + t.needs |= NEEDS_MUTE; t.hook = track__nop; } else { allMuted = false; @@ -1124,8 +1102,9 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) t.in = t.buffer.raw; // t.in == NULL can happen if the track was flushed just after having // been enabled for mixing. - if (t.in == NULL) + if (t.in == NULL) { enabledTracks &= ~(1<<i); + } } e0 = enabledTracks; @@ -1157,12 +1136,12 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) track_t& t = state->tracks[i]; size_t outFrames = BLOCKSIZE; int32_t *aux = NULL; - if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) { + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { aux = t.auxBuffer + numFrames; } while (outFrames) { size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount; - if (inFrames) { + if (inFrames > 0) { t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux); t.frameCount -= inFrames; @@ -1238,14 +1217,14 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) e1 &= ~(1<<i); track_t& t = state->tracks[i]; int32_t *aux = NULL; - if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) { + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { aux = t.auxBuffer; } // this is a little goofy, on the resampling case we don't // acquire/release the buffers because it's done by // the resampler. 
- if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) { + if (t.needs & NEEDS_RESAMPLE) { t.resampler->setPTS(pts); t.hook(&t, outTemp, numFrames, state->resampleTemp, aux); } else { @@ -1445,8 +1424,9 @@ void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state, int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, int outputFrameIndex) { - if (AudioBufferProvider::kInvalidPTS == basePTS) + if (AudioBufferProvider::kInvalidPTS == basePTS) { return AudioBufferProvider::kInvalidPTS; + } return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate); } @@ -1458,6 +1438,28 @@ int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, { LocalClock lc; sLocalTimeFreq = lc.getLocalFreq(); + + // find multichannel downmix effect if we have to play multichannel content + uint32_t numEffects = 0; + int ret = EffectQueryNumberEffects(&numEffects); + if (ret != 0) { + ALOGE("AudioMixer() error %d querying number of effects", ret); + return; + } + ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); + + for (uint32_t i = 0 ; i < numEffects ; i++) { + if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) { + ALOGV("effect %d is called %s", i, sDwnmFxDesc.name); + if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { + ALOGI("found effect \"%s\" from %s", + sDwnmFxDesc.name, sDwnmFxDesc.implementor); + sIsMultichannelCapable = true; + break; + } + } + } + ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); } // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h index 43aeb86..d5c9da7 100644 --- a/services/audioflinger/AudioMixer.h +++ b/services/audioflinger/AudioMixer.h @@ -120,27 +120,19 @@ public: private: enum { + // FIXME this representation permits up to 8 channels NEEDS_CHANNEL_COUNT__MASK = 0x00000007, - NEEDS_FORMAT__MASK = 0x000000F0, - 
NEEDS_MUTE__MASK = 0x00000100, - NEEDS_RESAMPLE__MASK = 0x00001000, - NEEDS_AUX__MASK = 0x00010000, }; enum { - NEEDS_CHANNEL_1 = 0x00000000, - NEEDS_CHANNEL_2 = 0x00000001, + NEEDS_CHANNEL_1 = 0x00000000, // mono + NEEDS_CHANNEL_2 = 0x00000001, // stereo - NEEDS_FORMAT_16 = 0x00000010, + // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT - NEEDS_MUTE_DISABLED = 0x00000000, - NEEDS_MUTE_ENABLED = 0x00000100, - - NEEDS_RESAMPLE_DISABLED = 0x00000000, - NEEDS_RESAMPLE_ENABLED = 0x00001000, - - NEEDS_AUX_DISABLED = 0x00000000, - NEEDS_AUX_ENABLED = 0x00010000, + NEEDS_MUTE = 0x00000100, + NEEDS_RESAMPLE = 0x00001000, + NEEDS_AUX = 0x00010000, }; struct state_t; @@ -256,9 +248,9 @@ private: state_t mState __attribute__((aligned(32))); // effect descriptor for the downmixer used by the mixer - static effect_descriptor_t dwnmFxDesc; + static effect_descriptor_t sDwnmFxDesc; // indicates whether a downmix effect has been found and is usable by this mixer - static bool isMultichannelCapable; + static bool sIsMultichannelCapable; // Call after changing either the enabled status of a track, or parameters of an enabled track. // OK to call more often than that, but unnecessary. 
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp index 646a317..415f696 100644 --- a/services/audioflinger/AudioPolicyService.cpp +++ b/services/audioflinger/AudioPolicyService.cpp @@ -77,24 +77,28 @@ AudioPolicyService::AudioPolicyService() mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this); /* instantiate the audio policy manager */ rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module); - if (rc) + if (rc) { return; + } rc = audio_policy_dev_open(module, &mpAudioPolicyDev); ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc)); - if (rc) + if (rc) { return; + } rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this, &mpAudioPolicy); ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc)); - if (rc) + if (rc) { return; + } rc = mpAudioPolicy->init_check(mpAudioPolicy); ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc)); - if (rc) + if (rc) { return; + } ALOGI("Loaded audio policy from %s (%s)", module->name, module->id); @@ -126,10 +130,12 @@ AudioPolicyService::~AudioPolicyService() } mInputs.clear(); - if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) + if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) { mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy); - if (mpAudioPolicyDev != NULL) + } + if (mpAudioPolicyDev != NULL) { audio_policy_dev_close(mpAudioPolicyDev); + } } status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device, @@ -469,8 +475,9 @@ audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stre audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc) { + // FIXME change return type to status_t, and return NO_INIT here if (mpAudioPolicy == NULL) { - return NO_INIT; + return 0; } Mutex::Autolock _l(mLock); return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc); @@ -1114,11 +1121,13 @@ int 
AudioPolicyService::setStreamVolume(audio_stream_type_t stream, int AudioPolicyService::startTone(audio_policy_tone_t tone, audio_stream_type_t stream) { - if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) + if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) { ALOGE("startTone: illegal tone requested (%d)", tone); - if (stream != AUDIO_STREAM_VOICE_CALL) + } + if (stream != AUDIO_STREAM_VOICE_CALL) { ALOGE("startTone: illegal stream (%d) requested for tone %d", stream, tone); + } mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING, AUDIO_STREAM_VOICE_CALL); return 0; @@ -1517,8 +1526,9 @@ static audio_io_handle_t aps_open_dup_output(void *service, static int aps_close_output(void *service, audio_io_handle_t output) { sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) + if (af == 0) { return PERMISSION_DENIED; + } return af->closeOutput(output); } @@ -1581,8 +1591,9 @@ static audio_io_handle_t aps_open_input_on_module(void *service, static int aps_close_input(void *service, audio_io_handle_t input) { sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) + if (af == 0) { return PERMISSION_DENIED; + } return af->closeInput(input); } @@ -1591,8 +1602,9 @@ static int aps_set_stream_output(void *service, audio_stream_type_t stream, audio_io_handle_t output) { sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) + if (af == 0) { return PERMISSION_DENIED; + } return af->setStreamOutput(stream, output); } @@ -1602,8 +1614,9 @@ static int aps_move_effects(void *service, int session, audio_io_handle_t dst_output) { sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) + if (af == 0) { return PERMISSION_DENIED; + } return af->moveEffects(session, src_output, dst_output); } diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp index 2c3c719..3b5a8c1 100644 --- a/services/audioflinger/AudioResampler.cpp +++ 
b/services/audioflinger/AudioResampler.cpp @@ -25,6 +25,7 @@ #include "AudioResampler.h" #include "AudioResamplerSinc.h" #include "AudioResamplerCubic.h" +#include "AudioResamplerDyn.h" #ifdef __arm__ #include <machine/cpu-features.h> @@ -85,6 +86,9 @@ bool AudioResampler::qualityIsSupported(src_quality quality) case MED_QUALITY: case HIGH_QUALITY: case VERY_HIGH_QUALITY: + case DYN_LOW_QUALITY: + case DYN_MED_QUALITY: + case DYN_HIGH_QUALITY: return true; default: return false; @@ -105,7 +109,7 @@ void AudioResampler::init_routine() if (*endptr == '\0') { defaultQuality = (src_quality) l; ALOGD("forcing AudioResampler quality to %d", defaultQuality); - if (defaultQuality < DEFAULT_QUALITY || defaultQuality > VERY_HIGH_QUALITY) { + if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) { defaultQuality = DEFAULT_QUALITY; } } @@ -125,6 +129,12 @@ uint32_t AudioResampler::qualityMHz(src_quality quality) return 20; case VERY_HIGH_QUALITY: return 34; + case DYN_LOW_QUALITY: + return 4; + case DYN_MED_QUALITY: + return 6; + case DYN_HIGH_QUALITY: + return 12; } } @@ -175,6 +185,15 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, case VERY_HIGH_QUALITY: quality = HIGH_QUALITY; break; + case DYN_LOW_QUALITY: + atFinalQuality = true; + break; + case DYN_MED_QUALITY: + quality = DYN_LOW_QUALITY; + break; + case DYN_HIGH_QUALITY: + quality = DYN_MED_QUALITY; + break; } } pthread_mutex_unlock(&mutex); @@ -200,6 +219,12 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", quality); resampler = new AudioResamplerSinc(bitDepth, inChannelCount, sampleRate, quality); break; + case DYN_LOW_QUALITY: + case DYN_MED_QUALITY: + case DYN_HIGH_QUALITY: + ALOGV("Create dynamic Resampler = %d", quality); + resampler = new AudioResamplerDyn(bitDepth, inChannelCount, sampleRate, quality); + break; } // initialize resampler @@ -339,8 +364,9 @@ void 
AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction); out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction); Advance(&inputIndex, &phaseFraction, phaseIncrement); - if (outputIndex == outputSampleCount) + if (outputIndex == outputSampleCount) { break; + } } // process input samples @@ -434,8 +460,9 @@ void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, out[outputIndex++] += vl * sample; out[outputIndex++] += vr * sample; Advance(&inputIndex, &phaseFraction, phaseIncrement); - if (outputIndex == outputSampleCount) + if (outputIndex == outputSampleCount) { break; + } } // process input samples diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h index 33e64ce..c341325 100644 --- a/services/audioflinger/AudioResampler.h +++ b/services/audioflinger/AudioResampler.h @@ -41,6 +41,9 @@ public: MED_QUALITY=2, HIGH_QUALITY=3, VERY_HIGH_QUALITY=4, + DYN_LOW_QUALITY=5, + DYN_MED_QUALITY=6, + DYN_HIGH_QUALITY=7, }; static AudioResampler* create(int bitDepth, int inChannelCount, diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp index 18e59e9..1f9714b 100644 --- a/services/audioflinger/AudioResamplerCubic.cpp +++ b/services/audioflinger/AudioResamplerCubic.cpp @@ -66,8 +66,9 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, if (mBuffer.frameCount == 0) { mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { return; + } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); } int16_t *in = mBuffer.i16; @@ -97,8 +98,9 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, calculateOutputPTS(outputIndex / 2)); - if (mBuffer.raw == NULL) + if 
(mBuffer.raw == NULL) { goto save_state; // ugly, but efficient + } in = mBuffer.i16; // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } @@ -132,8 +134,9 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, if (mBuffer.frameCount == 0) { mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { return; + } // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } int16_t *in = mBuffer.i16; @@ -163,8 +166,9 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, calculateOutputPTS(outputIndex / 2)); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { goto save_state; // ugly, but efficient + } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); in = mBuffer.i16; } diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp new file mode 100644 index 0000000..984548d --- /dev/null +++ b/services/audioflinger/AudioResamplerDyn.cpp @@ -0,0 +1,551 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioResamplerDyn" +//#define LOG_NDEBUG 0 + +#include <malloc.h> +#include <string.h> +#include <stdlib.h> +#include <dlfcn.h> +#include <math.h> + +#include <cutils/compiler.h> +#include <cutils/properties.h> +#include <utils/Log.h> + +#include "AudioResamplerFirOps.h" // USE_NEON and USE_INLINE_ASSEMBLY defined here +#include "AudioResamplerFirProcess.h" +#include "AudioResamplerFirProcessNeon.h" +#include "AudioResamplerFirGen.h" // requires math.h +#include "AudioResamplerDyn.h" + +//#define DEBUG_RESAMPLER + +namespace android { + +// generate a unique resample type compile-time constant (constexpr) +#define RESAMPLETYPE(CHANNELS, LOCKED, STRIDE, COEFTYPE) \ + ((((CHANNELS)-1)&1) | !!(LOCKED)<<1 | (COEFTYPE)<<2 \ + | ((STRIDE)==8 ? 1 : (STRIDE)==16 ? 2 : 0)<<3) + +/* + * InBuffer is a type agnostic input buffer. + * + * Layout of the state buffer for halfNumCoefs=8. + * + * [rrrrrrppppppppnnnnnnnnrrrrrrrrrrrrrrrrrrr.... rrrrrrr] + * S I R + * + * S = mState + * I = mImpulse + * R = mRingFull + * p = past samples, convoluted with the (p)ositive side of sinc() + * n = future samples, convoluted with the (n)egative side of sinc() + * r = extra space for implementing the ring buffer + */ + +template<typename TI> +AudioResamplerDyn::InBuffer<TI>::InBuffer() + : mState(NULL), mImpulse(NULL), mRingFull(NULL), mStateSize(0) { +} + +template<typename TI> +AudioResamplerDyn::InBuffer<TI>::~InBuffer() { + init(); +} + +template<typename TI> +void AudioResamplerDyn::InBuffer<TI>::init() { + free(mState); + mState = NULL; + mImpulse = NULL; + mRingFull = NULL; + mStateSize = 0; +} + +// resizes the state buffer to accommodate the appropriate filter length +template<typename TI> +void AudioResamplerDyn::InBuffer<TI>::resize(int CHANNELS, int halfNumCoefs) { + // calculate desired state size + int stateSize = halfNumCoefs * CHANNELS * 2 + * kStateSizeMultipleOfFilterLength; + + // check if buffer needs resizing + if (mState + && stateSize == 
mStateSize + && mRingFull-mState == mStateSize-halfNumCoefs*CHANNELS) { + return; + } + + // create new buffer + TI* state = (int16_t*)memalign(32, stateSize*sizeof(*state)); + memset(state, 0, stateSize*sizeof(*state)); + + // attempt to preserve state + if (mState) { + TI* srcLo = mImpulse - halfNumCoefs*CHANNELS; + TI* srcHi = mImpulse + halfNumCoefs*CHANNELS; + TI* dst = state; + + if (srcLo < mState) { + dst += mState-srcLo; + srcLo = mState; + } + if (srcHi > mState + mStateSize) { + srcHi = mState + mStateSize; + } + memcpy(dst, srcLo, (srcHi - srcLo) * sizeof(*srcLo)); + free(mState); + } + + // set class member vars + mState = state; + mStateSize = stateSize; + mImpulse = mState + halfNumCoefs*CHANNELS; // actually one sample greater than needed + mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS; +} + +// copy in the input data into the head (impulse+halfNumCoefs) of the buffer. +template<typename TI> +template<int CHANNELS> +void AudioResamplerDyn::InBuffer<TI>::readAgain(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex) { + int16_t* head = impulse + halfNumCoefs*CHANNELS; + for (size_t i=0 ; i<CHANNELS ; i++) { + head[i] = in[inputIndex*CHANNELS + i]; + } +} + +// advance the impulse pointer, and load in data into the head (impulse+halfNumCoefs) +template<typename TI> +template<int CHANNELS> +void AudioResamplerDyn::InBuffer<TI>::readAdvance(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex) { + impulse += CHANNELS; + + if (CC_UNLIKELY(impulse >= mRingFull)) { + const size_t shiftDown = mRingFull - mState - halfNumCoefs*CHANNELS; + memcpy(mState, mState+shiftDown, halfNumCoefs*CHANNELS*2*sizeof(TI)); + impulse -= shiftDown; + } + readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); +} + +void AudioResamplerDyn::Constants::set( + int L, int halfNumCoefs, int inSampleRate, int outSampleRate) +{ + int bits = 0; + int lscale = inSampleRate/outSampleRate < 2 ? 
L - 1 : + static_cast<int>(static_cast<uint64_t>(L)*inSampleRate/outSampleRate); + for (int i=lscale; i; ++bits, i>>=1) + ; + mL = L; + mShift = kNumPhaseBits - bits; + mHalfNumCoefs = halfNumCoefs; +} + +AudioResamplerDyn::AudioResamplerDyn(int bitDepth, + int inChannelCount, int32_t sampleRate, src_quality quality) + : AudioResampler(bitDepth, inChannelCount, sampleRate, quality), + mResampleType(0), mFilterSampleRate(0), mFilterQuality(DEFAULT_QUALITY), + mCoefBuffer(NULL) +{ + mVolumeSimd[0] = mVolumeSimd[1] = 0; + mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better +} + +AudioResamplerDyn::~AudioResamplerDyn() { + free(mCoefBuffer); +} + +void AudioResamplerDyn::init() { + mFilterSampleRate = 0; // always trigger new filter generation + mInBuffer.init(); +} + +void AudioResamplerDyn::setVolume(int16_t left, int16_t right) { + AudioResampler::setVolume(left, right); + mVolumeSimd[0] = static_cast<int32_t>(left)<<16; + mVolumeSimd[1] = static_cast<int32_t>(right)<<16; +} + +template <typename T> T max(T a, T b) {return a > b ? a : b;} + +template <typename T> T absdiff(T a, T b) {return a > b ? 
a - b : b - a;} + +template<typename T> +void AudioResamplerDyn::createKaiserFir(Constants &c, double stopBandAtten, + int inSampleRate, int outSampleRate, double tbwCheat) { + T* buf = reinterpret_cast<T*>(memalign(32, (c.mL+1)*c.mHalfNumCoefs*sizeof(T))); + static const double atten = 0.9998; // to avoid ripple overflow + double fcr; + double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten); + + if (inSampleRate < outSampleRate) { // upsample + fcr = max(0.5*tbwCheat - tbw/2, tbw/2); + } else { // downsample + fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2); + } + // create and set filter + firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten); + c.setBuf(buf); + if (mCoefBuffer) { + free(mCoefBuffer); + } + mCoefBuffer = buf; +#ifdef DEBUG_RESAMPLER + // print basic filter stats + printf("L:%d hnc:%d stopBandAtten:%lf fcr:%lf atten:%lf tbw:%lf\n", + c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw); + // test the filter and report results + double fp = (fcr - tbw/2)/c.mL; + double fs = (fcr + tbw/2)/c.mL; + double passMin, passMax, passRipple; + double stopMax, stopRipple; + testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000, + passMin, passMax, passRipple, stopMax, stopRipple); + printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple); + printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple); +#endif +} + +// recursive gcd. Using objdump, it appears the tail recursion is converted to a while loop. +static int gcd(int n, int m) { + if (m == 0) { + return n; + } + return gcd(m, n % m); +} + +static bool isClose(int32_t newSampleRate, int32_t prevSampleRate, + int32_t filterSampleRate, int32_t outSampleRate) { + + // different upsampling ratios do not need a filter change. 
+ if (filterSampleRate != 0 + && filterSampleRate < outSampleRate + && newSampleRate < outSampleRate) + return true; + + // check design criteria again if downsampling is detected. + int pdiff = absdiff(newSampleRate, prevSampleRate); + int adiff = absdiff(newSampleRate, filterSampleRate); + + // allow up to 6% relative change increments. + // allow up to 12% absolute change increments (from filter design) + return pdiff < prevSampleRate>>4 && adiff < filterSampleRate>>3; +} + +void AudioResamplerDyn::setSampleRate(int32_t inSampleRate) { + if (mInSampleRate == inSampleRate) { + return; + } + int32_t oldSampleRate = mInSampleRate; + int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs; + uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift; + bool useS32 = false; + + mInSampleRate = inSampleRate; + + // TODO: Add precalculated Equiripple filters + + if (mFilterQuality != getQuality() || + !isClose(inSampleRate, oldSampleRate, mFilterSampleRate, mSampleRate)) { + mFilterSampleRate = inSampleRate; + mFilterQuality = getQuality(); + + // Begin Kaiser Filter computation + // + // The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB. 
+ // Keep the stop band attenuation no greater than 84-85dB for 32 length S16 filters + // + // For s32 we keep the stop band attenuation at the same as 16b resolution, about + // 96-98dB + // + + double stopBandAtten; + double tbwCheat = 1.; // how much we "cheat" into aliasing + int halfLength; + if (mFilterQuality == DYN_HIGH_QUALITY) { + // 32b coefficients, 64 length + useS32 = true; + stopBandAtten = 98.; + halfLength = 32; + } else if (mFilterQuality == DYN_LOW_QUALITY) { + // 16b coefficients, 16-32 length + useS32 = false; + stopBandAtten = 80.; + if (mSampleRate >= inSampleRate * 2) { + halfLength = 16; + } else { + halfLength = 8; + } + if (mSampleRate >= inSampleRate) { + tbwCheat = 1.05; + } else { + tbwCheat = 1.03; + } + } else { // DYN_MED_QUALITY + // 16b coefficients, 32-64 length + // note: > 64 length filters with 16b coefs can have quantization noise problems + useS32 = false; + stopBandAtten = 84.; + if (mSampleRate >= inSampleRate * 4) { + halfLength = 32; + } else if (mSampleRate >= inSampleRate * 2) { + halfLength = 24; + } else { + halfLength = 16; + } + if (mSampleRate >= inSampleRate) { + tbwCheat = 1.03; + } else { + tbwCheat = 1.01; + } + } + + // determine the number of polyphases in the filterbank. + // for 16b, it is desirable to have 2^(16/2) = 256 phases. + // https://ccrma.stanford.edu/~jos/resample/Relation_Interpolation_Error_Quantization.html + // + // We are a bit more lax on this. + + int phases = mSampleRate / gcd(mSampleRate, inSampleRate); + + // TODO: Once dynamic sample rate change is an option, the code below + // should be modified to execute only when dynamic sample rate change is enabled. + // + // as above, #phases less than 63 is too few phases for accurate linear interpolation. + // we increase the phases to compensate, but more phases means more memory per + // filter and more time to compute the filter. 
+ // + // if we know that the filter will be used for dynamic sample rate changes, + // that would allow us skip this part for fixed sample rate resamplers. + // + while (phases<63) { + phases *= 2; // this code only needed to support dynamic rate changes + } + + if (phases>=256) { // too many phases, always interpolate + phases = 127; + } + + // create the filter + mConstants.set(phases, halfLength, inSampleRate, mSampleRate); + if (useS32) { + createKaiserFir<int32_t>(mConstants, stopBandAtten, + inSampleRate, mSampleRate, tbwCheat); + } else { + createKaiserFir<int16_t>(mConstants, stopBandAtten, + inSampleRate, mSampleRate, tbwCheat); + } + } // End Kaiser filter + + // update phase and state based on the new filter. + const Constants& c(mConstants); + mInBuffer.resize(mChannelCount, c.mHalfNumCoefs); + const uint32_t phaseWrapLimit = c.mL << c.mShift; + // try to preserve as much of the phase fraction as possible for on-the-fly changes + mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction) + * phaseWrapLimit / oldPhaseWrapLimit; + mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case. + mPhaseIncrement = static_cast<uint32_t>(static_cast<double>(phaseWrapLimit) + * inSampleRate / mSampleRate); + + // determine which resampler to use + // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits") + int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0; + int stride = (c.mHalfNumCoefs&7)==0 ? 16 : (c.mHalfNumCoefs&3)==0 ? 8 : 2; + if (locked) { + mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase + } + if (!USE_NEON) { + stride = 2; // C version only + } + // TODO: Remove this for testing + //stride = 2; + mResampleType = RESAMPLETYPE(mChannelCount, locked, stride, !!useS32); +#ifdef DEBUG_RESAMPLER + printf("channels:%d %s stride:%d %s coef:%d shift:%d\n", + mChannelCount, locked ? "locked" : "interpolated", + stride, useS32 ? 
"S32" : "S16", 2*c.mHalfNumCoefs, c.mShift); +#endif +} + +void AudioResamplerDyn::resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider) +{ + // TODO: + // 24 cases - this perhaps can be reduced later, as testing might take too long + switch (mResampleType) { + + // stride 16 (stride 2 for machines that do not support NEON) + case RESAMPLETYPE(1, true, 16, 0): + return resample<1, true, 16>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(2, true, 16, 0): + return resample<2, true, 16>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(1, false, 16, 0): + return resample<1, false, 16>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(2, false, 16, 0): + return resample<2, false, 16>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(1, true, 16, 1): + return resample<1, true, 16>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(2, true, 16, 1): + return resample<2, true, 16>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(1, false, 16, 1): + return resample<1, false, 16>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(2, false, 16, 1): + return resample<2, false, 16>(out, outFrameCount, mConstants.mFirCoefsS32, provider); +#if 0 + // TODO: Remove these? 
+ // stride 8 + case RESAMPLETYPE(1, true, 8, 0): + return resample<1, true, 8>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(2, true, 8, 0): + return resample<2, true, 8>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(1, false, 8, 0): + return resample<1, false, 8>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(2, false, 8, 0): + return resample<2, false, 8>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(1, true, 8, 1): + return resample<1, true, 8>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(2, true, 8, 1): + return resample<2, true, 8>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(1, false, 8, 1): + return resample<1, false, 8>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(2, false, 8, 1): + return resample<2, false, 8>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + // stride 2 (can handle any filter length) + case RESAMPLETYPE(1, true, 2, 0): + return resample<1, true, 2>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(2, true, 2, 0): + return resample<2, true, 2>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(1, false, 2, 0): + return resample<1, false, 2>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(2, false, 2, 0): + return resample<2, false, 2>(out, outFrameCount, mConstants.mFirCoefsS16, provider); + case RESAMPLETYPE(1, true, 2, 1): + return resample<1, true, 2>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(2, true, 2, 1): + return resample<2, true, 2>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(1, false, 2, 1): + return resample<1, false, 2>(out, outFrameCount, mConstants.mFirCoefsS32, provider); + case RESAMPLETYPE(2, false, 2, 1): + return resample<2, false, 2>(out, outFrameCount, 
mConstants.mFirCoefsS32, provider); +#endif + default: + ; // error + } +} + +template<int CHANNELS, bool LOCKED, int STRIDE, typename TC> +void AudioResamplerDyn::resample(int32_t* out, size_t outFrameCount, + const TC* const coefs, AudioBufferProvider* provider) +{ + const Constants& c(mConstants); + int16_t* impulse = mInBuffer.getImpulse(); + size_t inputIndex = mInputIndex; + uint32_t phaseFraction = mPhaseFraction; + const uint32_t phaseIncrement = mPhaseIncrement; + size_t outputIndex = 0; + size_t outputSampleCount = outFrameCount * 2; // stereo output + size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + const uint32_t phaseWrapLimit = c.mL << c.mShift; + + // NOTE: be very careful when modifying the code here. register + // pressure is very high and a small change might cause the compiler + // to generate far less efficient code. + // Always sanity check the result with objdump or test-resample. + + // the following logic is a bit convoluted to keep the main processing loop + // as tight as possible with register allocation. 
+ while (outputIndex < outputSampleCount) { + // buffer is empty, fetch a new one + while (mBuffer.frameCount == 0) { + mBuffer.frameCount = inFrameCount; + provider->getNextBuffer(&mBuffer, + calculateOutputPTS(outputIndex / 2)); + if (mBuffer.raw == NULL) { + goto resample_exit; + } + if (phaseFraction >= phaseWrapLimit) { // read in data + mInBuffer.readAdvance<CHANNELS>( + impulse, c.mHalfNumCoefs, mBuffer.i16, inputIndex); + phaseFraction -= phaseWrapLimit; + while (phaseFraction >= phaseWrapLimit) { + inputIndex++; + if (inputIndex >= mBuffer.frameCount) { + inputIndex -= mBuffer.frameCount; + provider->releaseBuffer(&mBuffer); + break; + } + mInBuffer.readAdvance<CHANNELS>( + impulse, c.mHalfNumCoefs, mBuffer.i16, inputIndex); + phaseFraction -= phaseWrapLimit; + } + } + } + const int16_t* const in = mBuffer.i16; + const size_t frameCount = mBuffer.frameCount; + const int coefShift = c.mShift; + const int halfNumCoefs = c.mHalfNumCoefs; + const int32_t* const volumeSimd = mVolumeSimd; + + // reread the last input in. + mInBuffer.readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); + + // main processing loop + while (CC_LIKELY(outputIndex < outputSampleCount)) { + // caution: fir() is inlined and may be large. + // output will be loaded with the appropriate values + // + // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs] + // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs. 
+ // + fir<CHANNELS, LOCKED, STRIDE>( + &out[outputIndex], + phaseFraction, phaseWrapLimit, + coefShift, halfNumCoefs, coefs, + impulse, volumeSimd); + outputIndex += 2; + + phaseFraction += phaseIncrement; + while (phaseFraction >= phaseWrapLimit) { + inputIndex++; + if (inputIndex >= frameCount) { + goto done; // need a new buffer + } + mInBuffer.readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); + phaseFraction -= phaseWrapLimit; + } + } +done: + // often arrives here when input buffer runs out + if (inputIndex >= frameCount) { + inputIndex -= frameCount; + provider->releaseBuffer(&mBuffer); + // mBuffer.frameCount MUST be zero here. + } + } + +resample_exit: + mInBuffer.setImpulse(impulse); + mInputIndex = inputIndex; + mPhaseFraction = phaseFraction; +} + +// ---------------------------------------------------------------------------- +}; // namespace android diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h new file mode 100644 index 0000000..df1fdbe --- /dev/null +++ b/services/audioflinger/AudioResamplerDyn.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_RESAMPLER_DYN_H +#define ANDROID_AUDIO_RESAMPLER_DYN_H + +#include <stdint.h> +#include <sys/types.h> +#include <cutils/log.h> + +#include "AudioResampler.h" + +namespace android { + +class AudioResamplerDyn: public AudioResampler { +public: + AudioResamplerDyn(int bitDepth, int inChannelCount, int32_t sampleRate, + src_quality quality); + + virtual ~AudioResamplerDyn(); + + virtual void init(); + + virtual void setSampleRate(int32_t inSampleRate); + + virtual void setVolume(int16_t left, int16_t right); + + virtual void resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider); + +private: + + class Constants { // stores the filter constants. + public: + Constants() : + mL(0), mShift(0), mHalfNumCoefs(0), mFirCoefsS16(NULL) + {} + void set(int L, int halfNumCoefs, + int inSampleRate, int outSampleRate); + inline void setBuf(int16_t* buf) { + mFirCoefsS16 = buf; + } + inline void setBuf(int32_t* buf) { + mFirCoefsS32 = buf; + } + + int mL; // interpolation phases in the filter. + int mShift; // right shift to get polyphase index + unsigned int mHalfNumCoefs; // filter half #coefs + union { // polyphase filter bank + const int16_t* mFirCoefsS16; + const int32_t* mFirCoefsS32; + }; + }; + + // Input buffer management for a given input type TI, now (int16_t) + // Is agnostic of the actual type, can work with int32_t and float. 
+ template<typename TI> + class InBuffer { + public: + InBuffer(); + ~InBuffer(); + void init(); + void resize(int CHANNELS, int halfNumCoefs); + + // used for direct management of the mImpulse pointer + inline TI* getImpulse() { + return mImpulse; + } + inline void setImpulse(TI *impulse) { + mImpulse = impulse; + } + template<int CHANNELS> + inline void readAgain(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex); + template<int CHANNELS> + inline void readAdvance(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex); + + private: + // tuning parameter guidelines: 2 <= multiple <= 8 + static const int kStateSizeMultipleOfFilterLength = 4; + + TI* mState; // base pointer for the input buffer storage + TI* mImpulse; // current location of the impulse response (centered) + TI* mRingFull; // mState <= mImpulse < mRingFull + // in general, mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS. + size_t mStateSize; // in units of TI. + }; + + template<int CHANNELS, bool LOCKED, int STRIDE, typename TC> + void resample(int32_t* out, size_t outFrameCount, + const TC* const coefs, AudioBufferProvider* provider); + + template<typename T> + void createKaiserFir(Constants &c, double stopBandAtten, + int inSampleRate, int outSampleRate, double tbwCheat); + + InBuffer<int16_t> mInBuffer; + Constants mConstants; // current set of coefficient parameters + int32_t __attribute__ ((aligned (8))) mVolumeSimd[2]; + int32_t mResampleType; // contains the resample type. + int32_t mFilterSampleRate; // designed filter sample rate. + src_quality mFilterQuality; // designed filter quality. 
+ void* mCoefBuffer; // if a filter is created, this is not null +}; + +// ---------------------------------------------------------------------------- +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/ diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h new file mode 100644 index 0000000..fac3001 --- /dev/null +++ b/services/audioflinger/AudioResamplerFirGen.h @@ -0,0 +1,684 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_GEN_H +#define ANDROID_AUDIO_RESAMPLER_FIR_GEN_H + +namespace android { + +/* + * generates a sine wave at equal steps. + * + * As most of our functions use sine or cosine at equal steps, + * it is very efficient to compute them that way (single multiply and subtract), + * rather than invoking the math library sin() or cos() each time. + * + * SineGen uses Goertzel's Algorithm (as a generator not a filter) + * to calculate sine(wstart + n * wstep) or cosine(wstart + n * wstep) + * by stepping through 0, 1, ... n. 
+ * + * e^i(wstart+wstep) = 2cos(wstep) * e^i(wstart) - e^i(wstart-wstep) + * + * or looking at just the imaginary sine term, as the cosine follows identically: + * + * sin(wstart+wstep) = 2cos(wstep) * sin(wstart) - sin(wstart-wstep) + * + * Goertzel's algorithm is more efficient than the angle addition formula, + * e^i(wstart+wstep) = e^i(wstart) * e^i(wstep), which takes up to + * 4 multiplies and 2 adds (or 3* and 3+) and requires both sine and + * cosine generation due to the complex * complex multiply (full rotation). + * + * See: http://en.wikipedia.org/wiki/Goertzel_algorithm + * + */ + +class SineGen { +public: + SineGen(double wstart, double wstep, bool cosine = false) { + if (cosine) { + mCurrent = cos(wstart); + mPrevious = cos(wstart - wstep); + } else { + mCurrent = sin(wstart); + mPrevious = sin(wstart - wstep); + } + mTwoCos = 2.*cos(wstep); + } + SineGen(double expNow, double expPrev, double twoCosStep) { + mCurrent = expNow; + mPrevious = expPrev; + mTwoCos = twoCosStep; + } + inline double value() const { + return mCurrent; + } + inline void advance() { + double tmp = mCurrent; + mCurrent = mCurrent*mTwoCos - mPrevious; + mPrevious = tmp; + } + inline double valueAdvance() { + double tmp = mCurrent; + mCurrent = mCurrent*mTwoCos - mPrevious; + mPrevious = tmp; + return tmp; + } + +private: + double mCurrent; // current value of sine/cosine + double mPrevious; // previous value of sine/cosine + double mTwoCos; // stepping factor +}; + +/* + * generates a series of sine generators, phase offset by fixed steps. + * + * This is used to generate polyphase sine generators, one per polyphase + * in the filter code below. + * + * The SineGen returned by value() starts at innerStart = outerStart + n*outerStep; + * increments by innerStep. 
+ * + */ + +class SineGenGen { +public: + SineGenGen(double outerStart, double outerStep, double innerStep, bool cosine = false) + : mSineInnerCur(outerStart, outerStep, cosine), + mSineInnerPrev(outerStart-innerStep, outerStep, cosine) + { + mTwoCos = 2.*cos(innerStep); + } + inline SineGen value() { + return SineGen(mSineInnerCur.value(), mSineInnerPrev.value(), mTwoCos); + } + inline void advance() { + mSineInnerCur.advance(); + mSineInnerPrev.advance(); + } + inline SineGen valueAdvance() { + return SineGen(mSineInnerCur.valueAdvance(), mSineInnerPrev.valueAdvance(), mTwoCos); + } + +private: + SineGen mSineInnerCur; // generate the inner sine values (stepped by outerStep). + SineGen mSineInnerPrev; // generate the inner sine previous values + // (behind by innerStep, stepped by outerStep). + double mTwoCos; // the inner stepping factor for the returned SineGen. +}; + +static inline double sqr(double x) { + return x * x; +} + +/* + * rounds a double to the nearest integer for FIR coefficients. + * + * One variant uses noise shaping, which must keep error history + * to work (the err parameter, initialized to 0). + * The other variant is a non-noise shaped version for + * S32 coefficients (noise shaping doesn't gain much). + * + * Caution: No bounds saturation is applied, but isn't needed in this case. + * + * @param x is the value to round. + * + * @param maxval is the maximum integer scale factor expressed as an int64 (for headroom). + * Typically this may be the maximum positive integer+1 (using the fact that double precision + * FIR coefficients generated here are never that close to 1.0 to pose an overflow condition). + * + * @param err is the previous error (actual - rounded) for the previous rounding op. + * For 16b coefficients this can improve stopband dB performance by up to 2dB. 
+ * + * Many variants exist for the noise shaping: http://en.wikipedia.org/wiki/Noise_shaping + * + */ + +static inline int64_t toint(double x, int64_t maxval, double& err) { + double val = x * maxval; + double ival = floor(val + 0.5 + err*0.2); + err = val - ival; + return static_cast<int64_t>(ival); +} + +static inline int64_t toint(double x, int64_t maxval) { + return static_cast<int64_t>(floor(x * maxval + 0.5)); +} + +/* + * Modified Bessel function of the first kind + * http://en.wikipedia.org/wiki/Bessel_function + * + * The formulas are taken from Abramowitz and Stegun, + * _Handbook of Mathematical Functions_ (links below): + * + * http://people.math.sfu.ca/~cbm/aands/page_375.htm + * http://people.math.sfu.ca/~cbm/aands/page_378.htm + * + * http://dlmf.nist.gov/10.25 + * http://dlmf.nist.gov/10.40 + * + * Note we assume x is nonnegative (the function is symmetric, + * pass in the absolute value as needed). + * + * Constants are compile time derived with templates I0Term<> and + * I0ATerm<> to the precision of the compiler. The series can be expanded + * to any precision needed, but currently set around 24b precision. + * + * We use a bit of template math here, constexpr would probably be + * more appropriate for a C++11 compiler. + * + * For the intermediate range 3.75 < x < 15, we use minimax polynomial fit. + * + */ + +template <int N> +struct I0Term { + static const double value = I0Term<N-1>::value / (4. * N * N); +}; + +template <> +struct I0Term<0> { + static const double value = 1.; +}; + +template <int N> +struct I0ATerm { + static const double value = I0ATerm<N-1>::value * (2.*N-1.) * (2.*N-1.) / (8. * N); +}; + +template <> +struct I0ATerm<0> { // 1/sqrt(2*PI); + static const double value = 0.398942280401432677939946059934381868475858631164934657665925; +}; + +#if USE_HORNERS_METHOD +/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ... 
+ * using Horner's Method: http://en.wikipedia.org/wiki/Horner's_method + * + * This has fewer multiplications than Estrin's method below, but has back to back + * floating point dependencies. + * + * On ARM this appears to work slower, so USE_HORNERS_METHOD is not default enabled. + */ + +inline double Poly2(double A, double B, double x) { + return A + x * B; +} + +inline double Poly4(double A, double B, double C, double D, double x) { + return A + x * (B + x * (C + x * (D))); +} + +inline double Poly7(double A, double B, double C, double D, double E, double F, double G, + double x) { + return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G)))))); +} + +inline double Poly9(double A, double B, double C, double D, double E, double F, double G, + double H, double I, double x) { + return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G + x * (H + x * (I)))))))); +} + +#else +/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ... + * using Estrin's Method: http://en.wikipedia.org/wiki/Estrin's_scheme + * + * This is typically faster, perhaps gains about 5-10% overall on ARM processors + * over Horner's method above. 
+ */ + +inline double Poly2(double A, double B, double x) { + return A + B * x; +} + +inline double Poly3(double A, double B, double C, double x, double x2) { + return Poly2(A, B, x) + C * x2; +} + +inline double Poly3(double A, double B, double C, double x) { + return Poly2(A, B, x) + C * x * x; +} + +inline double Poly4(double A, double B, double C, double D, double x, double x2) { + return Poly2(A, B, x) + Poly2(C, D, x) * x2; // same as poly2(poly2, poly2, x2); +} + +inline double Poly4(double A, double B, double C, double D, double x) { + return Poly4(A, B, C, D, x, x * x); +} + +inline double Poly7(double A, double B, double C, double D, double E, double F, double G, + double x) { + double x2 = x * x; + return Poly4(A, B, C, D, x, x2) + Poly3(E, F, G, x, x2) * (x2 * x2); +} + +inline double Poly8(double A, double B, double C, double D, double E, double F, double G, + double H, double x, double x2, double x4) { + return Poly4(A, B, C, D, x, x2) + Poly4(E, F, G, H, x, x2) * x4; +} + +inline double Poly9(double A, double B, double C, double D, double E, double F, double G, + double H, double I, double x) { + double x2 = x * x; +#if 1 + // It does not seem faster to explicitly decompose Poly8 into Poly4, but + // could depend on compiler floating point scheduling. + double x4 = x2 * x2; + return Poly8(A, B, C, D, E, F, G, H, x, x2, x4) + I * (x4 * x4); +#else + double val = Poly4(A, B, C, D, x, x2); + double x4 = x2 * x2; + return val + Poly4(E, F, G, H, x, x2) * x4 + I * (x4 * x4); +#endif +} +#endif + +static inline double I0(double x) { + if (x < 3.75) { + x *= x; + return Poly7(I0Term<0>::value, I0Term<1>::value, + I0Term<2>::value, I0Term<3>::value, + I0Term<4>::value, I0Term<5>::value, + I0Term<6>::value, x); // e < 1.6e-7 + } + if (1) { + /* + * Series expansion coefs are easy to calculate, but are expanded around 0, + * so error is unequal over the interval 0 < x < 3.75, the error being + * significantly better near 0. 
+ * + * A better solution is to use precise minimax polynomial fits. + * + * We use a slightly more complicated solution for 3.75 < x < 15, based on + * the tables in Blair and Edwards, "Stable Rational Minimax Approximations + * to the Modified Bessel Functions I0(x) and I1(x)", Chalk Hill Nuclear Laboratory, + * AECL-4928. + * + * http://www.iaea.org/inis/collection/NCLCollectionStore/_Public/06/178/6178667.pdf + * + * See Table 11 for 0 < x < 15; e < 10^(-7.13). + * + * Note: Beta cannot exceed 15 (hence Stopband cannot exceed 144dB = 24b). + * + * This speeds up overall computation by about 40% over using the else clause below, + * which requires sqrt and exp. + * + */ + + x *= x; + double num = Poly9(-0.13544938430e9, -0.33153754512e8, + -0.19406631946e7, -0.48058318783e5, + -0.63269783360e3, -0.49520779070e1, + -0.24970910370e-1, -0.74741159550e-4, + -0.18257612460e-6, x); + double y = x - 225.; // reflection around 15 (squared) + double den = Poly4(-0.34598737196e8, 0.23852643181e6, + -0.70699387620e3, 0.10000000000e1, y); + return num / den; + +#if IO_EXTENDED_BETA + /* Table 42 for x > 15; e < 10^(-8.11). + * This is used for Beta>15, but is disabled here as + * we never use Beta that high. + * + * NOTE: This should be enabled only for x > 15. + */ + + double y = 1./x; + double z = y - (1./15); + double num = Poly2(0.415079861746e1, -0.5149092496e1, z); + double den = Poly3(0.103150763823e2, -0.14181687413e2, + 0.1000000000e1, z); + return exp(x) * sqrt(y) * num / den; +#endif + } else { + /* + * NOT USED, but reference for large Beta. + * + * Abramowitz and Stegun asymptotic formula. + * works for x > 3.75. + */ + double y = 1./x; + return exp(x) * sqrt(y) * + // note: reciprocal squareroot may be easier! 
+ // http://en.wikipedia.org/wiki/Fast_inverse_square_root + Poly9(I0ATerm<0>::value, I0ATerm<1>::value, + I0ATerm<2>::value, I0ATerm<3>::value, + I0ATerm<4>::value, I0ATerm<5>::value, + I0ATerm<6>::value, I0ATerm<7>::value, + I0ATerm<8>::value, y); // (... e) < 1.9e-7 + } +} + +/* + * calculates the transition bandwidth for a Kaiser filter + * + * Formula 3.2.8, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48 + * Formula 7.76, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542 + * + * @param halfNumCoef is half the number of coefficients per filter phase. + * + * @param stopBandAtten is the stop band attenuation desired. + * + * @return the transition bandwidth in normalized frequency (0 <= f <= 0.5) + */ +static inline double firKaiserTbw(int halfNumCoef, double stopBandAtten) { + return (stopBandAtten - 7.95)/((2.*14.36)*halfNumCoef); +} + +/* + * calculates the fir transfer response of the overall polyphase filter at w. + * + * Calculates the DTFT transfer coefficient H(w) for 0 <= w <= PI, utilizing the + * fact that h[n] is symmetric (cosines only, no complex arithmetic). + * + * We use Goertzel's algorithm to accelerate the computation to essentially + * a single multiply and 2 adds per filter coefficient h[]. + * + * Be careful be careful to consider that h[n] is the overall polyphase filter, + * with L phases, so rescaling H(w)/L is probably what you expect for "unity gain", + * as you only use one of the polyphases at a time. + */ +template <typename T> +static inline double firTransfer(const T* coef, int L, int halfNumCoef, double w) { + double accum = static_cast<double>(coef[0])*0.5; // "center coefficient" from first bank + coef += halfNumCoef; // skip first filterbank (picked up by the last filterbank). +#if SLOW_FIRTRANSFER + /* Original code for reference. This is equivalent to the code below, but slower. 
*/ + for (int i=1 ; i<=L ; ++i) { + for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) { + accum += cos(ix*w)*static_cast<double>(*coef++); + } + } +#else + /* + * Our overall filter is stored striped by polyphases, not a contiguous h[n]. + * We could fetch coefficients in a non-contiguous fashion + * but that will not scale to vector processing. + * + * We apply Goertzel's algorithm directly to each polyphase filter bank instead of + * using cosine generation/multiplication, thereby saving one multiply per inner loop. + * + * See: http://en.wikipedia.org/wiki/Goertzel_algorithm + * Also: Oppenheim and Schafer, _Discrete Time Signal Processing, 3e_, p. 720. + * + * We use the basic recursion to incorporate the cosine steps into real sequence x[n]: + * s[n] = x[n] + (2cosw)*s[n-1] + s[n-2] + * + * y[n] = s[n] - e^(iw)s[n-1] + * = sum_{k=-\infty}^{n} x[k]e^(-iw(n-k)) + * = e^(-iwn) sum_{k=0}^{n} x[k]e^(iwk) + * + * The summation contains the frequency steps we want multiplied by the source + * (similar to a DTFT). + * + * Using symmetry, and just the real part (be careful, this must happen + * after any internal complex multiplications), the polyphase filterbank + * transfer function is: + * + * Hpp[n, w, w_0] = sum_{k=0}^{n} x[k] * cos(wk + w_0) + * = Re{ e^(iwn + iw_0) y[n]} + * = cos(wn+w_0) * s[n] - cos(w(n+1)+w_0) * s[n-1] + * + * using the fact that s[n] of real x[n] is real. + * + */ + double dcos = 2. * cos(L*w); + int start = ((halfNumCoef)*L + 1); + SineGen cc((start - L) * w, w, true); // cosine + SineGen cp(start * w, w, true); // cosine + for (int i=1 ; i<=L ; ++i) { + double sc = 0; + double sp = 0; + for (int j=0 ; j<halfNumCoef ; ++j) { + double tmp = sc; + sc = static_cast<double>(*coef++) + dcos*sc - sp; + sp = tmp; + } + // If we are awfully clever, we can apply Goertzel's algorithm + // again on the sc and sp sequences returned here. 
+ accum += cc.valueAdvance() * sc - cp.valueAdvance() * sp; + } +#endif + return accum*2.; +} + +/* + * evaluates the minimum and maximum |H(f)| bound in a band region. + * + * This is usually done with equally spaced increments in the target band in question. + * The passband is often very small, and sampled that way. The stopband is often much + * larger. + * + * We use the fact that the overall polyphase filter has an additional bank at the end + * for interpolation; hence it is overspecified for the H(f) computation. Thus the + * first polyphase is never actually checked, excepting its first term. + * + * In this code we use the firTransfer() evaluator above, which uses Goertzel's + * algorithm to calculate the transfer function at each point. + * + * TODO: An alternative with equal spacing is the FFT/DFT. An alternative with unequal + * spacing is a chirp transform. + * + * @param coef is the designed polyphase filter banks + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param fstart is the normalized frequency start. + * + * @param fend is the normalized frequency end. + * + * @param steps is the number of steps to take (sampling) between frequency start and end + * + * @param firMin returns the minimum transfer |H(f)| found + * + * @param firMax returns the maximum transfer |H(f)| found + * + * 0 <= f <= 0.5. + * This is used to test passband and stopband performance. 
+ */ +template <typename T> +static void testFir(const T* coef, int L, int halfNumCoef, + double fstart, double fend, int steps, double &firMin, double &firMax) { + double wstart = fstart*(2.*M_PI); + double wend = fend*(2.*M_PI); + double wstep = (wend - wstart)/steps; + double fmax, fmin; + double trf = firTransfer(coef, L, halfNumCoef, wstart); + if (trf<0) { + trf = -trf; + } + fmin = fmax = trf; + wstart += wstep; + for (int i=1; i<steps; ++i) { + trf = firTransfer(coef, L, halfNumCoef, wstart); + if (trf<0) { + trf = -trf; + } + if (trf>fmax) { + fmax = trf; + } + else if (trf<fmin) { + fmin = trf; + } + wstart += wstep; + } + // renormalize - this is only needed for integer filter types + double norm = 1./((1ULL<<(sizeof(T)*8-1))*L); + + firMin = fmin * norm; + firMax = fmax * norm; +} + +/* + * evaluates the |H(f)| lowpass band characteristics. + * + * This function tests the lowpass characteristics for the overall polyphase filter, + * and is used to verify the design. For this case, fp should be set to the + * passband normalized frequency from 0 to 0.5 for the overall filter (thus it + * is the designed polyphase bank value / L). Likewise for fs. + * + * @param coef is the designed polyphase filter banks + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param fp is the passband normalized frequency, 0 < fp < fs < 0.5. + * + * @param fs is the stopband normalized frequency, 0 < fp < fs < 0.5. + * + * @param passSteps is the number of passband sampling steps. + * + * @param stopSteps is the number of stopband sampling steps. + * + * @param passMin is the minimum value in the passband + * + * @param passMax is the maximum value in the passband (useful for scaling). This should + * be less than 1., to avoid sine wave test overflow. + * + * @param passRipple is the passband ripple. Typically this should be less than 0.1 for + * an audio filter. 
Generally speaker/headphone device characteristics will dominate + * the passband term. + * + * @param stopMax is the maximum value in the stopband. + * + * @param stopRipple is the stopband ripple, also known as stopband attenuation. + * Typically this should be greater than ~80dB for low quality, and greater than + * ~100dB for full 16b quality, otherwise aliasing may become noticeable. + * + */ +template <typename T> +static void testFir(const T* coef, int L, int halfNumCoef, + double fp, double fs, int passSteps, int stopSteps, + double &passMin, double &passMax, double &passRipple, + double &stopMax, double &stopRipple) { + double fmin, fmax; + testFir(coef, L, halfNumCoef, 0., fp, passSteps, fmin, fmax); + double d1 = (fmax - fmin)/2.; + passMin = fmin; + passMax = fmax; + passRipple = -20.*log10(1. - d1); // passband ripple + testFir(coef, L, halfNumCoef, fs, 0.5, stopSteps, fmin, fmax); + // fmin is really not important for the stopband. + stopMax = fmax; + stopRipple = -20.*log10(fmax); // stopband ripple/attenuation +} + +/* + * Calculates the overall polyphase filter based on a windowed sinc function. + * + * The windowed sinc is an odd length symmetric filter of exactly L*halfNumCoef*2+1 + * taps for the entire kernel. This is then decomposed into L+1 polyphase filterbanks. + * The last filterbank is used for interpolation purposes (and is mostly composed + * of the first bank shifted by one sample), and is unnecessary if one does + * not do interpolation. + * + * We use the last filterbank for some transfer function calculation purposes, + * so it needs to be generated anyways. + * + * @param coef is the caller allocated space for coefficients. This should be + * exactly (L+1)*halfNumCoef in size. + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param stopBandAtten is the stopband value, should be >50dB. 
+ *
+ * @param fcr is cutoff frequency/sampling rate (<0.5). At this point, the energy
+ * should be 6dB less. (fcr is where the amplitude drops by half). Use the
+ * firKaiserTbw() to calculate the transition bandwidth. fcr is the midpoint
+ * between the stop band and the pass band (fstop+fpass)/2.
+ *
+ * @param atten is the attenuation (generally slightly less than 1).
+ */
+
+template <typename T>
+static inline void firKaiserGen(T* coef, int L, int halfNumCoef,
+        double stopBandAtten, double fcr, double atten) {
+    //
+    // Formula 3.2.5, 3.2.7, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48
+    // Formula 7.75, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542
+    //
+    // See also: http://melodi.ee.washington.edu/courses/ee518/notes/lec17.pdf
+    //
+    // Kaiser window and beta parameter
+    //
+    //          | 0.1102*(A - 8.7)                         A > 50
+    //   beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21)   21 <= A <= 50
+    //          | 0.                                       A < 21
+    //
+    // with A is the desired stop-band attenuation in dBFS
+    //
+    // 30 dB    2.210
+    // 40 dB    3.384
+    // 50 dB    4.538
+    // 60 dB    5.658
+    // 70 dB    6.764
+    // 80 dB    7.865
+    // 90 dB    8.960
+    // 100 dB   10.056
+
+    const int N = L * halfNumCoef; // non-negative half
+    const double beta = 0.1102 * (stopBandAtten - 8.7); // >= 50dB always
+    const double xstep = (2. * M_PI) * fcr / L;
+    const double xfrac = 1. / N;
+    const double yscale = atten * L / (I0(beta) * M_PI);
+
+    // We use sine generators, which computes sines on regular step intervals.
+    // This speeds up overall computation about 40% from computing the sine directly.
+
+    SineGenGen sgg(0., xstep, L*xstep); // generates sine generators (one per polyphase)
+
+    for (int i=0 ; i<=L ; ++i) { // generate an extra set of coefs for interpolation
+
+        // computation for a single polyphase of the overall filter.
+        SineGen sg = sgg.valueAdvance(); // current sine generator for "j" inner loop.
+        double err = 0; // for noise shaping on int16_t coefficients (over each polyphase)
+
+        // ix walks the overall (one-sided) kernel with stride L, selecting the
+        // taps that belong to polyphase i; j counts the halfNumCoef taps of
+        // this phase.
+        for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) {
+            double y;
+            if (CC_LIKELY(ix)) {
+                double x = static_cast<double>(ix);
+
+                // sine generator: sg.valueAdvance() returns sin(ix*xstep);
+                // Kaiser window I0 term times windowed-sinc sample at tap ix.
+                y = I0(beta * sqrt(1.0 - sqr(x * xfrac))) * yscale * sg.valueAdvance() / x;
+            } else {
+                y = 2. * atten * fcr; // center of filter, sinc(0) = 1.
+                sg.advance();
+            }
+
+            // NOTE(review): toint() presumably scales y to Q(bits-1) and
+            // rounds; the err overload carries the rounding residue forward
+            // for first-order noise shaping -- confirm against toint().
+            // (caution!) float version does not need rounding
+            if (is_same<T, int16_t>::value) { // int16_t needs noise shaping
+                *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1), err));
+            } else {
+                *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1)));
+            }
+        }
+    }
+}
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_GEN_H*/
diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h
new file mode 100644
index 0000000..bf2163f
--- /dev/null
+++ b/services/audioflinger/AudioResamplerFirOps.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_OPS_H +#define ANDROID_AUDIO_RESAMPLER_FIR_OPS_H + +namespace android { + +#if defined(__arm__) && !defined(__thumb__) +#define USE_INLINE_ASSEMBLY (true) +#else +#define USE_INLINE_ASSEMBLY (false) +#endif + +#if USE_INLINE_ASSEMBLY && defined(__ARM_NEON__) +#define USE_NEON (true) +#include <arm_neon.h> +#else +#define USE_NEON (false) +#endif + +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + +static inline +int32_t mulRL(int left, int32_t in, uint32_t vRL) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smultb %[out], %[in], %[vRL] \n" + : [out]"=r"(out) + : [in]"%r"(in), [vRL]"r"(vRL) + : ); + } else { + asm( "smultt %[out], %[in], %[vRL] \n" + : [out]"=r"(out) + : [in]"%r"(in), [vRL]"r"(vRL) + : ); + } + return out; +#else + int16_t v = left ? static_cast<int16_t>(vRL) : static_cast<int16_t>(vRL>>16); + return static_cast<int32_t>((static_cast<int64_t>(in) * v) >> 16); +#endif +} + +static inline +int32_t mulAdd(int16_t in, int16_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smlabb %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + v * in; +#endif +} + +static inline +int32_t mulAdd(int16_t in, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smlawb %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 16); +#endif +} + +static inline +int32_t mulAdd(int32_t in, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smmla %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + 
static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 32); +#endif +} + +static inline +int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smlabb %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } else { + asm( "smlabt %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } + return out; +#else + int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16); + return a + v * s; +#endif +} + +static inline +int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smlawb %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } else { + asm( "smlawt %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } + return out; +#else + int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16); + return a + static_cast<int32_t>((static_cast<int64_t>(v) * s) >> 16); +#endif +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/ diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h new file mode 100644 index 0000000..38e387c --- /dev/null +++ b/services/audioflinger/AudioResamplerFirProcess.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
+
+namespace android {
+
+// depends on AudioResamplerFirOps.h
+
+/*
+ * Multiply-accumulate a single frame into the stereo accumulators l and r.
+ *
+ * CHANNELS == 2: samples points at an interleaved L/R int16_t pair, loaded as
+ * one 32-bit word and combined with coef per channel via mulAddRL().
+ * CHANNELS == 1 (mono): the sample is accumulated into l, and r is set to the
+ * same value.
+ *
+ * NOTE(review): the 32-bit load assumes 2-byte-aligned uint32_t loads are
+ * acceptable on the target -- confirm for non-ARM/x86 targets.
+ */
+template<int CHANNELS, typename TC>
+static inline
+void mac(
+        int32_t& l, int32_t& r,
+        const TC coef,
+        const int16_t* samples)
+{
+    if (CHANNELS == 2) {
+        uint32_t rl = *reinterpret_cast<const uint32_t*>(samples);
+        l = mulAddRL(1, rl, coef, l);
+        r = mulAddRL(0, rl, coef, r);
+    } else {
+        r = l = mulAdd(samples[0], coef, l);
+    }
+}
+
+/*
+ * Like mac(), but first forms a linearly interpolated coefficient:
+ *
+ *   sinc = coef_0 + lerp * (coef_1 - coef_0)
+ *
+ * where lerp is an unsigned Q15 fraction; the "((coef_1-coef_0)<<1) ... >>16"
+ * and mulAdd() forms implement that Q15 multiply for 16-bit and 32-bit
+ * coefficients respectively.
+ */
+template<int CHANNELS, typename TC>
+static inline
+void interpolate(
+        int32_t& l, int32_t& r,
+        const TC coef_0, const TC coef_1,
+        const int16_t lerp, const int16_t* samples)
+{
+    TC sinc;
+
+    if (is_same<TC, int16_t>::value) {
+        sinc = (lerp * ((coef_1-coef_0)<<1)>>16) + coef_0;
+    } else {
+        sinc = mulAdd(lerp, (coef_1-coef_0)<<1, coef_0);
+    }
+    if (CHANNELS == 2) {
+        uint32_t rl = *reinterpret_cast<const uint32_t*>(samples);
+        l = mulAddRL(1, rl, sinc, l);
+        r = mulAddRL(0, rl, sinc, r);
+    } else {
+        r = l = mulAdd(samples[0], sinc, l);
+    }
+}
+
+/*
+ * Calculates a single output sample (two stereo frames).
+ *
+ * This function computes both the positive half FIR dot product and
+ * the negative half FIR dot product, accumulates, and then applies the volume.
+ *
+ * This is a locked phase filter (it does not compute the interpolation).
+ *
+ * Use fir() to compute the proper coefficient pointers for a polyphase
+ * filter bank.
+ */ + +template <int CHANNELS, int STRIDE, typename TC> +static inline +void ProcessL(int32_t* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + int32_t l = 0; + int32_t r = 0; + do { + mac<CHANNELS>(l, r, *coefsP++, sP); + sP -= CHANNELS; + mac<CHANNELS>(l, r, *coefsN++, sN); + sN += CHANNELS; + } while (--count > 0); + out[0] += 2 * mulRL(0, l, volumeLR[0]); // Note: only use top 16b + out[1] += 2 * mulRL(0, r, volumeLR[1]); // Note: only use top 16b +} + +/* + * Calculates a single output sample (two stereo frames) interpolating phase. + * + * This function computes both the positive half FIR dot product and + * the negative half FIR dot product, accumulates, and then applies the volume. + * + * This is an interpolated phase filter. + * + * Use fir() to compute the proper coefficient pointers for a polyphase + * filter bank. + */ + +template <int CHANNELS, int STRIDE, typename TC> +static inline +void Process(int32_t* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TC* coefsP1, + const TC* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + (void) coefsP1; // suppress unused parameter warning + (void) coefsN1; + if (sizeof(*coefsP)==4) { + lerpP >>= 16; // ensure lerpP is 16b + } + int32_t l = 0; + int32_t r = 0; + for (size_t i = 0; i < count; ++i) { + interpolate<CHANNELS>(l, r, coefsP[0], coefsP[count], lerpP, sP); + coefsP++; + sP -= CHANNELS; + interpolate<CHANNELS>(l, r, coefsN[count], coefsN[0], lerpP, sN); + coefsN++; + sN += CHANNELS; + } + out[0] += 2 * mulRL(0, l, volumeLR[0]); // Note: only use top 16b + out[1] += 2 * mulRL(0, r, volumeLR[1]); // Note: only use top 16b +} + +/* + * Calculates a single output sample (two stereo frames) from input sample pointer. 
+ *
+ * This sets up the params for the accelerated Process() and ProcessL()
+ * functions to do the appropriate dot products.
+ *
+ * @param out should point to the output buffer with at least enough space for 2 output frames.
+ *
+ * @param phase is the fractional distance between input samples for interpolation:
+ * phase >= 0 && phase < phaseWrapLimit.  It can be thought of as a rational fraction
+ * of phase/phaseWrapLimit.
+ *
+ * @param phaseWrapLimit is #polyphases<<coefShift, where #polyphases is the number of polyphases
+ * in the polyphase filter. Likewise, #polyphases can be obtained as (phaseWrapLimit>>coefShift).
+ *
+ * @param coefShift gives the bit alignment of the polyphase index in the phase parameter.
+ *
+ * @param halfNumCoefs is half the number of coefficients per polyphase filter. Since the
+ * overall filterbank is odd-length symmetric, only halfNumCoefs need be stored.
+ *
+ * @param coefs is the polyphase filter bank, starting from polyphase index 0, and ranging to
+ * and including the #polyphases.  Each polyphase of the filter has half-length halfNumCoefs
+ * (due to symmetry). The total size of the filter bank in coefficients is
+ * (#polyphases+1)*halfNumCoefs.
+ *
+ * The filter bank coefs should be aligned to a minimum of 16 bytes (preferably to cache line).
+ *
+ * The coefs should be attenuated (to compensate for passband ripple)
+ * if storing back into the native format.
+ *
+ * @param samples are unaligned input samples.  The position is in the "middle" of the
+ * sample array with respect to the FIR filter:
+ * the negative half of the filter is dot product from samples+1 to samples+halfNumCoefs;
+ * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1.
+ *
+ * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
+ * expressed as an S32 integer.  A negative value inverts the channel 180 degrees.
+ * The pointer volumeLR should be aligned to a minimum of 8 bytes. + * A typical value for volume is 0x1000 to align to a unity gain output of 20.12. + * + * In between calls to filterCoefficient, the phase is incremented by phaseIncrement, where + * phaseIncrement is calculated as inputSampling * phaseWrapLimit / outputSampling. + * + * The filter polyphase index is given by indexP = phase >> coefShift. Due to + * odd length symmetric filter, the polyphase index of the negative half depends on + * whether interpolation is used. + * + * The fractional siting between the polyphase indices is given by the bits below coefShift: + * + * lerpP = phase << 32 - coefShift >> 1; // for 32 bit unsigned phase multiply + * lerpP = phase << 32 - coefShift >> 17; // for 16 bit unsigned phase multiply + * + * For integer types, this is expressed as: + * + * lerpP = phase << sizeof(phase)*8 - coefShift + * >> (sizeof(phase)-sizeof(*coefs))*8 + 1; + * + */ + +template<int CHANNELS, bool LOCKED, int STRIDE, typename TC> +static inline +void fir(int32_t* const out, + const uint32_t phase, const uint32_t phaseWrapLimit, + const int coefShift, const int halfNumCoefs, const TC* const coefs, + const int16_t* const samples, const int32_t* const volumeLR) +{ + // NOTE: be very careful when modifying the code here. register + // pressure is very high and a small change might cause the compiler + // to generate far less efficient code. + // Always sanity check the result with objdump or test-resample. + + if (LOCKED) { + // locked polyphase (no interpolation) + // Compute the polyphase filter index on the positive and negative side. + uint32_t indexP = phase >> coefShift; + uint32_t indexN = (phaseWrapLimit - phase) >> coefShift; + const TC* coefsP = coefs + indexP*halfNumCoefs; + const TC* coefsN = coefs + indexN*halfNumCoefs; + const int16_t* sP = samples; + const int16_t* sN = samples + CHANNELS; + + // dot product filter. 
+ ProcessL<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, sP, sN, volumeLR); + } else { + // interpolated polyphase + // Compute the polyphase filter index on the positive and negative side. + uint32_t indexP = phase >> coefShift; + uint32_t indexN = (phaseWrapLimit - phase - 1) >> coefShift; // one's complement. + const TC* coefsP = coefs + indexP*halfNumCoefs; + const TC* coefsN = coefs + indexN*halfNumCoefs; + const TC* coefsP1 = coefsP + halfNumCoefs; + const TC* coefsN1 = coefsN + halfNumCoefs; + const int16_t* sP = samples; + const int16_t* sN = samples + CHANNELS; + + // Interpolation fraction lerpP derived by shifting all the way up and down + // to clear the appropriate bits and align to the appropriate level + // for the integer multiply. The constants should resolve in compile time. + // + // The interpolated filter coefficient is derived as follows for the pos/neg half: + // + // interpolated[P] = index[P]*lerpP + index[P+1]*(1-lerpP) + // interpolated[N] = index[N+1]*lerpP + index[N]*(1-lerpP) + uint32_t lerpP = phase << (sizeof(phase)*8 - coefShift) + >> ((sizeof(phase)-sizeof(*coefs))*8 + 1); + + // on-the-fly interpolated dot product filter + Process<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR); + } +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/ diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h new file mode 100644 index 0000000..f311cef --- /dev/null +++ b/services/audioflinger/AudioResamplerFirProcessNeon.h @@ -0,0 +1,1149 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H +#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H + +namespace android { + +// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h + +#if USE_NEON +// +// NEON specializations are enabled for Process() and ProcessL() +// +// TODO: Stride 16 and Stride 8 can be combined with one pass stride 8 (if necessary) +// and looping stride 16 (or vice versa). This has some polyphase coef data alignment +// issues with S16 coefs. Consider this later. + +// Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out. 
+#define ASSEMBLY_ACCUMULATE_MONO \ + "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes */\ + "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output */\ + "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums */\ + "vpadd.s32 d0, d0, d0 \n"/* (1+4d) and replicate L/R */\ + "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume */\ + "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating) */\ + "vst1.s32 {d3}, %[out] \n"/* (2+2d) store result */ + +#define ASSEMBLY_ACCUMULATE_STEREO \ + "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes*/\ + "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output*/\ + "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums from q0*/\ + "vpadd.s32 d8, d8, d9 \n"/* (1) add all 4 partial sums from q4*/\ + "vpadd.s32 d0, d0, d8 \n"/* (1+4d) combine into L/R*/\ + "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume*/\ + "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating)*/\ + "vst1.s32 {d3}, %[out] \n"/* (2+2d)store result*/ + +template <> +inline void ProcessL<1, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) 
+ "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply (reversed)samples by coef + "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed)samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q10" + ); +} + +template <> +inline void ProcessL<2, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q10}, [%[coefsN0]:128]! 
\n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + "vrev64.16 q3, q3 \n"// (0 combines+) reverse right positive + + "vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d6, d17 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q4, d7, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left + "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left + "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right + "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q10" + ); +} + +template <> +inline void Process<1, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15 + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q9}, [%[coefsP1]:128]! 
\n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + "vadd.s16 q8, q8, q9 \n"// (1+2d) interpolate (step3) 1st set + "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply reversed samples by coef + "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void Process<2, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params 
+ const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + "vrev64.16 q3, q3 \n"// (1) reverse 8 frames of the right positive + + "vadd.s16 q8, q8, q9 \n"// (1+1d) interpolate (step3) 1st set + "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set + + "vmlal.s16 q0, d4, d17 \n"// (1) multiply reversed samples left + "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples left + "vmlal.s16 q4, d6, d17 \n"// (1) multiply reversed samples right + "vmlal.s16 q4, d7, d16 \n"// (1) multiply reversed samples right + "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left + "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left + "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right + "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + 
ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void ProcessL<1, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// accumulate result + "vadd.s32 q0, q0, q13 \n"// accumulate result + + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + "subs %[count], %[count], #8 \n"// update loop counter + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void ProcessL<2, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN0]:128]! 
\n"// load 4 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result + "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result + + "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q4, q4, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result + "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result + + "subs %[count], %[count], #8 \n"// update loop counter + "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", 
"memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<1, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q14, q15}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vsub.s32 q12, q12, q8 \n"// interpolate (step1) + "vsub.s32 q13, q13, q9 \n"// interpolate (step1) + "vsub.s32 q14, q14, q10 \n"// interpolate (step1) + "vsub.s32 q15, q15, q11 \n"// interpolate (step1) + + "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2) + + "vadd.s32 q8, q8, q12 \n"// interpolate (step3) + "vadd.s32 q9, q9, q13 \n"// interpolate (step3) + "vadd.s32 q10, q10, q14 \n"// interpolate (step3) + "vadd.s32 q11, q11, q15 \n"// interpolate (step3) + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// accumulate result + "vadd.s32 q0, q0, q13 \n"// accumulate result + + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + "subs %[count], %[count], #8 \n"// update loop counter + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); 
+} + +template <> +inline void Process<2, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs + + "vsub.s32 q12, q12, q8 \n"// interpolate (step1) + "vsub.s32 q13, q13, q9 \n"// interpolate (step1) + "vsub.s32 q14, q14, q10 \n"// interpolate (step1) + "vsub.s32 q15, q15, q11 \n"// interpolate (step1) + + "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2) + + "vadd.s32 q8, q8, q12 \n"// interpolate (step3) + "vadd.s32 q9, q9, q13 \n"// interpolate (step3) + "vadd.s32 q10, q10, q14 \n"// interpolate (step3) + "vadd.s32 q11, q11, q15 \n"// interpolate (step3) + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d11, #15 \n"// extend samples to 
31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result + "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result + + "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q4, q4, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result + "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result + + "subs %[count], %[count], #8 \n"// update loop counter + "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void ProcessL<1, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const 
volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// (2+0d) load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// (2) load 4 16-bits mono samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d20}, [%[coefsN0]:64]! \n"// (1) load 4 16-bits coefs + + "vrev64.16 d4, d4 \n"// (1) reversed s3, s2, s1, s0, s7, s6, s5, s4 + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed)samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #8 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q10" + ); +} + +template <> +inline void ProcessL<2, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// (2+0d) load 8 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// (2) load 8 16-bits stereo samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d20}, [%[coefsN0]:64]! 
\n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d5, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d6, d20 \n"// (1) multiply samples left + "vmlal.s16 q4, d7, d20 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q10" + ); +} + +template <> +inline void Process<1, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15 + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// (2+0d) load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// (2) load 4 16-bits mono samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d17}, [%[coefsP1]:64]! \n"// (1) load 4 16-bits coefs for interpolation + "vld1.16 {d20}, [%[coefsN1]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d21}, [%[coefsN0]:64]! 
\n"// (1) load 4 16-bits coefs for interpolation + + "vsub.s16 d17, d17, d16 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 d21, d21, d20 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 d17, d17, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 d21, d21, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 d4, d4 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + "vadd.s16 d16, d16, d17 \n"// (1+2d) interpolate (step3) 1st set + "vadd.s16 d20, d20, d21 \n"// (1+1d) interpolate (step3) 2nd set + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d16 \n"// (1+0d) multiply (reversed)by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void Process<2, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// (3+0d) load 8 16-bits 
stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d17}, [%[coefsP1]:64]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {d20}, [%[coefsN1]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d21}, [%[coefsN0]:64]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 d17, d17, d16 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 d21, d21, d20 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 d17, d17, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 d21, d21, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + + "vadd.s16 d16, d16, d17 \n"// (1+1d) interpolate (step3) 1st set + "vadd.s16 d20, d20, d21 \n"// (1+1d) interpolate (step3) 2nd set + + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d5, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d6, d20 \n"// (1) multiply samples left + "vmlal.s16 q4, d7, d20 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void ProcessL<1, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 
1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// load 4 16-bits mono samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs + + "vrev64.16 d4, d4 \n"// reverse 2 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// (stall) extend samples to 31 bits + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// (stall) accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q14" + ); +} + +template <> +inline void ProcessL<2, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8}, [%[coefsP0]:128]! 
\n"// load 4 32-bits coefs + "vld1.32 {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 2 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef + "vqrdmulh.s32 q15, q15, q10 \n"// multiply samples by coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q4, q4, q13 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<1, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// load 4 16-bits mono samples + "vld1.32 {q8}, [%[coefsP0]:128]! 
\n"// load 4 32-bits coefs + "vld1.32 {q9}, [%[coefsP1]:128]! \n"// load 4 32-bits coefs for interpolation + "vld1.32 {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation + + "vrev64.16 d4, d4 \n"// reverse 2 frames of the positive side + + "vsub.s32 q9, q9, q8 \n"// interpolate (step1) 1st set of coefs + "vsub.s32 q11, q11, q10 \n"// interpolate (step1) 2nd set of coets + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q9, q9, d2[0] \n"// interpolate (step2) 1st set of coefs + "vqrdmulh.s32 q11, q11, d2[0] \n"// interpolate (step2) 2nd set of coefs + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + + "vadd.s32 q8, q8, q9 \n"// interpolate (step3) 1st set + "vadd.s32 q10, q10, q11 \n"// interpolate (step4) 2nd set + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsP1] "+r" (coefsP1), + [coefsN0] "+r" (coefsN), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q14" + ); +} + +template <> +inline +void Process<2, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= 
CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + "vld2.16 {d4, d5}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q9}, [%[coefsP1]:128]! \n"// load 4 32-bits coefs for interpolation + "vld1.32 {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation + + "vrev64.16 q2, q2 \n"// (reversed) 2 frames of the positive side + + "vsub.s32 q9, q9, q8 \n"// interpolate (step1) 1st set of coefs + "vsub.s32 q11, q11, q10 \n"// interpolate (step1) 2nd set of coets + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q9, q9, d2[0] \n"// interpolate (step2) 1st set of coefs + "vqrdmulh.s32 q11, q11, d2[1] \n"// interpolate (step3) 2nd set of coefs + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vadd.s32 q8, q8, q9 \n"// interpolate (step3) 1st set + "vadd.s32 q10, q10, q11 \n"// interpolate (step4) 2nd set + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q4, q4, q13 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + 
: [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsP1] "+r" (coefsP1), + [coefsN0] "+r" (coefsN), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +#endif //USE_NEON + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/ diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp index a8a5169..59b4770 100644 --- a/services/audioflinger/Effects.cpp +++ b/services/audioflinger/Effects.cpp @@ -116,8 +116,9 @@ status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle) continue; } // first non destroyed handle is considered in control - if (controlHandle == NULL) + if (controlHandle == NULL) { controlHandle = h; + } if (h->priority() <= priority) { break; } @@ -911,18 +912,15 @@ AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect, } int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int); mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset); - if (mCblkMemory != 0) { - mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer()); - - if (mCblk != NULL) { - new(mCblk) effect_param_cblk_t(); - mBuffer = (uint8_t *)mCblk + bufOffset; - } - } else { + if (mCblkMemory == 0 || + (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) { ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t)); + mCblkMemory.clear(); return; } + new(mCblk) effect_param_cblk_t(); + mBuffer = (uint8_t *)mCblk + bufOffset; } AudioFlinger::EffectHandle::~EffectHandle() @@ -939,6 +937,11 @@ AudioFlinger::EffectHandle::~EffectHandle() disconnect(false); } +status_t AudioFlinger::EffectHandle::initCheck() +{ + return mClient == 0 || mCblkMemory != 0 ? 
OK : NO_MEMORY; +} + status_t AudioFlinger::EffectHandle::enable() { ALOGV("enable %p", this); diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h index b717857..50535a2 100644 --- a/services/audioflinger/Effects.h +++ b/services/audioflinger/Effects.h @@ -169,6 +169,7 @@ public: const sp<IEffectClient>& effectClient, int32_t priority); virtual ~EffectHandle(); + virtual status_t initCheck(); // IEffect virtual status_t enable(); diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp index f27ea17..7126e92 100644 --- a/services/audioflinger/FastMixer.cpp +++ b/services/audioflinger/FastMixer.cpp @@ -459,8 +459,9 @@ bool FastMixer::threadLoop() } int64_t pts; - if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) + if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) { pts = AudioBufferProvider::kInvalidPTS; + } // process() is CPU-bound mixer->process(pts); diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h index 43b77f3..4b6c74d 100644 --- a/services/audioflinger/PlaybackTracks.h +++ b/services/audioflinger/PlaybackTracks.h @@ -34,6 +34,7 @@ public: int uid, IAudioFlinger::track_flags_t flags); virtual ~Track(); + virtual status_t initCheck() const; static void appendDumpHeader(String8& result); void dump(char* buffer, size_t size); diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h index 57de568..5ef6f58 100644 --- a/services/audioflinger/RecordTracks.h +++ b/services/audioflinger/RecordTracks.h @@ -59,5 +59,4 @@ private: // releaseBuffer() not overridden bool mOverflow; // overflow on most recent attempt to fill client buffer - AudioRecordServerProxy* mAudioRecordServerProxy; }; diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp index 2f71db7..2b37761 100644 --- a/services/audioflinger/Threads.cpp +++ b/services/audioflinger/Threads.cpp @@ -269,8 
+269,8 @@ AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio : Thread(false /*canCallJava*/), mType(type), mAudioFlinger(audioFlinger), - // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are - // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters() + // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize + // are set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters() mParamStatus(NO_ERROR), //FIXME: mStandby should be true here. Is this some kind of hack? mStandby(false), mOutDevice(outDevice), mInDevice(inDevice), @@ -297,6 +297,17 @@ AudioFlinger::ThreadBase::~ThreadBase() } } +status_t AudioFlinger::ThreadBase::readyToRun() +{ + status_t status = initCheck(); + if (status == NO_ERROR) { + ALOGI("AudioFlinger's thread %p ready to run", this); + } else { + ALOGE("No working audio driver found."); + } + return status; +} + void AudioFlinger::ThreadBase::exit() { ALOGV("ThreadBase::exit"); @@ -369,7 +380,13 @@ void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32 void AudioFlinger::ThreadBase::processConfigEvents() { - mLock.lock(); + Mutex::Autolock _l(mLock); + processConfigEvents_l(); +} + +// post condition: mConfigEvents.isEmpty() +void AudioFlinger::ThreadBase::processConfigEvents_l() +{ while (!mConfigEvents.isEmpty()) { ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size()); ConfigEvent *event = mConfigEvents[0]; @@ -377,32 +394,31 @@ void AudioFlinger::ThreadBase::processConfigEvents() // release mLock before locking AudioFlinger mLock: lock order is always // AudioFlinger then ThreadBase to avoid cross deadlock mLock.unlock(); - switch(event->type()) { - case CFG_EVENT_PRIO: { - PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event); - // FIXME Need to understand why this has be done asynchronously - int err = 
requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(), - true /*asynchronous*/); - if (err != 0) { - ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; " - "error %d", - prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err); - } - } break; - case CFG_EVENT_IO: { - IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event); - mAudioFlinger->mLock.lock(); + switch (event->type()) { + case CFG_EVENT_PRIO: { + PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event); + // FIXME Need to understand why this has be done asynchronously + int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(), + true /*asynchronous*/); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err); + } + } break; + case CFG_EVENT_IO: { + IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event); + { + Mutex::Autolock _l(mAudioFlinger->mLock); audioConfigChanged_l(ioEvent->event(), ioEvent->param()); - mAudioFlinger->mLock.unlock(); - } break; - default: - ALOGE("processConfigEvents() unknown event type %d", event->type()); - break; + } + } break; + default: + ALOGE("processConfigEvents() unknown event type %d", event->type()); + break; } delete event; mLock.lock(); } - mLock.unlock(); } void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) @@ -427,6 +443,8 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) result.append(buffer); snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount); result.append(buffer); + snprintf(buffer, SIZE, "HAL buffer size: %u bytes\n", mBufferSize); + result.append(buffer); snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount); result.append(buffer); snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask); @@ -739,8 +757,7 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( int sessionId, 
effect_descriptor_t *desc, int *enabled, - status_t *status - ) + status_t *status) { sp<EffectModule> effect; sp<EffectHandle> handle; @@ -829,7 +846,10 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( } // create effect handle and connect it to effect module handle = new EffectHandle(effect, client, effectClient, priority); - lStatus = effect->addHandle(handle.get()); + lStatus = handle->initCheck(); + if (lStatus == OK) { + lStatus = effect->addHandle(handle.get()); + } if (enabled != NULL) { *enabled = (int)effect->isEnabled(); } @@ -850,9 +870,7 @@ Exit: handle.clear(); } - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return handle; } @@ -1002,7 +1020,7 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge type_t type) : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type), mNormalFrameCount(0), mMixBuffer(NULL), - mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0), + mSuspended(0), mBytesWritten(0), mActiveTracksGeneration(0), // mStreamTypes[] initialized in constructor body mOutput(output), @@ -1060,7 +1078,7 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge AudioFlinger::PlaybackThread::~PlaybackThread() { mAudioFlinger->unregisterWriter(mNBLogWriter); - delete [] mAllocMixBuffer; + delete[] mMixBuffer; } void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args) @@ -1150,16 +1168,6 @@ void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& } // Thread virtuals -status_t AudioFlinger::PlaybackThread::readyToRun() -{ - status_t status = initCheck(); - if (status == NO_ERROR) { - ALOGI("AudioFlinger's thread %p ready to run", this); - } else { - ALOGE("No working audio driver found."); - } - return status; -} void AudioFlinger::PlaybackThread::onFirstRef() { @@ -1182,7 +1190,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac uint32_t sampleRate, 
audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, const sp<IMemory>& sharedBuffer, int sessionId, IAudioFlinger::track_flags_t *flags, @@ -1190,6 +1198,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac int uid, status_t *status) { + size_t frameCount = *pFrameCount; sp<Track> track; status_t lStatus; @@ -1258,6 +1267,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac } } } + *pFrameCount = frameCount; if (mType == DIRECT) { if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) { @@ -1326,8 +1336,13 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac track = TimedTrack::create(this, client, streamType, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId, uid); } - if (track == 0 || track->getCblk() == NULL || track->name() < 0) { - lStatus = NO_MEMORY; + + // new Track always returns non-NULL, + // but TimedTrack::create() is a factory that could fail by returning NULL + lStatus = track != 0 ? 
track->initCheck() : (status_t) NO_MEMORY; + if (lStatus != NO_ERROR) { + ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus); + track.clear(); goto Exit; } @@ -1352,9 +1367,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return track; } @@ -1642,7 +1655,8 @@ void AudioFlinger::PlaybackThread::readOutputParameters() mFormat); } mFrameSize = audio_stream_frame_size(&mOutput->stream->common); - mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize; + mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common); + mFrameCount = mBufferSize / mFrameSize; if (mFrameCount & 15) { ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames", mFrameCount); @@ -1699,11 +1713,11 @@ void AudioFlinger::PlaybackThread::readOutputParameters() ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, mNormalFrameCount); - delete[] mAllocMixBuffer; - size_t align = (mFrameSize < sizeof(int16_t)) ? 
sizeof(int16_t) : mFrameSize; - mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1]; - mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align); - memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize); + delete[] mMixBuffer; + size_t normalBufferSize = mNormalFrameCount * mFrameSize; + // For historical reasons mMixBuffer is int16_t[], but mFrameSize can be odd (such as 1) + mMixBuffer = new int16_t[(normalBufferSize + 1) >> 1]; + memset(mMixBuffer, 0, normalBufferSize); // force reconfiguration of effect chains and engines to take new buffer size and audio // parameters into account @@ -1837,7 +1851,7 @@ void AudioFlinger::PlaybackThread::threadLoop_removeTracks( const Vector< sp<Track> >& tracksToRemove) { size_t count = tracksToRemove.size(); - if (count) { + if (count > 0) { for (size_t i = 0 ; i < count ; i++) { const sp<Track>& track = tracksToRemove.itemAt(i); if (!track->isOutputTrack()) { @@ -1913,7 +1927,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() // otherwise use the HAL / AudioStreamOut directly } else { // Direct output and offload threads - size_t offset = (mCurrentWriteLength - mBytesRemaining) / sizeof(int16_t); + size_t offset = (mCurrentWriteLength - mBytesRemaining); if (mUseAsyncWrite) { ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request"); mWriteAckSequence += 2; @@ -1924,7 +1938,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() // FIXME We should have an implementation of timestamps for direct output threads. // They are used e.g for multichannel PCM playback over HDMI. 
bytesWritten = mOutput->stream->write(mOutput->stream, - mMixBuffer + offset, mBytesRemaining); + (char *)mMixBuffer + offset, mBytesRemaining); if (mUseAsyncWrite && ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) { // do not wait for async callback in case of error of full write @@ -2405,7 +2419,7 @@ if (mType == MIXER) { void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove) { size_t count = tracksToRemove.size(); - if (count) { + if (count > 0) { for (size_t i=0 ; i<count ; i++) { const sp<Track>& track = tracksToRemove.itemAt(i); mActiveTracks.remove(track); @@ -2798,7 +2812,7 @@ void AudioFlinger::MixerThread::threadLoop_sleepTime() sleepTime = idleSleepTime; } } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) { - memset (mMixBuffer, 0, mixBufferSize); + memset(mMixBuffer, 0, mixBufferSize); sleepTime = 0; ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED), "anticipated start"); @@ -3024,27 +3038,22 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac // +1 for rounding and +1 for additional sample needed for interpolation desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1; // add frames already consumed but not yet released by the resampler - // because cblk->framesReady() will include these frames + // because mAudioTrackServerProxy->framesReady() will include these frames desiredFrames += mAudioMixer->getUnreleasedFrames(track->name()); +#if 0 // the minimum track buffer size is normally twice the number of frames necessary // to fill one buffer and the resampler should not leave more than one buffer worth // of unreleased frames after each pass, but just in case... 
ALOG_ASSERT(desiredFrames <= cblk->frameCount_); +#endif } uint32_t minFrames = 1; if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() && (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) { minFrames = desiredFrames; } - // It's not safe to call framesReady() for a static buffer track, so assume it's ready - size_t framesReady; - if (track->sharedBuffer() == 0) { - framesReady = track->framesReady(); - } else if (track->isStopped()) { - framesReady = 0; - } else { - framesReady = 1; - } + + size_t framesReady = track->framesReady(); if ((framesReady >= minFrames) && track->isReady() && !track->isPaused() && !track->isTerminated()) { @@ -3362,6 +3371,7 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l() if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { status = BAD_VALUE; } else { + // no need to save value, since it's constant reconfig = true; } } @@ -3369,6 +3379,7 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l() if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) { status = BAD_VALUE; } else { + // no need to save value, since it's constant reconfig = true; } } @@ -4168,15 +4179,15 @@ bool AudioFlinger::OffloadThread::waitingAsyncCallback_l() // must be called with thread mutex locked bool AudioFlinger::OffloadThread::shouldStandby_l() { - bool TrackPaused = false; + bool trackPaused = false; // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack // after a timeout and we will enter standby then. 
if (mTracks.size() > 0) { - TrackPaused = mTracks[mTracks.size() - 1]->isPaused(); + trackPaused = mTracks[mTracks.size() - 1]->isPaused(); } - return !mStandby && !TrackPaused; + return !mStandby && !trackPaused; } @@ -4383,8 +4394,10 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, #endif ) : ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD), - mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL), - // mRsmpInIndex and mBufferSize set by readInputParameters() + mInput(input), mActiveTracksGen(0), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL), + // mRsmpInFrames, mRsmpInFramesP2, mRsmpInUnrel, mRsmpInFront, and mRsmpInRear + // are set by readInputParameters() + // mRsmpInIndex LEGACY mReqChannelCount(popcount(channelMask)), mReqSampleRate(sampleRate) // mBytesRead is only meaningful while active, and so is cleared in start() @@ -4394,6 +4407,7 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, #endif { snprintf(mName, kNameLength, "AudioIn_%X", id); + mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName); readInputParameters(); } @@ -4401,6 +4415,7 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, AudioFlinger::RecordThread::~RecordThread() { + mAudioFlinger->unregisterWriter(mNBLogWriter); delete[] mRsmpInBuffer; delete mResampler; delete[] mRsmpOutBuffer; @@ -4411,230 +4426,320 @@ void AudioFlinger::RecordThread::onFirstRef() run(mName, PRIORITY_URGENT_AUDIO); } -status_t AudioFlinger::RecordThread::readyToRun() -{ - status_t status = initCheck(); - ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this); - return status; -} - bool AudioFlinger::RecordThread::threadLoop() { - AudioBufferProvider::Buffer buffer; - sp<RecordTrack> activeTrack; - Vector< sp<EffectChain> > effectChains; - nsecs_t lastWarning = 0; inputStandBy(); - { - Mutex::Autolock _l(mLock); - activeTrack = mActiveTrack; - 
acquireWakeLock_l(activeTrack != 0 ? activeTrack->uid() : -1); - } // used to verify we've read at least once before evaluating how many bytes were read bool readOnce = false; - // start recording - while (!exitPending()) { - - processConfigEvents(); + // used to request a deferred sleep, to be executed later while mutex is unlocked + bool doSleep = false; - { // scope for mLock - Mutex::Autolock _l(mLock); - checkForNewParameters_l(); - if (mActiveTrack != 0 && activeTrack != mActiveTrack) { +reacquire_wakelock: + sp<RecordTrack> activeTrack; + int activeTracksGen; + { + Mutex::Autolock _l(mLock); + size_t size = mActiveTracks.size(); + activeTracksGen = mActiveTracksGen; + if (size > 0) { + // FIXME an arbitrary choice + activeTrack = mActiveTracks[0]; + acquireWakeLock_l(activeTrack->uid()); + if (size > 1) { SortedVector<int> tmp; - tmp.add(mActiveTrack->uid()); + for (size_t i = 0; i < size; i++) { + tmp.add(mActiveTracks[i]->uid()); + } updateWakeLockUids_l(tmp); } - activeTrack = mActiveTrack; - if (mActiveTrack == 0 && mConfigEvents.isEmpty()) { - standby(); + } else { + acquireWakeLock_l(-1); + } + } - if (exitPending()) { - break; - } + // start recording + for (;;) { + TrackBase::track_state activeTrackState; + Vector< sp<EffectChain> > effectChains; + // sleep with mutex unlocked + if (doSleep) { + doSleep = false; + usleep(kRecordThreadSleepUs); + } + + { // scope for mLock + Mutex::Autolock _l(mLock); + if (exitPending()) { + break; + } + processConfigEvents_l(); + // return value 'reconfig' is currently unused + bool reconfig = checkForNewParameters_l(); + + // if no active track(s), then standby and release wakelock + size_t size = mActiveTracks.size(); + if (size == 0) { + standbyIfNotAlreadyInStandby(); + // exitPending() can't become true here releaseWakeLock_l(); ALOGV("RecordThread: loop stopping"); // go to sleep mWaitWorkCV.wait(mLock); ALOGV("RecordThread: loop starting"); - acquireWakeLock_l(mActiveTrack != 0 ? 
mActiveTrack->uid() : -1); + goto reacquire_wakelock; + } + + if (mActiveTracksGen != activeTracksGen) { + activeTracksGen = mActiveTracksGen; + SortedVector<int> tmp; + for (size_t i = 0; i < size; i++) { + tmp.add(mActiveTracks[i]->uid()); + } + updateWakeLockUids_l(tmp); + // FIXME an arbitrary choice + activeTrack = mActiveTracks[0]; + } + + if (activeTrack->isTerminated()) { + removeTrack_l(activeTrack); + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; continue; } - if (mActiveTrack != 0) { - if (mActiveTrack->isTerminated()) { - removeTrack_l(mActiveTrack); - mActiveTrack.clear(); - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - standby(); - mActiveTrack.clear(); + + activeTrackState = activeTrack->mState; + switch (activeTrackState) { + case TrackBase::PAUSING: + standbyIfNotAlreadyInStandby(); + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; + mStartStopCond.broadcast(); + doSleep = true; + continue; + + case TrackBase::RESUMING: + mStandby = false; + if (mReqChannelCount != activeTrack->channelCount()) { + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; mStartStopCond.broadcast(); - } else if (mActiveTrack->mState == TrackBase::RESUMING) { - if (mReqChannelCount != mActiveTrack->channelCount()) { - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (readOnce) { - // record start succeeds only if first read from audio input - // succeeds - if (mBytesRead >= 0) { - mActiveTrack->mState = TrackBase::ACTIVE; - } else { - mActiveTrack.clear(); - } - mStartStopCond.broadcast(); + continue; + } + if (readOnce) { + mStartStopCond.broadcast(); + // record start succeeds only if first read from audio input succeeds + if (mBytesRead < 0) { + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; + continue; } - mStandby = false; + activeTrack->mState = TrackBase::ACTIVE; } + break; + + case TrackBase::ACTIVE: + break; + + case TrackBase::IDLE: + doSleep = true; + continue; + + default: + LOG_FATAL("Unexpected 
activeTrackState %d", activeTrackState); } lockEffectChains_l(effectChains); } - if (mActiveTrack != 0) { - if (mActiveTrack->mState != TrackBase::ACTIVE && - mActiveTrack->mState != TrackBase::RESUMING) { - unlockEffectChains(effectChains); - usleep(kRecordThreadSleepUs); - continue; - } - for (size_t i = 0; i < effectChains.size(); i ++) { - effectChains[i]->process_l(); - } + // thread mutex is now unlocked, mActiveTracks unknown, activeTrack != 0, kept, immutable + // activeTrack->mState unknown, activeTrackState immutable and is ACTIVE or RESUMING - buffer.frameCount = mFrameCount; - status_t status = mActiveTrack->getNextBuffer(&buffer); - if (status == NO_ERROR) { - readOnce = true; - size_t framesOut = buffer.frameCount; - if (mResampler == NULL) { - // no resampling - while (framesOut) { - size_t framesIn = mFrameCount - mRsmpInIndex; - if (framesIn) { - int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize; - int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * - mActiveTrack->mFrameSize; - if (framesIn > framesOut) - framesIn = framesOut; - mRsmpInIndex += framesIn; - framesOut -= framesIn; - if (mChannelCount == mReqChannelCount) { - memcpy(dst, src, framesIn * mFrameSize); - } else { - if (mChannelCount == 1) { - upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } else { - downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } - } + for (size_t i = 0; i < effectChains.size(); i ++) { + // thread mutex is not locked, but effect chain is locked + effectChains[i]->process_l(); + } + + AudioBufferProvider::Buffer buffer; + buffer.frameCount = mFrameCount; + status_t status = activeTrack->getNextBuffer(&buffer); + if (status == NO_ERROR) { + readOnce = true; + size_t framesOut = buffer.frameCount; + if (mResampler == NULL) { + // no resampling + while (framesOut) { + size_t framesIn = mFrameCount - mRsmpInIndex; + if (framesIn > 0) { + int8_t *src = (int8_t *)mRsmpInBuffer + 
mRsmpInIndex * mFrameSize; + int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * + activeTrack->mFrameSize; + if (framesIn > framesOut) { + framesIn = framesOut; } - if (framesOut && mFrameCount == mRsmpInIndex) { - void *readInto; - if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) { - readInto = buffer.raw; - framesOut = 0; + mRsmpInIndex += framesIn; + framesOut -= framesIn; + if (mChannelCount == mReqChannelCount) { + memcpy(dst, src, framesIn * mFrameSize); + } else { + if (mChannelCount == 1) { + upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, + (int16_t *)src, framesIn); } else { - readInto = mRsmpInBuffer; - mRsmpInIndex = 0; + downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, + (int16_t *)src, framesIn); } - mBytesRead = mInput->stream->read(mInput->stream, readInto, - mBufferSize); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) - { - ALOGE("Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - mRsmpInIndex = mFrameCount; - framesOut = 0; - buffer.frameCount = 0; + } + } + if (framesOut > 0 && mFrameCount == mRsmpInIndex) { + void *readInto; + if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) { + readInto = buffer.raw; + framesOut = 0; + } else { + readInto = mRsmpInBuffer; + mRsmpInIndex = 0; + } + mBytesRead = mInput->stream->read(mInput->stream, readInto, + mBufferSize); + if (mBytesRead <= 0) { + // TODO: verify that it's benign to use a stale track state + if ((mBytesRead < 0) && (activeTrackState == TrackBase::ACTIVE)) + { + ALOGE("Error reading audio input"); + // Force input into standby so that it tries to + // recover at next read attempt + inputStandBy(); + doSleep = true; } + mRsmpInIndex = mFrameCount; + framesOut = 0; + buffer.frameCount = 0; + } #ifdef TEE_SINK - else if (mTeeSink != 0) { - (void) mTeeSink->write(readInto, - mBytesRead 
>> Format_frameBitShift(mTeeSink->format())); - } -#endif + else if (mTeeSink != 0) { + (void) mTeeSink->write(readInto, + mBytesRead >> Format_frameBitShift(mTeeSink->format())); } +#endif } - } else { - // resampling - - // resampler accumulates, but we only have one source track - memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); - // alter output frame count as if we were expecting stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - framesOut >>= 1; + } + } else { + // resampling + + // avoid busy-waiting if client doesn't keep up + bool madeProgress = false; + + // keep mRsmpInBuffer full so resampler always has sufficient input + for (;;) { + int32_t rear = mRsmpInRear; + ssize_t filled = rear - mRsmpInFront; + ALOG_ASSERT(0 <= filled && (size_t) filled <= mRsmpInFramesP2); + // exit once there is enough data in buffer for resampler + if ((size_t) filled >= mRsmpInFrames) { + break; } - mResampler->resample(mRsmpOutBuffer, framesOut, - this /* AudioBufferProvider* */); - // ditherAndClamp() works as long as all buffers returned by - // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true. - if (mChannelCount == 2 && mReqChannelCount == 1) { - // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t - ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut); - // the resampler always outputs stereo samples: - // do post stereo to mono conversion - downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer, - framesOut); - } else { - ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut); + size_t avail = mRsmpInFramesP2 - filled; + // Only try to read full HAL buffers. + // But if the HAL read returns a partial buffer, use it. 
+ if (avail < mFrameCount) { + ALOGE("insufficient space to read: avail %d < mFrameCount %d", + avail, mFrameCount); + break; } - // now done with mRsmpOutBuffer + // If 'avail' is non-contiguous, first read past the nominal end of buffer, then + // copy to the right place. Permitted because mRsmpInBuffer was over-allocated. + rear &= mRsmpInFramesP2 - 1; + mBytesRead = mInput->stream->read(mInput->stream, + &mRsmpInBuffer[rear * mChannelCount], mBufferSize); + if (mBytesRead <= 0) { + ALOGE("read failed: mBytesRead=%d < %u", mBytesRead, mBufferSize); + break; + } + ALOG_ASSERT((size_t) mBytesRead <= mBufferSize); + size_t framesRead = mBytesRead / mFrameSize; + ALOG_ASSERT(framesRead > 0); + madeProgress = true; + // If 'avail' was non-contiguous, we now correct for reading past end of buffer. + size_t part1 = mRsmpInFramesP2 - rear; + if (framesRead > part1) { + memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount], + (framesRead - part1) * mFrameSize); + } + mRsmpInRear += framesRead; + } + if (!madeProgress) { + ALOGV("Did not make progress"); + usleep(((mFrameCount * 1000) / mSampleRate) * 1000); } - if (mFramestoDrop == 0) { - mActiveTrack->releaseBuffer(&buffer); + + // resampler accumulates, but we only have one source track + memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); + mResampler->resample(mRsmpOutBuffer, framesOut, + this /* AudioBufferProvider* */); + // ditherAndClamp() works as long as all buffers returned by + // activeTrack->getNextBuffer() are 32 bit aligned which should be always true. 
+ if (mReqChannelCount == 1) { + // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t + ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut); + // the resampler always outputs stereo samples: + // do post stereo to mono conversion + downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer, + framesOut); } else { - if (mFramestoDrop > 0) { - mFramestoDrop -= buffer.frameCount; - if (mFramestoDrop <= 0) { - clearSyncStartEvent(); - } - } else { - mFramestoDrop += buffer.frameCount; - if (mFramestoDrop >= 0 || mSyncStartEvent == 0 || - mSyncStartEvent->isCancelled()) { - ALOGW("Synced record %s, session %d, trigger session %d", - (mFramestoDrop >= 0) ? "timed out" : "cancelled", - mActiveTrack->sessionId(), - (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0); - clearSyncStartEvent(); - } - } + ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut); } - mActiveTrack->clearOverflow(); + // now done with mRsmpOutBuffer + } - // client isn't retrieving buffers fast enough - else { - if (!mActiveTrack->setOverflow()) { - nsecs_t now = systemTime(); - if ((now - lastWarning) > kWarningThrottleNs) { - ALOGW("RecordThread: buffer overflow"); - lastWarning = now; + if (mFramestoDrop == 0) { + activeTrack->releaseBuffer(&buffer); + } else { + if (mFramestoDrop > 0) { + mFramestoDrop -= buffer.frameCount; + if (mFramestoDrop <= 0) { + clearSyncStartEvent(); + } + } else { + mFramestoDrop += buffer.frameCount; + if (mFramestoDrop >= 0 || mSyncStartEvent == 0 || + mSyncStartEvent->isCancelled()) { + ALOGW("Synced record %s, session %d, trigger session %d", + (mFramestoDrop >= 0) ? "timed out" : "cancelled", + activeTrack->sessionId(), + (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0); + clearSyncStartEvent(); } } - // Release the processor for a while before asking for a new buffer. - // This will give the application more chance to read from the buffer and - // clear the overflow. 
- usleep(kRecordThreadSleepUs); } + activeTrack->clearOverflow(); } + // client isn't retrieving buffers fast enough + else { + if (!activeTrack->setOverflow()) { + nsecs_t now = systemTime(); + if ((now - lastWarning) > kWarningThrottleNs) { + ALOGW("RecordThread: buffer overflow"); + lastWarning = now; + } + } + // Release the processor for a while before asking for a new buffer. + // This will give the application more chance to read from the buffer and + // clear the overflow. + doSleep = true; + } + // enable changes in effect chain unlockEffectChains(effectChains); - effectChains.clear(); + // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end } - standby(); + standbyIfNotAlreadyInStandby(); { Mutex::Autolock _l(mLock); @@ -4642,7 +4747,8 @@ bool AudioFlinger::RecordThread::threadLoop() sp<RecordTrack> track = mTracks[i]; track->invalidate(); } - mActiveTrack.clear(); + mActiveTracks.clear(); + mActiveTracksGen++; mStartStopCond.broadcast(); } @@ -4652,7 +4758,7 @@ bool AudioFlinger::RecordThread::threadLoop() return false; } -void AudioFlinger::RecordThread::standby() +void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby() { if (!mStandby) { inputStandBy(); @@ -4665,18 +4771,19 @@ void AudioFlinger::RecordThread::inputStandBy() mInput->stream->common.standby(&mInput->stream->common); } -sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( +sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, int sessionId, int uid, IAudioFlinger::track_flags_t *flags, pid_t tid, status_t *status) { + size_t frameCount = *pFrameCount; sp<RecordTrack> track; status_t lStatus; @@ -4735,6 +4842,7 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR } } } + 
*pFrameCount = frameCount; // FIXME use flags and tid similar to createTrack_l() @@ -4744,9 +4852,9 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR track = new RecordTrack(this, client, sampleRate, format, channelMask, frameCount, sessionId, uid); - if (track->getCblk() == 0) { - ALOGE("createRecordTrack_l() no control block"); - lStatus = NO_MEMORY; + lStatus = track->initCheck(); + if (lStatus != NO_ERROR) { + ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus); track.clear(); goto Exit; } @@ -4768,9 +4876,7 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return track; } @@ -4801,43 +4907,57 @@ status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrac } { + // This section is a rendezvous between binder thread executing start() and RecordThread AutoMutex lock(mLock); - if (mActiveTrack != 0) { - if (recordTrack != mActiveTrack.get()) { + if (mActiveTracks.size() > 0) { + // FIXME does not work for multiple active tracks + if (mActiveTracks.indexOf(recordTrack) != 0) { status = -EBUSY; - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - mActiveTrack->mState = TrackBase::ACTIVE; + } else if (recordTrack->mState == TrackBase::PAUSING) { + recordTrack->mState = TrackBase::ACTIVE; } return status; } + // FIXME why? 
already set in constructor, 'STARTING_1' would be more accurate recordTrack->mState = TrackBase::IDLE; - mActiveTrack = recordTrack; + mActiveTracks.add(recordTrack); + mActiveTracksGen++; mLock.unlock(); status_t status = AudioSystem::startInput(mId); mLock.lock(); + // FIXME should verify that mActiveTrack is still == recordTrack if (status != NO_ERROR) { - mActiveTrack.clear(); + mActiveTracks.remove(recordTrack); + mActiveTracksGen++; clearSyncStartEvent(); return status; } + // FIXME LEGACY mRsmpInIndex = mFrameCount; + mRsmpInFront = 0; + mRsmpInRear = 0; + mRsmpInUnrel = 0; mBytesRead = 0; if (mResampler != NULL) { mResampler->reset(); } - mActiveTrack->mState = TrackBase::RESUMING; + // FIXME hijacking a playback track state name which was intended for start after pause; + // here 'STARTING_2' would be more accurate + recordTrack->mState = TrackBase::RESUMING; // signal thread to start ALOGV("Signal record thread"); mWaitWorkCV.broadcast(); // do not wait for mStartStopCond if exiting if (exitPending()) { - mActiveTrack.clear(); + mActiveTracks.remove(recordTrack); + mActiveTracksGen++; status = INVALID_OPERATION; goto startError; } + // FIXME incorrect usage of wait: no explicit predicate or loop mStartStopCond.wait(mLock); - if (mActiveTrack == 0) { + if (mActiveTracks.indexOf(recordTrack) < 0) { ALOGV("Record failed to start"); status = BAD_VALUE; goto startError; @@ -4883,17 +5003,19 @@ void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) { ALOGV("RecordThread::stop"); AutoMutex _l(mLock); - if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) { + if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) { return false; } + // note that threadLoop may still be processing the track at this point [without lock] recordTrack->mState = TrackBase::PAUSING; // do not wait for mStartStopCond if 
exiting if (exitPending()) { return true; } + // FIXME incorrect usage of wait: no explicit predicate or loop mStartStopCond.wait(mLock); - // if we have been restarted, recordTrack == mActiveTrack.get() here - if (exitPending() || recordTrack != mActiveTrack.get()) { + // if we have been restarted, recordTrack is in mActiveTracks here + if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) { ALOGV("Record stopped OK"); return true; } @@ -4936,7 +5058,7 @@ void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track) track->terminate(); track->mState = TrackBase::STOPPED; // active tracks are removed by threadLoop() - if (mActiveTrack != track) { + if (mActiveTracks.indexOf(track) < 0) { removeTrack_l(track); } } @@ -4963,7 +5085,7 @@ void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& a snprintf(buffer, SIZE, "\nInput thread %p internals\n", this); result.append(buffer); - if (mActiveTrack != 0) { + if (mActiveTracks.size() > 0) { snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex); result.append(buffer); snprintf(buffer, SIZE, "Buffer size: %u bytes\n", mBufferSize); @@ -5000,12 +5122,16 @@ void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args } } - if (mActiveTrack != 0) { + size_t size = mActiveTracks.size(); + if (size > 0) { snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this); result.append(buffer); RecordTrack::appendDumpHeader(result); - mActiveTrack->dump(buffer, SIZE); - result.append(buffer); + for (size_t i = 0; i < size; ++i) { + sp<RecordTrack> track = mActiveTracks[i]; + track->dump(buffer, SIZE); + result.append(buffer); + } } write(fd, result.string(), result.size()); @@ -5014,46 +5140,47 @@ void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args // AudioBufferProvider interface status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) { - size_t framesReq = buffer->frameCount; - size_t 
framesReady = mFrameCount - mRsmpInIndex; - int channelCount; - - if (framesReady == 0) { - mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) { - ALOGE("RecordThread::getNextBuffer() Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - buffer->raw = NULL; - buffer->frameCount = 0; - return NOT_ENOUGH_DATA; - } - mRsmpInIndex = 0; - framesReady = mFrameCount; - } - - if (framesReq > framesReady) { - framesReq = framesReady; - } - - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount; - buffer->frameCount = framesReq; + int32_t rear = mRsmpInRear; + int32_t front = mRsmpInFront; + ssize_t filled = rear - front; + ALOG_ASSERT(0 <= filled && (size_t) filled <= mRsmpInFramesP2); + // 'filled' may be non-contiguous, so return only the first contiguous chunk + front &= mRsmpInFramesP2 - 1; + size_t part1 = mRsmpInFramesP2 - front; + if (part1 > (size_t) filled) { + part1 = filled; + } + size_t ask = buffer->frameCount; + ALOG_ASSERT(ask > 0); + if (part1 > ask) { + part1 = ask; + } + if (part1 == 0) { + // Higher-level should keep mRsmpInBuffer full, and not call resampler if empty + ALOGE("RecordThread::getNextBuffer() starved"); + buffer->raw = NULL; + buffer->frameCount = 0; + mRsmpInUnrel = 0; + return NOT_ENOUGH_DATA; + } + + buffer->raw = mRsmpInBuffer + front * mChannelCount; + buffer->frameCount = part1; + mRsmpInUnrel = part1; return NO_ERROR; } // AudioBufferProvider interface void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer) { - mRsmpInIndex += buffer->frameCount; + size_t stepCount = buffer->frameCount; + if (stepCount == 0) { + return; + } + ALOG_ASSERT(stepCount <= 
mRsmpInUnrel); + mRsmpInUnrel -= stepCount; + mRsmpInFront += stepCount; + buffer->raw = NULL; buffer->frameCount = 0; } @@ -5068,7 +5195,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() int value; audio_format_t reqFormat = mFormat; uint32_t reqSamplingRate = mReqSampleRate; - uint32_t reqChannelCount = mReqChannelCount; + audio_channel_mask_t reqChannelMask = audio_channel_in_mask_from_count(mReqChannelCount); if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { reqSamplingRate = value; @@ -5083,14 +5210,19 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() } } if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { - reqChannelCount = popcount(value); - reconfig = true; + audio_channel_mask_t mask = (audio_channel_mask_t) value; + if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) { + status = BAD_VALUE; + } else { + reqChannelMask = mask; + reconfig = true; + } } if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { // do not accept frame count changes if tracks are open as the track buffer // size depends on frame count and correct behavior would not be guaranteed // if frame count is changed after track creation - if (mActiveTrack != 0) { + if (mActiveTracks.size() > 0) { status = INVALID_OPERATION; } else { reconfig = true; @@ -5133,6 +5265,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() } mAudioSource = (audio_source_t)value; } + if (status == NO_ERROR) { status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string()); @@ -5149,7 +5282,8 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() <= (2 * reqSamplingRate)) && popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 && - (reqChannelCount <= FCC_2)) { + (reqChannelMask == AUDIO_CHANNEL_IN_MONO || + reqChannelMask == AUDIO_CHANNEL_IN_STEREO)) { status = NO_ERROR; } if (status == NO_ERROR) { @@ -5185,7 
+5319,7 @@ String8 AudioFlinger::RecordThread::getParameters(const String8& keys) void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { AudioSystem::OutputDescriptor desc; - void *param2 = NULL; + const void *param2 = NULL; switch (event) { case AudioSystem::INPUT_OPENED: @@ -5224,34 +5358,27 @@ void AudioFlinger::RecordThread::readInputParameters() mFrameSize = audio_stream_frame_size(&mInput->stream->common); mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common); mFrameCount = mBufferSize / mFrameSize; - mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount]; - - if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) - { - int channelCount; - // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid - // stereo to mono post process as the resampler always outputs stereo. - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - mResampler = AudioResampler::create(16, channelCount, mReqSampleRate); + // With 3 HAL buffers, we can guarantee ability to down-sample the input by ratio of 2:1 to + // 1 full output buffer, regardless of the alignment of the available input. 
+ mRsmpInFrames = mFrameCount * 3; + mRsmpInFramesP2 = roundup(mRsmpInFrames); + // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer + mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount]; + mRsmpInFront = 0; + mRsmpInRear = 0; + mRsmpInUnrel = 0; + + if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) { + mResampler = AudioResampler::create(16, (int) mChannelCount, mReqSampleRate); mResampler->setSampleRate(mSampleRate); mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); + // resampler always outputs stereo mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2]; - - // optmization: if mono to mono, alter input frame count as if we were inputing - // stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - mFrameCount >>= 1; - } - } mRsmpInIndex = mFrameCount; } -unsigned int AudioFlinger::RecordThread::getInputFramesLost() +uint32_t AudioFlinger::RecordThread::getInputFramesLost() { Mutex::Autolock _l(mLock); if (initCheck() != NO_ERROR) { diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h index 207f1eb..8df6f94 100644 --- a/services/audioflinger/Threads.h +++ b/services/audioflinger/Threads.h @@ -36,6 +36,8 @@ public: audio_devices_t outDevice, audio_devices_t inDevice, type_t type); virtual ~ThreadBase(); + virtual status_t readyToRun(); + void dumpBase(int fd, const Vector<String16>& args); void dumpEffectChains(int fd, const Vector<String16>& args); @@ -141,6 +143,7 @@ public: void sendIoConfigEvent_l(int event, int param = 0); void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio); void processConfigEvents(); + void processConfigEvents_l(); // see note at declaration of mStandby, mOutDevice and mInDevice bool standby() const { return mStandby; } @@ -156,7 +159,7 @@ public: int sessionId, effect_descriptor_t *desc, int *enabled, - status_t *status); + status_t *status /*non-NULL*/); void 
disconnectEffect(const sp< EffectModule>& effect, EffectHandle *handle, bool unpinIfLast); @@ -275,6 +278,7 @@ protected: uint32_t mChannelCount; size_t mFrameSize; audio_format_t mFormat; + size_t mBufferSize; // HAL buffer size for read() or write() // Parameter sequence by client: binder thread calling setParameters(): // 1. Lock mLock @@ -358,7 +362,6 @@ public: void dump(int fd, const Vector<String16>& args); // Thread virtuals - virtual status_t readyToRun(); virtual bool threadLoop(); // RefBase @@ -419,13 +422,13 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, const sp<IMemory>& sharedBuffer, int sessionId, IAudioFlinger::track_flags_t *flags, pid_t tid, int uid, - status_t *status); + status_t *status /*non-NULL*/); AudioStreamOut* getOutput() const; AudioStreamOut* clearOutput(); @@ -479,7 +482,6 @@ protected: size_t mNormalFrameCount; // normal mixer and effects int16_t* mMixBuffer; // frame size aligned mix buffer - int8_t* mAllocMixBuffer; // mixer buffer allocation address // suspend count, > 0 means suspended. While suspended, the thread continues to pull from // tracks and mix, but doesn't write to HAL. A2DP and SCO HAL implementations can't handle @@ -867,23 +869,23 @@ public: // Thread virtuals virtual bool threadLoop(); - virtual status_t readyToRun(); // RefBase virtual void onFirstRef(); virtual status_t initCheck() const { return (mInput == NULL) ? 
NO_INIT : NO_ERROR; } + sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, int sessionId, int uid, IAudioFlinger::track_flags_t *flags, pid_t tid, - status_t *status); + status_t *status /*non-NULL*/); status_t start(RecordTrack* recordTrack, AudioSystem::sync_event_t event, @@ -905,7 +907,7 @@ public: virtual String8 getParameters(const String8& keys); virtual void audioConfigChanged_l(int event, int param = 0); void readInputParameters(); - virtual unsigned int getInputFramesLost(); + virtual uint32_t getInputFramesLost(); virtual status_t addEffectChain_l(const sp<EffectChain>& chain); virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); @@ -926,30 +928,43 @@ public: bool hasFastRecorder() const { return false; } private: - void clearSyncStartEvent(); + void clearSyncStartEvent(); // Enter standby if not already in standby, and set mStandby flag - void standby(); + void standbyIfNotAlreadyInStandby(); // Call the HAL standby method unconditionally, and don't change mStandby flag - void inputStandBy(); + void inputStandBy(); AudioStreamIn *mInput; SortedVector < sp<RecordTrack> > mTracks; - // mActiveTrack has dual roles: it indicates the current active track, and + // mActiveTracks has dual roles: it indicates the current active track(s), and // is used together with mStartStopCond to indicate start()/stop() progress - sp<RecordTrack> mActiveTrack; + SortedVector< sp<RecordTrack> > mActiveTracks; + // generation counter for mActiveTracks + int mActiveTracksGen; Condition mStartStopCond; // updated by RecordThread::readInputParameters() AudioResampler *mResampler; // interleaved stereo pairs of fixed-point signed Q19.12 int32_t *mRsmpOutBuffer; - int16_t *mRsmpInBuffer; // [mFrameCount * mChannelCount] - size_t mRsmpInIndex; - size_t mBufferSize; // stream buffer size for 
read() + + // resampler converts input at HAL Hz to output at AudioRecord client Hz + int16_t *mRsmpInBuffer; // see new[] for details on the size + size_t mRsmpInFrames; // size of resampler input in frames + size_t mRsmpInFramesP2;// size rounded up to a power-of-2 + size_t mRsmpInUnrel; // unreleased frames remaining from + // most recent getNextBuffer + // these are rolling counters that are never cleared + int32_t mRsmpInFront; // next available frame + int32_t mRsmpInRear; // last filled frame + 1 + size_t mRsmpInIndex; // FIXME legacy + + // client's requested configuration, which may differ from the HAL configuration const uint32_t mReqChannelCount; const uint32_t mReqSampleRate; + ssize_t mBytesRead; // sync event triggering actual audio capture. Frames read before this event will // be dropped and therefore not read by the application. diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h index cd201d9..05fde7c 100644 --- a/services/audioflinger/TrackBase.h +++ b/services/audioflinger/TrackBase.h @@ -48,6 +48,7 @@ public: int uid, bool isOut); virtual ~TrackBase(); + virtual status_t initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; } virtual status_t start(AudioSystem::sync_event_t event, int triggerSession) = 0; @@ -78,15 +79,6 @@ protected: virtual uint32_t sampleRate() const { return mSampleRate; } - // Return a pointer to the start of a contiguous slice of the track buffer. - // Parameter 'offset' is the requested start position, expressed in - // monotonically increasing frame units relative to the track epoch. - // Parameter 'frames' is the requested length, also in frame units. - // Always returns non-NULL. It is the caller's responsibility to - // verify that this will be successful; the result of calling this - // function with invalid 'offset' or 'frames' is undefined. 
- void* getBuffer(uint32_t offset, uint32_t frames) const; - bool isStopped() const { return (mState == STOPPED || mState == FLUSHED); } diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp index af04ce7..d8d7790 100644 --- a/services/audioflinger/Tracks.cpp +++ b/services/audioflinger/Tracks.cpp @@ -116,12 +116,11 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( if (client != 0) { mCblkMemory = client->heap()->allocate(size); - if (mCblkMemory != 0) { - mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer()); - // can't assume mCblk != NULL - } else { + if (mCblkMemory == 0 || + (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) { ALOGE("not enough memory for AudioTrack size=%u", size); client->heap()->dump("AudioTrack"); + mCblkMemory.clear(); return; } } else { @@ -134,7 +133,6 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( if (mCblk != NULL) { new(mCblk) audio_track_cblk_t(); // clear all buffers - mCblk->frameCount_ = frameCount; if (sharedBuffer == 0) { mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); memset(mBuffer, 0, bufferSize); @@ -275,6 +273,11 @@ status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer, if (!mTrack->isTimedTrack()) return INVALID_OPERATION; + if (buffer == 0 || buffer->pointer() == NULL) { + ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()"); + return BAD_VALUE; + } + PlaybackThread::TimedTrack* tt = reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); return tt->queueTimedBuffer(buffer, pts); @@ -396,6 +399,15 @@ AudioFlinger::PlaybackThread::Track::~Track() } } +status_t AudioFlinger::PlaybackThread::Track::initCheck() const +{ + status_t status = TrackBase::initCheck(); + if (status == NO_ERROR && mName < 0) { + status = NO_MEMORY; + } + return status; +} + void AudioFlinger::PlaybackThread::Track::destroy() { // NOTE: destroyTrack_l() can remove a strong reference to this Track @@ -551,7 +563,7 @@ size_t 
AudioFlinger::PlaybackThread::Track::framesReleased() const // Don't call for fast tracks; the framesReady() could result in priority inversion bool AudioFlinger::PlaybackThread::Track::isReady() const { - if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) { + if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing() || isStopping()) { return true; } @@ -1045,15 +1057,14 @@ status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer( mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize, "AudioFlingerTimed"); - if (mTimedMemoryDealer == NULL) + if (mTimedMemoryDealer == NULL) { return NO_MEMORY; + } } sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) { - newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) - return NO_MEMORY; + if (newBuffer == 0 || newBuffer->pointer() == NULL) { + return NO_MEMORY; } *buffer = newBuffer; @@ -1504,9 +1515,9 @@ AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( mOutBuffer.frameCount = 0; playbackThread->mTracks.add(this); ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, " - "mCblk->frameCount_ %u, mChannelMask 0x%08x", + "frameCount %u, mChannelMask 0x%08x", mCblk, mBuffer, - mCblk->frameCount_, mChannelMask); + frameCount, mChannelMask); // since client and server are in the same process, // the buffer has the same virtual address on both sides mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize); @@ -1764,9 +1775,7 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( { ALOGV("RecordTrack constructor"); if (mCblk != NULL) { - mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - mServerProxy = mAudioRecordServerProxy; + mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize); } } diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp index 7a314cf..66fcd90 100644 --- 
a/services/audioflinger/test-resample.cpp +++ b/services/audioflinger/test-resample.cpp @@ -26,54 +26,30 @@ #include <errno.h> #include <time.h> #include <math.h> +#include <audio_utils/sndfile.h> using namespace android; -struct HeaderWav { - HeaderWav(size_t size, int nc, int sr, int bits) { - strncpy(RIFF, "RIFF", 4); - chunkSize = size + sizeof(HeaderWav); - strncpy(WAVE, "WAVE", 4); - strncpy(fmt, "fmt ", 4); - fmtSize = 16; - audioFormat = 1; - numChannels = nc; - samplesRate = sr; - byteRate = sr * numChannels * (bits/8); - align = nc*(bits/8); - bitsPerSample = bits; - strncpy(data, "data", 4); - dataSize = size; - } - - char RIFF[4]; // RIFF - uint32_t chunkSize; // File size - char WAVE[4]; // WAVE - char fmt[4]; // fmt\0 - uint32_t fmtSize; // fmt size - uint16_t audioFormat; // 1=PCM - uint16_t numChannels; // num channels - uint32_t samplesRate; // sample rate in hz - uint32_t byteRate; // Bps - uint16_t align; // 2=16-bit mono, 4=16-bit stereo - uint16_t bitsPerSample; // bits per sample - char data[4]; // "data" - uint32_t dataSize; // size -}; +bool gVerbose = false; static int usage(const char* name) { - fprintf(stderr,"Usage: %s [-p] [-h] [-s] [-q {dq|lq|mq|hq|vhq}] [-i input-sample-rate] " - "[-o output-sample-rate] [<input-file>] <output-file>\n", name); + fprintf(stderr,"Usage: %s [-p] [-h] [-v] [-s] [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]" + " [-i input-sample-rate] [-o output-sample-rate] [<input-file>]" + " <output-file>\n", name); fprintf(stderr," -p enable profiling\n"); fprintf(stderr," -h create wav file\n"); - fprintf(stderr," -s stereo\n"); + fprintf(stderr," -v verbose : log buffer provider calls\n"); + fprintf(stderr," -s stereo (ignored if input file is specified)\n"); fprintf(stderr," -q resampler quality\n"); fprintf(stderr," dq : default quality\n"); fprintf(stderr," lq : low quality\n"); fprintf(stderr," mq : medium quality\n"); fprintf(stderr," hq : high quality\n"); fprintf(stderr," vhq : very high quality\n"); - fprintf(stderr," 
-i input file sample rate\n"); + fprintf(stderr," dlq : dynamic low quality\n"); + fprintf(stderr," dmq : dynamic medium quality\n"); + fprintf(stderr," dhq : dynamic high quality\n"); + fprintf(stderr," -i input file sample rate (ignored if input file is specified)\n"); fprintf(stderr," -o output file sample rate\n"); return -1; } @@ -81,7 +57,8 @@ static int usage(const char* name) { int main(int argc, char* argv[]) { const char* const progname = argv[0]; - bool profiling = false; + bool profileResample = false; + bool profileFilter = false; bool writeHeader = false; int channels = 1; int input_freq = 0; @@ -89,14 +66,20 @@ int main(int argc, char* argv[]) { AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY; int ch; - while ((ch = getopt(argc, argv, "phsq:i:o:")) != -1) { + while ((ch = getopt(argc, argv, "pfhvsq:i:o:")) != -1) { switch (ch) { case 'p': - profiling = true; + profileResample = true; + break; + case 'f': + profileFilter = true; break; case 'h': writeHeader = true; break; + case 'v': + gVerbose = true; + break; case 's': channels = 2; break; @@ -111,6 +94,12 @@ int main(int argc, char* argv[]) { quality = AudioResampler::HIGH_QUALITY; else if (!strcmp(optarg, "vhq")) quality = AudioResampler::VERY_HIGH_QUALITY; + else if (!strcmp(optarg, "dlq")) + quality = AudioResampler::DYN_LOW_QUALITY; + else if (!strcmp(optarg, "dmq")) + quality = AudioResampler::DYN_MED_QUALITY; + else if (!strcmp(optarg, "dhq")) + quality = AudioResampler::DYN_HIGH_QUALITY; else { usage(progname); return -1; @@ -148,25 +137,22 @@ int main(int argc, char* argv[]) { size_t input_size; void* input_vaddr; if (argc == 2) { - struct stat st; - if (stat(file_in, &st) < 0) { - fprintf(stderr, "stat: %s\n", strerror(errno)); - return -1; - } - - int input_fd = open(file_in, O_RDONLY); - if (input_fd < 0) { - fprintf(stderr, "open: %s\n", strerror(errno)); - return -1; - } - - input_size = st.st_size; - input_vaddr = mmap(0, input_size, PROT_READ, MAP_PRIVATE, 
input_fd, 0); - if (input_vaddr == MAP_FAILED ) { - fprintf(stderr, "mmap: %s\n", strerror(errno)); - return -1; + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return EXIT_FAILURE; } + input_size = info.frames * info.channels * sizeof(short); + input_vaddr = malloc(input_size); + (void) sf_readf_short(sf, (short *) input_vaddr, info.frames); + sf_close(sf); + channels = info.channels; + input_freq = info.samplerate; } else { + // data for testing is exactly (input sampling rate/1000)/2 seconds + // so 44.1khz input is 22.05 seconds double k = 1000; // Hz / s double time = (input_freq / 2) / k; size_t input_frames = size_t(input_freq * time); @@ -178,7 +164,7 @@ int main(int argc, char* argv[]) { double y = sin(M_PI * k * t * t); int16_t yi = floor(y * 32767.0 + 0.5); for (size_t j=0 ; j<(size_t)channels ; j++) { - in[i*channels + j] = yi / (1+j); + in[i*channels + j] = yi / (1+j); // right ch. 1/2 left ch. } } } @@ -186,89 +172,238 @@ int main(int argc, char* argv[]) { // ---------------------------------------------------------- class Provider: public AudioBufferProvider { - int16_t* mAddr; - size_t mNumFrames; + int16_t* const mAddr; // base address + const size_t mNumFrames; // total frames + const int mChannels; + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released public: - Provider(const void* addr, size_t size, int channels) { - mAddr = (int16_t*) addr; - mNumFrames = size / (channels*sizeof(int16_t)); + Provider(const void* addr, size_t size, int channels) + : mAddr((int16_t*) addr), + mNumFrames(size / (channels*sizeof(int16_t))), + mChannels(channels), + mNextFrame(0), mUnrel(0) { } virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) { - buffer->frameCount = mNumFrames; - buffer->i16 = mAddr; - return NO_ERROR; + (void)pts; // suppress warning + size_t requestedFrames = buffer->frameCount; + if 
(requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (gVerbose) { + printf("getNextBuffer() requested %u frames out of %u frames available," + " and returned %u frames\n", + requestedFrames, mNumFrames - mNextFrame, buffer->frameCount); + } + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->i16 = &mAddr[mChannels * mNextFrame]; + return NO_ERROR; + } else { + buffer->i16 = NULL; + return NOT_ENOUGH_DATA; + } } virtual void releaseBuffer(Buffer* buffer) { + if (buffer->frameCount > mUnrel) { + fprintf(stderr, "ERROR releaseBuffer() released %u frames but only %u available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + if (gVerbose) { + printf("releaseBuffer() released %u frames out of %u frames available " + "to release\n", buffer->frameCount, mUnrel); + } + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->i16 = NULL; + } + void reset() { + mNextFrame = 0; } } provider(input_vaddr, input_size, channels); size_t input_frames = input_size / (channels * sizeof(int16_t)); + if (gVerbose) { + printf("%u input frames\n", input_frames); + } size_t output_size = 2 * 4 * ((int64_t) input_frames * output_freq) / input_freq; output_size &= ~7; // always stereo, 32-bits - void* output_vaddr = malloc(output_size); - - if (profiling) { + if (profileFilter) { + // Check how fast sample rate changes are that require filter changes. + // The delta sample rate changes must indicate a downsampling ratio, + // and must be larger than 10% changes. + // + // On fast devices, filters should be generated between 0.1ms - 1ms. + // (single threaded). 
AudioResampler* resampler = AudioResampler::create(16, channels, - output_freq, quality); - - size_t out_frames = output_size/8; - resampler->setSampleRate(input_freq); - resampler->setVolume(0x1000, 0x1000); - - memset(output_vaddr, 0, output_size); + 8000, quality); + int looplimit = 100; timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(9000); + resampler->setSampleRate(12000); + resampler->setSampleRate(20000); + resampler->setSampleRate(30000); + } clock_gettime(CLOCK_MONOTONIC, &end); int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; - int64_t time = (end_ns - start_ns)/4; - printf("%f Mspl/s\n", out_frames/(time/1e9)/1e6); + int64_t time = end_ns - start_ns; + printf("%.2f sample rate changes with filter calculation/sec\n", + looplimit * 4 / (time / 1e9)); + // Check how fast sample rate changes are without filter changes. + // This should be very fast, probably 0.1us - 1us per sample rate + // change. 
+ resampler->setSampleRate(1000); + looplimit = 1000; + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(1000+i); + } + clock_gettime(CLOCK_MONOTONIC, &end); + start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + time = end_ns - start_ns; + printf("%.2f sample rate changes without filter calculation/sec\n", + looplimit / (time / 1e9)); + resampler->reset(); delete resampler; } + void* output_vaddr = malloc(output_size); AudioResampler* resampler = AudioResampler::create(16, channels, output_freq, quality); size_t out_frames = output_size/8; + + /* set volume precision to 12 bits, so the volume scale is 1<<12. + * This means the "integer" part fits in the Q19.12 precision + * representation of output int32_t. + * + * Generally 0 < volumePrecision <= 14 (due to the limits of + * int16_t values for Volume). volumePrecision cannot be 0 due + * to rounding and shifts. + */ + const int volumePrecision = 12; // in bits + resampler->setSampleRate(input_freq); - resampler->setVolume(0x1000, 0x1000); + resampler->setVolume(1 << volumePrecision, 1 << volumePrecision); + + if (profileResample) { + /* + * For profiling on mobile devices, upon experimentation + * it is better to run a few trials with a shorter loop limit, + * and take the minimum time. + * + * Long tests can cause CPU temperature to build up and thermal throttling + * to reduce CPU frequency. + * + * For frequency checks (index=0, or 1, etc.): + * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq" + * + * For temperature checks (index=0, or 1, etc.): + * "cat /sys/class/thermal/thermal_zone${index}/temp" + * + * Another way to avoid thermal throttling is to fix the CPU frequency + * at a lower level which prevents excessive temperatures. 
+ */ + const int trials = 4; + const int looplimit = 4; + timespec start, end; + int64_t time; + + for (int n = 0; n < trials; ++n) { + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->resample((int*) output_vaddr, out_frames, &provider); + provider.reset(); // during benchmarking reset only the provider + } + clock_gettime(CLOCK_MONOTONIC, &end); + int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + int64_t diff_ns = end_ns - start_ns; + if (n == 0 || diff_ns < time) { + time = diff_ns; // save the best out of our trials. + } + } + // Mfrms/s is "Millions of output frames per second". + printf("quality: %d channels: %d msec: %lld Mfrms/s: %.2lf\n", + quality, channels, time/1000000, out_frames * looplimit / (time / 1e9) / 1e6); + resampler->reset(); + } memset(output_vaddr, 0, output_size); + if (gVerbose) { + printf("resample() %u output frames\n", out_frames); + } resampler->resample((int*) output_vaddr, out_frames, &provider); + if (gVerbose) { + printf("resample() complete\n"); + } + resampler->reset(); + if (gVerbose) { + printf("reset() complete\n"); + } + delete resampler; + resampler = NULL; - // down-mix (we just truncate and keep the left channel) + // mono takes left channel only + // stereo right channel is half amplitude of stereo left channel (due to input creation) int32_t* out = (int32_t*) output_vaddr; int16_t* convert = (int16_t*) malloc(out_frames * channels * sizeof(int16_t)); + + // round to half towards zero and saturate at int16 (non-dithered) + const int roundVal = (1<<(volumePrecision-1)) - 1; // volumePrecision > 0 + for (size_t i = 0; i < out_frames; i++) { - for (int j=0 ; j<channels ; j++) { - int32_t s = out[i * 2 + j] >> 12; - if (s > 32767) s = 32767; - else if (s < -32768) s = -32768; + for (int j = 0; j < channels; j++) { + int32_t s = out[i * 2 + j] + roundVal; // add offset here + if (s < 0) { + s = (s + 1) >> 
volumePrecision; // round to 0 + if (s < -32768) { + s = -32768; + } + } else { + s = s >> volumePrecision; + if (s > 32767) { + s = 32767; + } + } convert[i * channels + j] = int16_t(s); } } // write output to disk - int output_fd = open(file_out, O_WRONLY | O_CREAT | O_TRUNC, - S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); - if (output_fd < 0) { - fprintf(stderr, "open: %s\n", strerror(errno)); - return -1; - } - if (writeHeader) { - HeaderWav wav(out_frames * channels * sizeof(int16_t), channels, output_freq, 16); - write(output_fd, &wav, sizeof(wav)); + SF_INFO info; + info.frames = 0; + info.samplerate = output_freq; + info.channels = channels; + info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16; + SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info); + if (sf == NULL) { + perror(file_out); + return EXIT_FAILURE; + } + (void) sf_writef_short(sf, convert, out_frames); + sf_close(sf); + } else { + int output_fd = open(file_out, O_WRONLY | O_CREAT | O_TRUNC, + S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + if (output_fd < 0) { + perror(file_out); + return EXIT_FAILURE; + } + write(output_fd, convert, out_frames * channels * sizeof(int16_t)); + close(output_fd); } - write(output_fd, convert, out_frames * channels * sizeof(int16_t)); - close(output_fd); - - return 0; + return EXIT_SUCCESS; } diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index 0b6ca5c..2bb3ff8 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -237,7 +237,7 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) { result.append(" Scene mode: "); switch (p.sceneMode) { - case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: + case ANDROID_CONTROL_SCENE_MODE_DISABLED: result.append("AUTO\n"); break; CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT) @@ -731,6 +731,7 @@ status_t 
Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { return OK; } params.state = Parameters::STOPPED; + int lastPreviewStreamId = mStreamingProcessor->getPreviewStreamId(); res = mStreamingProcessor->updatePreviewStream(params); if (res != OK) { @@ -739,6 +740,8 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { return res; } + bool previewStreamChanged = mStreamingProcessor->getPreviewStreamId() != lastPreviewStreamId; + // We could wait to create the JPEG output stream until first actual use // (first takePicture call). However, this would substantially increase the // first capture latency on HAL3 devices, and potentially on some HAL2 @@ -788,6 +791,19 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { return res; } outputStreams.push(getCallbackStreamId()); + } else if (previewStreamChanged && mCallbackProcessor->getStreamId() != NO_STREAM) { + /** + * Delete the unused callback stream when preview stream is changed and + * preview is not enabled. Don't need stop preview stream as preview is in + * STOPPED state now. 
+ */ + ALOGV("%s: Camera %d: Delete unused preview callback stream.", __FUNCTION__, mCameraId); + res = mCallbackProcessor->deleteStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Unable to delete callback stream %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + return res; + } } if (params.zslMode && !params.recordingHint) { res = updateProcessorStream(mZslProcessor, params); @@ -797,6 +813,8 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { return res; } outputStreams.push(getZslStreamId()); + } else { + mZslProcessor->deleteStream(); } outputStreams.push(getPreviewStreamId()); @@ -1143,7 +1161,7 @@ status_t Camera2Client::autoFocus() { * Handle quirk mode for AF in scene modes */ if (l.mParameters.quirks.triggerAfWithAuto && - l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED && + l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED && l.mParameters.focusMode != Parameters::FOCUS_MODE_AUTO && !l.mParameters.focusingAreas[0].isEmpty()) { ALOGV("%s: Quirk: Switching from focusMode %d to AUTO", diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp index 77d5c8a..ec81456 100644 --- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp @@ -200,50 +200,59 @@ status_t JpegProcessor::processNewCapture() { ATRACE_CALL(); status_t res; sp<Camera2Heap> captureHeap; + sp<MemoryBase> captureBuffer; CpuConsumer::LockedBuffer imgBuffer; - res = mCaptureConsumer->lockNextBuffer(&imgBuffer); - if (res != OK) { - if (res != BAD_VALUE) { - ALOGE("%s: Camera %d: Error receiving still image buffer: " - "%s (%d)", __FUNCTION__, - mId, strerror(-res), res); + { + Mutex::Autolock l(mInputMutex); + if (mCaptureStreamId == NO_STREAM) { + ALOGW("%s: Camera %d: No stream is available", __FUNCTION__, mId); + return INVALID_OPERATION; } - return res; - } - 
ALOGV("%s: Camera %d: Still capture available", __FUNCTION__, - mId); + res = mCaptureConsumer->lockNextBuffer(&imgBuffer); + if (res != OK) { + if (res != BAD_VALUE) { + ALOGE("%s: Camera %d: Error receiving still image buffer: " + "%s (%d)", __FUNCTION__, + mId, strerror(-res), res); + } + return res; + } - if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) { - ALOGE("%s: Camera %d: Unexpected format for still image: " - "%x, expected %x", __FUNCTION__, mId, - imgBuffer.format, - HAL_PIXEL_FORMAT_BLOB); - mCaptureConsumer->unlockBuffer(imgBuffer); - return OK; - } + ALOGV("%s: Camera %d: Still capture available", __FUNCTION__, + mId); - // Find size of JPEG image - size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width); - if (jpegSize == 0) { // failed to find size, default to whole buffer - jpegSize = imgBuffer.width; - } - size_t heapSize = mCaptureHeap->getSize(); - if (jpegSize > heapSize) { - ALOGW("%s: JPEG image is larger than expected, truncating " - "(got %d, expected at most %d bytes)", - __FUNCTION__, jpegSize, heapSize); - jpegSize = heapSize; - } + if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) { + ALOGE("%s: Camera %d: Unexpected format for still image: " + "%x, expected %x", __FUNCTION__, mId, + imgBuffer.format, + HAL_PIXEL_FORMAT_BLOB); + mCaptureConsumer->unlockBuffer(imgBuffer); + return OK; + } - // TODO: Optimize this to avoid memcopy - sp<MemoryBase> captureBuffer = new MemoryBase(mCaptureHeap, 0, jpegSize); - void* captureMemory = mCaptureHeap->getBase(); - memcpy(captureMemory, imgBuffer.data, jpegSize); + // Find size of JPEG image + size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width); + if (jpegSize == 0) { // failed to find size, default to whole buffer + jpegSize = imgBuffer.width; + } + size_t heapSize = mCaptureHeap->getSize(); + if (jpegSize > heapSize) { + ALOGW("%s: JPEG image is larger than expected, truncating " + "(got %d, expected at most %d bytes)", + __FUNCTION__, jpegSize, heapSize); + jpegSize = heapSize; 
+ } + + // TODO: Optimize this to avoid memcopy + captureBuffer = new MemoryBase(mCaptureHeap, 0, jpegSize); + void* captureMemory = mCaptureHeap->getBase(); + memcpy(captureMemory, imgBuffer.data, jpegSize); - mCaptureConsumer->unlockBuffer(imgBuffer); + mCaptureConsumer->unlockBuffer(imgBuffer); + } sp<CaptureSequencer> sequencer = mSequencer.promote(); if (sequencer != 0) { diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp index 08af566..6fae399 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.cpp +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -466,7 +466,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { supportedAntibanding); } - sceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + sceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED; params.set(CameraParameters::KEY_SCENE_MODE, CameraParameters::SCENE_MODE_AUTO); @@ -482,7 +482,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { if (addComma) supportedSceneModes += ","; addComma = true; switch (availableSceneModes.data.u8[i]) { - case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: + case ANDROID_CONTROL_SCENE_MODE_DISABLED: noSceneModes = true; break; case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY: @@ -1446,7 +1446,7 @@ status_t Parameters::set(const String8& paramString) { newParams.get(CameraParameters::KEY_SCENE_MODE) ); if (validatedParams.sceneMode != sceneMode && validatedParams.sceneMode != - ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) { + ANDROID_CONTROL_SCENE_MODE_DISABLED) { camera_metadata_ro_entry_t availableSceneModes = staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES); for (i = 0; i < availableSceneModes.count; i++) { @@ -1461,7 +1461,7 @@ status_t Parameters::set(const String8& paramString) { } } bool sceneModeSet = - validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + validatedParams.sceneMode != 
ANDROID_CONTROL_SCENE_MODE_DISABLED; // FLASH_MODE if (sceneModeSet) { @@ -1776,7 +1776,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { // (face detection statistics and face priority scene mode). Map from other // to the other. bool sceneModeActive = - sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED; uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO; if (enableFaceDetect || sceneModeActive) { reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE; @@ -1788,7 +1788,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { uint8_t reqSceneMode = sceneModeActive ? sceneMode : enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY : - (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED; res = request->update(ANDROID_CONTROL_SCENE_MODE, &reqSceneMode, 1); if (res != OK) return res; @@ -2149,9 +2149,9 @@ int Parameters::abModeStringToEnum(const char *abMode) { int Parameters::sceneModeStringToEnum(const char *sceneMode) { return !sceneMode ? - ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED : + ANDROID_CONTROL_SCENE_MODE_DISABLED : !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ? - ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED : + ANDROID_CONTROL_SCENE_MODE_DISABLED : !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ? ANDROID_CONTROL_SCENE_MODE_ACTION : !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ? diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp index 776ebe2..e17d05d 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp @@ -273,6 +273,15 @@ status_t ZslProcessor3::pushToReprocess(int32_t requestId) { return INVALID_OPERATION; } + // Flush device to clear out all in-flight requests pending in HAL. 
+ res = client->getCameraDevice()->flush(); + if (res != OK) { + ALOGE("%s: Camera %d: Failed to flush device: " + "%s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return res; + } + // Update JPEG settings { SharedParameters::Lock l(client->getParameters()); diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp index 1cdf8dc..187220e 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp @@ -635,26 +635,56 @@ status_t CameraDeviceClient::getRotationTransformLocked(int32_t* transform) { return INVALID_OPERATION; } + camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING); + if (entry.count == 0) { + ALOGE("%s: Camera %d: Can't find android.lens.facing in " + "static metadata!", __FUNCTION__, mCameraId); + return INVALID_OPERATION; + } + int32_t& flags = *transform; + bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT); int orientation = entry.data.i32[0]; - switch (orientation) { - case 0: - flags = 0; - break; - case 90: - flags = NATIVE_WINDOW_TRANSFORM_ROT_90; - break; - case 180: - flags = NATIVE_WINDOW_TRANSFORM_ROT_180; - break; - case 270: - flags = NATIVE_WINDOW_TRANSFORM_ROT_270; - break; - default: - ALOGE("%s: Invalid HAL android.sensor.orientation value: %d", - __FUNCTION__, orientation); - return INVALID_OPERATION; + if (!mirror) { + switch (orientation) { + case 0: + flags = 0; + break; + case 90: + flags = NATIVE_WINDOW_TRANSFORM_ROT_90; + break; + case 180: + flags = NATIVE_WINDOW_TRANSFORM_ROT_180; + break; + case 270: + flags = NATIVE_WINDOW_TRANSFORM_ROT_270; + break; + default: + ALOGE("%s: Invalid HAL android.sensor.orientation value: %d", + __FUNCTION__, orientation); + return INVALID_OPERATION; + } + } else { + switch (orientation) { + case 0: + flags = HAL_TRANSFORM_FLIP_H; + break; + case 90: + flags = 
HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90; + break; + case 180: + flags = HAL_TRANSFORM_FLIP_V; + break; + case 270: + flags = HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90; + break; + default: + ALOGE("%s: Invalid HAL android.sensor.orientation value: %d", + __FUNCTION__, orientation); + return INVALID_OPERATION; + } + } /** diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp index 3dbc1b0..da3e121 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.cpp +++ b/services/camera/libcameraservice/device3/Camera3Device.cpp @@ -100,8 +100,10 @@ status_t Camera3Device::initialize(camera_module_t *module) camera3_device_t *device; + ATRACE_BEGIN("camera3->open"); res = module->common.methods->open(&module->common, deviceName.string(), reinterpret_cast<hw_device_t**>(&device)); + ATRACE_END(); if (res != OK) { SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res); @@ -269,7 +271,9 @@ status_t Camera3Device::disconnect() { mStatusTracker.clear(); if (mHal3Device != NULL) { + ATRACE_BEGIN("camera3->close"); mHal3Device->common.close(&mHal3Device->common); + ATRACE_END(); mHal3Device = NULL; } @@ -836,16 +840,20 @@ status_t Camera3Device::deleteStream(int id) { } sp<Camera3StreamInterface> deletedStream; + ssize_t outputStreamIdx = mOutputStreams.indexOfKey(id); if (mInputStream != NULL && id == mInputStream->getId()) { deletedStream = mInputStream; mInputStream.clear(); } else { - ssize_t idx = mOutputStreams.indexOfKey(id); - if (idx == NAME_NOT_FOUND) { + if (outputStreamIdx == NAME_NOT_FOUND) { CLOGE("Stream %d does not exist", id); return BAD_VALUE; } - deletedStream = mOutputStreams.editValueAt(idx); + } + + // Delete output stream or the output part of a bi-directional stream. 
+ if (outputStreamIdx != NAME_NOT_FOUND) { + deletedStream = mOutputStreams.editValueAt(outputStreamIdx); mOutputStreams.removeItem(id); } @@ -1120,7 +1128,14 @@ status_t Camera3Device::flush() { Mutex::Autolock l(mLock); mRequestThread->clear(); - return mHal3Device->ops->flush(mHal3Device); + status_t res; + if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) { + res = mHal3Device->ops->flush(mHal3Device); + } else { + res = waitUntilDrained(); + } + + return res; } /** @@ -1664,8 +1679,10 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { return; } - // Check if everything has arrived for this result (buffers and metadata) - if (request.haveResultMetadata && request.numBuffersLeft == 0) { + // Check if everything has arrived for this result (buffers and metadata), remove it from + // InFlightMap if both arrived or HAL reports error for this request (i.e. during flush). + if ((request.requestStatus != OK) || + (request.haveResultMetadata && request.numBuffersLeft == 0)) { ATRACE_ASYNC_END("frame capture", frameNumber); mInFlightMap.removeItemsAt(idx, 1); } diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp index 5aa9a3e..e1c492b 100644 --- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp @@ -199,13 +199,33 @@ status_t Camera3InputStream::configureQueueLocked() { assert(mMaxSize == 0); assert(camera3_stream::format != HAL_PIXEL_FORMAT_BLOB); - mTotalBufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS + - camera3_stream::max_buffers; mDequeuedBufferCount = 0; mFrameCount = 0; if (mConsumer.get() == 0) { sp<BufferQueue> bq = new BufferQueue(); + + int minUndequeuedBuffers = 0; + res = bq->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers); + if (res != OK || minUndequeuedBuffers < 0) { + ALOGE("%s: Stream %d: Could not query min 
undequeued buffers (error %d, bufCount %d)", + __FUNCTION__, mId, res, minUndequeuedBuffers); + return res; + } + size_t minBufs = static_cast<size_t>(minUndequeuedBuffers); + /* + * We promise never to 'acquire' more than camera3_stream::max_buffers + * at any one time. + * + * Boost the number up to meet the minimum required buffer count. + * + * (Note that this sets consumer-side buffer count only, + * and not the sum of producer+consumer side as in other camera streams). + */ + mTotalBufferCount = camera3_stream::max_buffers > minBufs ? + camera3_stream::max_buffers : minBufs; + // TODO: somehow set the total buffer count when producer connects? + mConsumer = new BufferItemConsumer(bq, camera3_stream::usage, mTotalBufferCount); mConsumer->setName(String8::format("Camera3-InputStream-%d", mId)); diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h index 681d684..ae49467 100644 --- a/services/camera/libcameraservice/device3/Camera3InputStream.h +++ b/services/camera/libcameraservice/device3/Camera3InputStream.h @@ -44,6 +44,8 @@ class Camera3InputStream : public Camera3IOStreamBase { virtual void dump(int fd, const Vector<String16> &args) const; + // TODO: expose an interface to get the IGraphicBufferProducer + private: typedef BufferItemConsumer::BufferItem BufferItem; diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp index 04f5dc5..5f63a6e 100644 --- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp @@ -109,14 +109,14 @@ struct TimestampFinder : public RingBufferConsumer::RingBufferComparator { } // namespace anonymous Camera3ZslStream::Camera3ZslStream(int id, uint32_t width, uint32_t height, - int depth) : + int bufferCount) : Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL, width, height, 
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED), - mDepth(depth) { + mDepth(bufferCount) { sp<BufferQueue> bq = new BufferQueue(); - mProducer = new RingBufferConsumer(bq, GRALLOC_USAGE_HW_CAMERA_ZSL, depth); + mProducer = new RingBufferConsumer(bq, GRALLOC_USAGE_HW_CAMERA_ZSL, bufferCount); mConsumer = new Surface(bq); } diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h index c7f4490..6721832 100644 --- a/services/camera/libcameraservice/device3/Camera3ZslStream.h +++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h @@ -37,10 +37,10 @@ class Camera3ZslStream : public Camera3OutputStream { public: /** - * Set up a ZSL stream of a given resolution. Depth is the number of buffers + * Set up a ZSL stream of a given resolution. bufferCount is the number of buffers * cached within the stream that can be retrieved for input. */ - Camera3ZslStream(int id, uint32_t width, uint32_t height, int depth); + Camera3ZslStream(int id, uint32_t width, uint32_t height, int bufferCount); ~Camera3ZslStream(); virtual void dump(int fd, const Vector<String16> &args) const; diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h index b4ad824..a03736d 100644 --- a/services/camera/libcameraservice/gui/RingBufferConsumer.h +++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h @@ -64,7 +64,7 @@ class RingBufferConsumer : public ConsumerBase, // bufferCount parameter specifies how many buffers can be pinned for user // access at the same time. 
RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer, uint32_t consumerUsage, - int bufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS); + int bufferCount); virtual ~RingBufferConsumer(); diff --git a/tools/resampler_tools/fir.cpp b/tools/resampler_tools/fir.cpp index cc3d509..3d6a74d 100644 --- a/tools/resampler_tools/fir.cpp +++ b/tools/resampler_tools/fir.cpp @@ -20,15 +20,25 @@ #include <stdlib.h> #include <string.h> -static double sinc(double x) { +static inline double sinc(double x) { if (fabs(x) == 0.0f) return 1.0f; return sin(x) / x; } -static double sqr(double x) { +static inline double sqr(double x) { return x*x; } +static inline int64_t toint(double x, int64_t maxval) { + int64_t v; + + v = static_cast<int64_t>(floor(x * maxval + 0.5)); + if (v >= maxval) { + return maxval - 1; // error! + } + return v; +} + static double I0(double x) { // from the Numerical Recipes in C p. 237 double ax,ans,y; @@ -54,11 +64,12 @@ static double kaiser(int k, int N, double beta) { return I0(beta * sqrt(1.0 - sqr((2.0*k)/N - 1.0))) / I0(beta); } - static void usage(char* name) { fprintf(stderr, - "usage: %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings] [-f {float|fixed}] [-b beta] [-v dBFS] [-l lerp]\n" - " %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings] [-f {float|fixed}] [-b beta] [-v dBFS] -p M/N\n" + "usage: %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]" + " [-f {float|fixed|fixed16}] [-b beta] [-v dBFS] [-l lerp]\n" + " %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]" + " [-f {float|fixed|fixed16}] [-b beta] [-v dBFS] -p M/N\n" " -h this help message\n" " -d debug, print comma-separated coefficient table\n" " -p generate poly-phase filter coefficients, with sample increment M/N\n" @@ -66,6 +77,7 @@ static void usage(char* name) { " -c cut-off frequency (20478)\n" " -n number of zero-crossings on one side (8)\n" " -l number of lerping bits 
(4)\n" + " -m number of polyphases (related to -l, default 16)\n" " -f output format, can be fixed-point or floating-point (fixed)\n" " -b kaiser window parameter beta (7.865 [-80dB])\n" " -v attenuation in dBFS (0)\n", @@ -77,8 +89,7 @@ static void usage(char* name) { int main(int argc, char** argv) { // nc is the number of bits to store the coefficients - const int nc = 32; - + int nc = 32; bool polyphase = false; unsigned int polyM = 160; unsigned int polyN = 147; @@ -88,7 +99,6 @@ int main(int argc, char** argv) double atten = 1; int format = 0; - // in order to keep the errors associated with the linear // interpolation of the coefficients below the quantization error // we must satisfy: @@ -104,7 +114,6 @@ int main(int argc, char** argv) // Smith, J.O. Digital Audio Resampling Home Page // https://ccrma.stanford.edu/~jos/resample/, 2011-03-29 // - int nz = 4; // | 0.1102*(A - 8.7) A > 50 // beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21) 21 <= A <= 50 @@ -123,7 +132,6 @@ int main(int argc, char** argv) // 100 dB 10.056 double beta = 7.865; - // 2*nzc = (A - 8) / (2.285 * dw) // with dw the transition width = 2*pi*dF/Fs // @@ -148,8 +156,9 @@ int main(int argc, char** argv) // nzc = 20 // + int M = 1 << 4; // number of phases for interpolation int ch; - while ((ch = getopt(argc, argv, ":hds:c:n:f:l:b:p:v:")) != -1) { + while ((ch = getopt(argc, argv, ":hds:c:n:f:l:m:b:p:v:z:")) != -1) { switch (ch) { case 'd': debug = true; @@ -169,13 +178,26 @@ int main(int argc, char** argv) case 'n': nzc = atoi(optarg); break; + case 'm': + M = atoi(optarg); + break; case 'l': - nz = atoi(optarg); + M = 1 << atoi(optarg); break; case 'f': - if (!strcmp(optarg,"fixed")) format = 0; - else if (!strcmp(optarg,"float")) format = 1; - else usage(argv[0]); + if (!strcmp(optarg, "fixed")) { + format = 0; + } + else if (!strcmp(optarg, "fixed16")) { + format = 0; + nc = 16; + } + else if (!strcmp(optarg, "float")) { + format = 1; + } + else { + usage(argv[0]); + } break; case 'b': 
beta = atof(optarg); @@ -193,11 +215,14 @@ int main(int argc, char** argv) // cut off frequency ratio Fc/Fs double Fcr = Fc / Fs; - // total number of coefficients (one side) - const int M = (1 << nz); + const int N = M * nzc; + // lerp (which is most useful if M is a power of 2) + + int nz = 0; // recalculate nz as the bits needed to represent M + for (int i = M-1 ; i; i>>=1, nz++); // generate the right half of the filter if (!debug) { printf("// cmd-line: "); @@ -207,7 +232,7 @@ int main(int argc, char** argv) printf("\n"); if (!polyphase) { printf("const int32_t RESAMPLE_FIR_SIZE = %d;\n", N); - printf("const int32_t RESAMPLE_FIR_LERP_INT_BITS = %d;\n", nz); + printf("const int32_t RESAMPLE_FIR_INT_PHASES = %d;\n", M); printf("const int32_t RESAMPLE_FIR_NUM_COEF = %d;\n", nzc); } else { printf("const int32_t RESAMPLE_FIR_SIZE = %d;\n", 2*nzc*polyN); @@ -224,7 +249,7 @@ int main(int argc, char** argv) for (int i=0 ; i<=M ; i++) { // an extra set of coefs for interpolation for (int j=0 ; j<nzc ; j++) { int ix = j*M + i; - double x = (2.0 * M_PI * ix * Fcr) / (1 << nz); + double x = (2.0 * M_PI * ix * Fcr) / M; double y = kaiser(ix+N, 2*N, beta) * sinc(x) * 2.0 * Fcr; y *= atten; @@ -232,11 +257,13 @@ int main(int argc, char** argv) if (j == 0) printf("\n "); } - if (!format) { - int64_t yi = floor(y * ((1ULL<<(nc-1))) + 0.5); - if (yi >= (1LL<<(nc-1))) yi = (1LL<<(nc-1))-1; - printf("0x%08x, ", int32_t(yi)); + int64_t yi = toint(y, 1ULL<<(nc-1)); + if (nc > 16) { + printf("0x%08x, ", int32_t(yi)); + } else { + printf("0x%04x, ", int32_t(yi)&0xffff); + } } else { printf("%.9g%s ", y, debug ? 
"," : "f,"); } @@ -254,9 +281,12 @@ int main(int argc, char** argv) double y = kaiser(i+N, 2*N, beta) * sinc(x) * 2.0 * Fcr;; y *= atten; if (!format) { - int64_t yi = floor(y * ((1ULL<<(nc-1))) + 0.5); - if (yi >= (1LL<<(nc-1))) yi = (1LL<<(nc-1))-1; - printf("0x%08x", int32_t(yi)); + int64_t yi = toint(y, 1ULL<<(nc-1)); + if (nc > 16) { + printf("0x%08x, ", int32_t(yi)); + } else { + printf("0x%04x, ", int32_t(yi)&0xffff); + } } else { printf("%.9g%s", y, debug ? "" : "f"); } @@ -277,5 +307,3 @@ int main(int argc, char** argv) } // http://www.csee.umbc.edu/help/sound/AFsp-V2R1/html/audio/ResampAudio.html - - |