-rw-r--r--  cmds/screenrecord/screenrecord.cpp | 11
-rw-r--r--  cmds/stagefright/Android.mk | 42
-rw-r--r--  cmds/stagefright/audioloop.cpp | 11
-rw-r--r--  cmds/stagefright/codec.cpp | 49
-rw-r--r--  cmds/stagefright/filters/argbtorgba.rs (renamed from include/media/nbaio/roundup.h) | 25
-rw-r--r--  cmds/stagefright/filters/nightvision.rs | 38
-rw-r--r--  cmds/stagefright/filters/saturation.rs | 40
-rw-r--r--  cmds/stagefright/mediafilter.cpp | 785
-rw-r--r--  cmds/stagefright/muxer.cpp | 12
-rw-r--r--  cmds/stagefright/recordvideo.cpp | 12
-rw-r--r--  cmds/stagefright/stagefright.cpp | 9
-rw-r--r--  include/media/AudioTrack.h | 34
-rw-r--r--  include/media/EffectsFactoryApi.h | 2
-rw-r--r--  include/media/IMediaPlayerService.h | 3
-rw-r--r--  include/media/IMediaRecorder.h | 1
-rw-r--r--  include/media/MediaRecorderBase.h | 1
-rw-r--r--  include/media/SingleStateQueue.h | 106
-rw-r--r--  include/media/StringArray.h | 2
-rw-r--r--  include/media/mediarecorder.h | 1
-rw-r--r--  include/media/nbaio/NBAIO.h | 6
-rw-r--r--  include/media/nbaio/NBLog.h | 2
-rw-r--r--  include/media/stagefright/AACWriter.h | 1
-rw-r--r--  include/media/stagefright/AMRWriter.h | 1
-rw-r--r--  include/media/stagefright/MPEG2TSWriter.h | 1
-rw-r--r--  include/media/stagefright/MPEG4Writer.h | 1
-rw-r--r--  include/media/stagefright/MediaFilter.h | 167
-rw-r--r--  include/media/stagefright/MediaMuxer.h | 3
-rw-r--r--  include/media/stagefright/RenderScriptWrapper.h | 42
-rw-r--r--  include/ndk/NdkMediaCodec.h | 3
-rw-r--r--  include/ndk/NdkMediaExtractor.h | 6
-rw-r--r--  include/private/media/AudioTrackShared.h | 90
-rw-r--r--  include/private/media/StaticAudioTrackState.h | 39
-rw-r--r--  media/libeffects/factory/EffectsFactory.c | 131
-rw-r--r--  media/libmedia/Android.mk | 13
-rw-r--r--  media/libmedia/AudioSystem.cpp | 8
-rw-r--r--  media/libmedia/AudioTrack.cpp | 218
-rw-r--r--  media/libmedia/AudioTrackShared.cpp | 227
-rw-r--r--  media/libmedia/IMediaRecorder.cpp | 24
-rw-r--r--  media/libmedia/JetPlayer.cpp | 6
-rw-r--r--  media/libmedia/MediaProfiles.cpp | 26
-rw-r--r--  media/libmedia/SingleStateQueue.cpp | 106
-rw-r--r--  media/libmedia/SingleStateQueueInstantiations.cpp | 28
-rw-r--r--  media/libmedia/StringArray.cpp | 2
-rw-r--r--  media/libmedia/docs/Makefile | 2
-rw-r--r--  media/libmedia/docs/paused.dot | 85
-rw-r--r--  media/libmedia/mediaplayer.cpp | 11
-rw-r--r--  media/libmedia/mediarecorder.cpp | 26
-rw-r--r--  media/libmediaplayerservice/MediaRecorderClient.cpp | 11
-rw-r--r--  media/libmediaplayerservice/MediaRecorderClient.h | 1
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 8
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.h | 1
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp | 8
-rw-r--r--  media/libnbaio/Android.mk | 4
-rw-r--r--  media/libnbaio/MonoPipe.cpp | 2
-rw-r--r--  media/libnbaio/MonoPipeReader.cpp | 2
-rw-r--r--  media/libnbaio/Pipe.cpp | 2
-rw-r--r--  media/libnbaio/roundup.c | 32
-rw-r--r--  media/libstagefright/AACWriter.cpp | 24
-rw-r--r--  media/libstagefright/AMRWriter.cpp | 13
-rw-r--r--  media/libstagefright/Android.mk | 6
-rw-r--r--  media/libstagefright/FileSource.cpp | 4
-rw-r--r--  media/libstagefright/MPEG2TSWriter.cpp | 13
-rw-r--r--  media/libstagefright/MPEG4Writer.cpp | 25
-rw-r--r--  media/libstagefright/MediaCodec.cpp | 12
-rw-r--r--  media/libstagefright/MediaMuxer.cpp | 15
-rw-r--r--  media/libstagefright/codecs/on2/dec/SoftVPX.cpp | 231
-rw-r--r--  media/libstagefright/codecs/on2/dec/SoftVPX.h | 14
-rw-r--r--  media/libstagefright/colorconversion/SoftwareRenderer.cpp | 105
-rw-r--r--  media/libstagefright/filters/Android.mk | 27
-rw-r--r--  media/libstagefright/filters/ColorConvert.cpp | 111
-rw-r--r--  media/libstagefright/filters/ColorConvert.h | 43
-rw-r--r--  media/libstagefright/filters/GraphicBufferListener.cpp | 154
-rw-r--r--  media/libstagefright/filters/GraphicBufferListener.h | 70
-rw-r--r--  media/libstagefright/filters/IntrinsicBlurFilter.cpp | 99
-rw-r--r--  media/libstagefright/filters/IntrinsicBlurFilter.h | 50
-rw-r--r--  media/libstagefright/filters/MediaFilter.cpp | 816
-rw-r--r--  media/libstagefright/filters/RSFilter.cpp | 96
-rw-r--r--  media/libstagefright/filters/RSFilter.h | 53
-rw-r--r--  media/libstagefright/filters/SaturationFilter.cpp | 99
-rw-r--r--  media/libstagefright/filters/SaturationFilter.h | 52
-rw-r--r--  media/libstagefright/filters/SimpleFilter.cpp | 39
-rw-r--r--  media/libstagefright/filters/SimpleFilter.h | 52
-rw-r--r--  media/libstagefright/filters/ZeroFilter.cpp | 57
-rw-r--r--  media/libstagefright/filters/ZeroFilter.h | 43
-rw-r--r--  media/libstagefright/filters/saturation.rs | 40
-rw-r--r--  media/libstagefright/filters/saturationARGB.rs | 40
-rw-r--r--  media/libstagefright/mpeg2ts/ESQueue.cpp | 46
-rw-r--r--  media/libstagefright/mpeg2ts/ESQueue.h | 1
-rw-r--r--  media/libstagefright/webm/WebmWriter.cpp | 32
-rw-r--r--  media/libstagefright/webm/WebmWriter.h | 1
-rw-r--r--  media/mediaserver/Android.mk | 2
-rw-r--r--  media/ndk/NdkMediaCodec.cpp | 3
-rw-r--r--  media/ndk/NdkMediaExtractor.cpp | 3
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 6
-rw-r--r--  services/audioflinger/AudioMixer.cpp | 186
-rw-r--r--  services/audioflinger/AudioMixer.h | 48
-rw-r--r--  services/audioflinger/AudioResamplerFirGen.h | 3
-rw-r--r--  services/audioflinger/AudioResamplerFirProcess.h | 6
-rw-r--r--  services/audioflinger/FastMixer.cpp | 3
-rw-r--r--  services/audioflinger/PlaybackTracks.h | 4
-rw-r--r--  services/audioflinger/Threads.cpp | 177
-rw-r--r--  services/audioflinger/Threads.h | 6
-rw-r--r--  services/audioflinger/Tracks.cpp | 54
-rwxr-xr-x  services/audioflinger/tests/mixer_to_wav_tests.sh | 12
-rw-r--r--  services/audioflinger/tests/test-mixer.cpp | 92
-rw-r--r--  services/medialog/Android.mk | 2
-rw-r--r--  tools/resampler_tools/Android.mk | 2
107 files changed, 4571 insertions, 1051 deletions
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 02df1d2..36a7e73 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -23,7 +23,10 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include <sys/wait.h>
+
#include <termios.h>
#include <unistd.h>
@@ -637,7 +640,13 @@ static status_t recordScreen(const char* fileName) {
case FORMAT_MP4: {
// Configure muxer. We have to wait for the CSD blob from the encoder
// before we can start it.
- muxer = new MediaMuxer(fileName, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ int fd = open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (fd < 0) {
+ fprintf(stderr, "ERROR: couldn't open file\n");
+ abort();
+ }
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ close(fd);
if (gRotate) {
muxer->setOrientationHint(90); // TODO: does this do anything?
}
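
Note: the hunk above, like the audioloop, muxer, recordvideo and stagefright hunks below, replaces a path-based writer/muxer constructor with a file-descriptor-based one: the caller opens the file, hands the fd to the constructor, and immediately closes its own copy. A minimal C++ sketch of that open pattern for reference only; the helper name openOutputFd is hypothetical and not part of this change.

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

// Open an output file the way the converted tools do: create/truncate,
// owner read/write only. The caller checks for a negative return, passes
// the fd to the writer or muxer constructor, then closes its own copy,
// exactly as the hunks in this change do.
static int openOutputFd(const char *fileName) {
    return open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR,
            S_IRUSR | S_IWUSR);  // returns -1 on failure
}
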
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 561ce02..0e3bc68 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -169,6 +169,48 @@ include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
+LOCAL_SRC_FILES:= \
+ filters/argbtorgba.rs \
+ filters/nightvision.rs \
+ filters/saturation.rs \
+ mediafilter.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright \
+ liblog \
+ libutils \
+ libbinder \
+ libstagefright_foundation \
+ libmedia \
+ libgui \
+ libcutils \
+ libui \
+ libRScpp \
+
+LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/media/libstagefright \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/rs/cpp \
+ $(TOP)/frameworks/rs \
+
+intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
+LOCAL_C_INCLUDES += $(intermediates)
+
+LOCAL_STATIC_LIBRARIES:= \
+ libstagefright_mediafilter
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_MODULE:= mediafilter
+
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
LOCAL_SRC_FILES:= \
muxer.cpp \
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 96073f1..7b0de24 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
#include <binder/ProcessState.h>
#include <media/mediarecorder.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -109,7 +113,12 @@ int main(int argc, char* argv[])
if (fileOut != NULL) {
// target file specified, write encoded AMR output
- sp<AMRWriter> writer = new AMRWriter(fileOut);
+ int fd = open(fileOut, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (fd < 0) {
+ return 1;
+ }
+ sp<AMRWriter> writer = new AMRWriter(fd);
+ close(fd);
writer->addSource(encoder);
writer->start();
sleep(duration);
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index fd02bcc..d987250 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -45,9 +45,10 @@ static void usage(const char *me) {
fprintf(stderr, "usage: %s [-a] use audio\n"
"\t\t[-v] use video\n"
"\t\t[-p] playback\n"
- "\t\t[-S] allocate buffers from a surface\n",
+ "\t\t[-S] allocate buffers from a surface\n"
+ "\t\t[-R] render output to surface (enables -S)\n"
+ "\t\t[-T] use render timestamps (enables -R)\n",
me);
-
exit(1);
}
@@ -71,7 +72,9 @@ static int decode(
const char *path,
bool useAudio,
bool useVideo,
- const android::sp<android::Surface> &surface) {
+ const android::sp<android::Surface> &surface,
+ bool renderSurface,
+ bool useTimestamp) {
using namespace android;
static int64_t kTimeout = 500ll;
@@ -136,6 +139,7 @@ static int decode(
CHECK(!stateByTrack.isEmpty());
int64_t startTimeUs = ALooper::GetNowUs();
+ int64_t startTimeRender = -1;
for (size_t i = 0; i < stateByTrack.size(); ++i) {
CodecState *state = &stateByTrack.editValueAt(i);
@@ -260,7 +264,23 @@ static int decode(
++state->mNumBuffersDecoded;
state->mNumBytesDecoded += size;
- err = state->mCodec->releaseOutputBuffer(index);
+ if (surface == NULL || !renderSurface) {
+ err = state->mCodec->releaseOutputBuffer(index);
+ } else if (useTimestamp) {
+ if (startTimeRender == -1) {
+ // begin rendering 2 vsyncs (~33ms) after first decode
+ startTimeRender =
+ systemTime(SYSTEM_TIME_MONOTONIC) + 33000000
+ - (presentationTimeUs * 1000);
+ }
+ presentationTimeUs =
+ (presentationTimeUs * 1000) + startTimeRender;
+ err = state->mCodec->renderOutputBufferAndRelease(
+ index, presentationTimeUs);
+ } else {
+ err = state->mCodec->renderOutputBufferAndRelease(index);
+ }
+
CHECK_EQ(err, (status_t)OK);
if (flags & MediaCodec::BUFFER_FLAG_EOS) {
@@ -320,34 +340,42 @@ int main(int argc, char **argv) {
bool useVideo = false;
bool playback = false;
bool useSurface = false;
+ bool renderSurface = false;
+ bool useTimestamp = false;
int res;
- while ((res = getopt(argc, argv, "havpSD")) >= 0) {
+ while ((res = getopt(argc, argv, "havpSDRT")) >= 0) {
switch (res) {
case 'a':
{
useAudio = true;
break;
}
-
case 'v':
{
useVideo = true;
break;
}
-
case 'p':
{
playback = true;
break;
}
-
+ case 'T':
+ {
+ useTimestamp = true;
+ }
+ // fall through
+ case 'R':
+ {
+ renderSurface = true;
+ }
+ // fall through
case 'S':
{
useSurface = true;
break;
}
-
case '?':
case 'h':
default:
@@ -422,7 +450,8 @@ int main(int argc, char **argv) {
player->stop();
player->reset();
} else {
- decode(looper, argv[0], useAudio, useVideo, surface);
+ decode(looper, argv[0], useAudio, useVideo, surface, renderSurface,
+ useTimestamp);
}
if (playback || (useSurface && useVideo)) {
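
Note: the -T path above reuses presentationTimeUs to hold a nanosecond render time, which is easy to misread. A standalone restatement of the same arithmetic with units made explicit; this is a sketch for clarity, not part of the change.

#include <utils/Timers.h>  // systemTime(), SYSTEM_TIME_MONOTONIC

// First frame is anchored ~2 vsyncs (33 ms) after "now"; later frames keep
// their original spacing by reusing the same anchor offset.
static int64_t computeRenderTimeNs(int64_t ptsUs, int64_t *startTimeRenderNs) {
    if (*startTimeRenderNs == -1) {
        *startTimeRenderNs = systemTime(SYSTEM_TIME_MONOTONIC)
                + 33000000LL - ptsUs * 1000LL;
    }
    return ptsUs * 1000LL + *startTimeRenderNs;  // value passed to renderOutputBufferAndRelease()
}
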
diff --git a/include/media/nbaio/roundup.h b/cmds/stagefright/filters/argbtorgba.rs
index 4c3cc25..229ff8c 100644
--- a/include/media/nbaio/roundup.h
+++ b/cmds/stagefright/filters/argbtorgba.rs
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,18 +14,13 @@
* limitations under the License.
*/
-#ifndef ROUNDUP_H
-#define ROUNDUP_H
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Round up to the next highest power of 2
-unsigned roundup(unsigned v);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // ROUNDUP_H
+void root(const uchar4 *v_in, uchar4 *v_out) {
+ v_out->x = v_in->y;
+ v_out->y = v_in->z;
+ v_out->z = v_in->w;
+ v_out->w = v_in->x;
+}
\ No newline at end of file
diff --git a/cmds/stagefright/filters/nightvision.rs b/cmds/stagefright/filters/nightvision.rs
new file mode 100644
index 0000000..f61413c
--- /dev/null
+++ b/cmds/stagefright/filters/nightvision.rs
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+const static float3 gNightVisionMult = {0.5f, 1.f, 0.5f};
+
+// calculates luminance of pixel, then biases color balance toward green
+void root(const uchar4 *v_in, uchar4 *v_out) {
+ v_out->x = v_in->x; // don't modify A
+
+ // get RGB, scale 0-255 uchar to 0-1.0 float
+ float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
+ v_in->w * 0.003921569f};
+
+ // apply filter
+ float3 result = dot(rgb, gMonoMult) * gNightVisionMult;
+
+ v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+ v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+ v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
diff --git a/cmds/stagefright/filters/saturation.rs b/cmds/stagefright/filters/saturation.rs
new file mode 100644
index 0000000..1de9dd8
--- /dev/null
+++ b/cmds/stagefright/filters/saturation.rs
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+
+// global variables (parameters accessible to application code)
+float gSaturation = 1.0f;
+
+void root(const uchar4 *v_in, uchar4 *v_out) {
+ v_out->x = v_in->x; // don't modify A
+
+ // get RGB, scale 0-255 uchar to 0-1.0 float
+ float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
+ v_in->w * 0.003921569f};
+
+ // apply saturation filter
+ float3 result = dot(rgb, gMonoMult);
+ result = mix(result, rgb, gSaturation);
+
+ v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+ v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+ v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
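
Note: the mix() in the kernel above linearly interpolates each pixel between its luminance and its original color: gSaturation = 0.0 gives grayscale, 1.0 leaves the image unchanged, and values above 1.0 oversaturate. A plain C++ equivalent for one ARGB pixel, as a reference sketch only; the real filter runs as the RenderScript kernel above.

#include <algorithm>
#include <cstdint>

static void saturateARGB(const uint8_t in[4], uint8_t out[4], float saturation) {
    out[0] = in[0];                                        // alpha untouched
    float r = in[1] / 255.f, g = in[2] / 255.f, b = in[3] / 255.f;
    float mono = 0.299f * r + 0.587f * g + 0.114f * b;     // luminance
    // mix(mono, rgb, saturation) == mono + (rgb - mono) * saturation
    float rr = mono + (r - mono) * saturation;
    float gg = mono + (g - mono) * saturation;
    float bb = mono + (b - mono) * saturation;
    out[1] = (uint8_t)std::min(std::max(rr * 255.f + 0.5f, 0.f), 255.f);
    out[2] = (uint8_t)std::min(std::max(gg * 255.f + 0.5f, 0.f), 255.f);
    out[3] = (uint8_t)std::min(std::max(bb * 255.f + 0.5f, 0.f), 255.f);
}
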
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
new file mode 100644
index 0000000..f77b38b
--- /dev/null
+++ b/cmds/stagefright/mediafilter.cpp
@@ -0,0 +1,785 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "mediafilterTest"
+
+#include <inttypes.h>
+
+#include <binder/ProcessState.h>
+#include <filters/ColorConvert.h>
+#include <gui/ISurfaceComposer.h>
+#include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <media/stagefright/RenderScriptWrapper.h>
+#include <OMX_IVCommon.h>
+#include <ui/DisplayInfo.h>
+
+#include "RenderScript.h"
+#include "ScriptC_argbtorgba.h"
+#include "ScriptC_nightvision.h"
+#include "ScriptC_saturation.h"
+
+// test parameters
+static const bool kTestFlush = true; // Note: true will drop 1 out of
+static const int kFlushAfterFrames = 25; // kFlushAfterFrames output frames
+static const int64_t kTimeout = 500ll;
+
+// built-in filter parameters
+static const int32_t kInvert = false; // ZeroFilter param
+static const float kBlurRadius = 15.0f; // IntrinsicBlurFilter param
+static const float kSaturation = 0.0f; // SaturationFilter param
+
+static void usage(const char *me) {
+ fprintf(stderr, "usage: [flags] %s\n"
+ "\t[-b] use IntrinsicBlurFilter\n"
+ "\t[-c] use argb to rgba conversion RSFilter\n"
+ "\t[-n] use night vision RSFilter\n"
+ "\t[-r] use saturation RSFilter\n"
+ "\t[-s] use SaturationFilter\n"
+ "\t[-z] use ZeroFilter (copy filter)\n"
+ "\t[-R] render output to surface (enables -S)\n"
+ "\t[-S] allocate buffers from a surface\n"
+ "\t[-T] use render timestamps (enables -R)\n",
+ me);
+ exit(1);
+}
+
+namespace android {
+
+struct SaturationRSFilter : RenderScriptWrapper::RSFilterCallback {
+ void init(RSC::sp<RSC::RS> context) {
+ mScript = new ScriptC_saturation(context);
+ mScript->set_gSaturation(3.f);
+ }
+
+ virtual status_t processBuffers(
+ RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
+ mScript->forEach_root(inBuffer, outBuffer);
+
+ return OK;
+ }
+
+ status_t handleSetParameters(const sp<AMessage> &msg) {
+ return OK;
+ }
+
+private:
+ RSC::sp<ScriptC_saturation> mScript;
+};
+
+struct NightVisionRSFilter : RenderScriptWrapper::RSFilterCallback {
+ void init(RSC::sp<RSC::RS> context) {
+ mScript = new ScriptC_nightvision(context);
+ }
+
+ virtual status_t processBuffers(
+ RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
+ mScript->forEach_root(inBuffer, outBuffer);
+
+ return OK;
+ }
+
+ status_t handleSetParameters(const sp<AMessage> &msg) {
+ return OK;
+ }
+
+private:
+ RSC::sp<ScriptC_nightvision> mScript;
+};
+
+struct ARGBToRGBARSFilter : RenderScriptWrapper::RSFilterCallback {
+ void init(RSC::sp<RSC::RS> context) {
+ mScript = new ScriptC_argbtorgba(context);
+ }
+
+ virtual status_t processBuffers(
+ RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
+ mScript->forEach_root(inBuffer, outBuffer);
+
+ return OK;
+ }
+
+ status_t handleSetParameters(const sp<AMessage> &msg) {
+ return OK;
+ }
+
+private:
+ RSC::sp<ScriptC_argbtorgba> mScript;
+};
+
+struct CodecState {
+ sp<MediaCodec> mCodec;
+ Vector<sp<ABuffer> > mInBuffers;
+ Vector<sp<ABuffer> > mOutBuffers;
+ bool mSignalledInputEOS;
+ bool mSawOutputEOS;
+ int64_t mNumBuffersDecoded;
+};
+
+struct DecodedFrame {
+ size_t index;
+ size_t offset;
+ size_t size;
+ int64_t presentationTimeUs;
+ uint32_t flags;
+};
+
+enum FilterType {
+ FILTERTYPE_ZERO,
+ FILTERTYPE_INTRINSIC_BLUR,
+ FILTERTYPE_SATURATION,
+ FILTERTYPE_RS_SATURATION,
+ FILTERTYPE_RS_NIGHT_VISION,
+ FILTERTYPE_RS_ARGB_TO_RGBA,
+};
+
+size_t inputFramesSinceFlush = 0;
+void tryCopyDecodedBuffer(
+ List<DecodedFrame> *decodedFrameIndices,
+ CodecState *filterState,
+ CodecState *vidState) {
+ if (decodedFrameIndices->empty()) {
+ return;
+ }
+
+ size_t filterIndex;
+ status_t err = filterState->mCodec->dequeueInputBuffer(
+ &filterIndex, kTimeout);
+ if (err != OK) {
+ return;
+ }
+
+ ++inputFramesSinceFlush;
+
+ DecodedFrame frame = *decodedFrameIndices->begin();
+
+ // only consume a buffer if we are not going to flush, since we expect
+ // the dequeue -> flush -> queue operation to cause an error and
+ // not produce an output frame
+ if (!kTestFlush || inputFramesSinceFlush < kFlushAfterFrames) {
+ decodedFrameIndices->erase(decodedFrameIndices->begin());
+ }
+ size_t outIndex = frame.index;
+
+ const sp<ABuffer> &srcBuffer =
+ vidState->mOutBuffers.itemAt(outIndex);
+ const sp<ABuffer> &destBuffer =
+ filterState->mInBuffers.itemAt(filterIndex);
+
+ sp<AMessage> srcFormat, destFormat;
+ vidState->mCodec->getOutputFormat(&srcFormat);
+ filterState->mCodec->getInputFormat(&destFormat);
+
+ int32_t srcWidth, srcHeight, srcStride, srcSliceHeight;
+ int32_t srcColorFormat, destColorFormat;
+ int32_t destWidth, destHeight, destStride, destSliceHeight;
+ CHECK(srcFormat->findInt32("stride", &srcStride)
+ && srcFormat->findInt32("slice-height", &srcSliceHeight)
+ && srcFormat->findInt32("width", &srcWidth)
+ && srcFormat->findInt32("height", & srcHeight)
+ && srcFormat->findInt32("color-format", &srcColorFormat));
+ CHECK(destFormat->findInt32("stride", &destStride)
+ && destFormat->findInt32("slice-height", &destSliceHeight)
+ && destFormat->findInt32("width", &destWidth)
+ && destFormat->findInt32("height", & destHeight)
+ && destFormat->findInt32("color-format", &destColorFormat));
+
+ CHECK(srcWidth <= destStride && srcHeight <= destSliceHeight);
+
+ convertYUV420spToARGB(
+ srcBuffer->data(),
+ srcBuffer->data() + srcStride * srcSliceHeight,
+ srcWidth,
+ srcHeight,
+ destBuffer->data());
+
+ // copy timestamp
+ int64_t timeUs;
+ CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
+ destBuffer->meta()->setInt64("timeUs", timeUs);
+
+ if (kTestFlush && inputFramesSinceFlush >= kFlushAfterFrames) {
+ inputFramesSinceFlush = 0;
+
+ // check that queueing a buffer that was dequeued before flush
+ // fails with expected error EACCES
+ filterState->mCodec->flush();
+
+ err = filterState->mCodec->queueInputBuffer(
+ filterIndex, 0 /* offset */, destBuffer->size(),
+ timeUs, frame.flags);
+
+ if (err == OK) {
+ ALOGE("FAIL: queue after flush returned OK");
+ } else if (err != -EACCES) {
+ ALOGE("queueInputBuffer after flush returned %d, "
+ "expected -EACCES (-13)", err);
+ }
+ } else {
+ err = filterState->mCodec->queueInputBuffer(
+ filterIndex, 0 /* offset */, destBuffer->size(),
+ timeUs, frame.flags);
+ CHECK(err == OK);
+
+ err = vidState->mCodec->releaseOutputBuffer(outIndex);
+ CHECK(err == OK);
+ }
+}
+
+size_t outputFramesSinceFlush = 0;
+void tryDrainOutputBuffer(
+ CodecState *filterState,
+ const sp<Surface> &surface, bool renderSurface,
+ bool useTimestamp, int64_t *startTimeRender) {
+ size_t index;
+ size_t offset;
+ size_t size;
+ int64_t presentationTimeUs;
+ uint32_t flags;
+ status_t err = filterState->mCodec->dequeueOutputBuffer(
+ &index, &offset, &size, &presentationTimeUs, &flags,
+ kTimeout);
+
+ if (err != OK) {
+ return;
+ }
+
+ ++outputFramesSinceFlush;
+
+ if (kTestFlush && outputFramesSinceFlush >= kFlushAfterFrames) {
+ filterState->mCodec->flush();
+ }
+
+ if (surface == NULL || !renderSurface) {
+ err = filterState->mCodec->releaseOutputBuffer(index);
+ } else if (useTimestamp) {
+ if (*startTimeRender == -1) {
+ // begin rendering 2 vsyncs after first decode
+ *startTimeRender = systemTime(SYSTEM_TIME_MONOTONIC)
+ + 33000000 - (presentationTimeUs * 1000);
+ }
+ presentationTimeUs =
+ (presentationTimeUs * 1000) + *startTimeRender;
+ err = filterState->mCodec->renderOutputBufferAndRelease(
+ index, presentationTimeUs);
+ } else {
+ err = filterState->mCodec->renderOutputBufferAndRelease(index);
+ }
+
+ if (kTestFlush && outputFramesSinceFlush >= kFlushAfterFrames) {
+ outputFramesSinceFlush = 0;
+
+ // releasing the buffer dequeued before flush should cause an error
+ // if so, the frame will also be skipped in output stream
+ if (err == OK) {
+ ALOGE("FAIL: release after flush returned OK");
+ } else if (err != -EACCES) {
+ ALOGE("releaseOutputBuffer after flush returned %d, "
+ "expected -EACCES (-13)", err);
+ }
+ } else {
+ CHECK(err == OK);
+ }
+
+ if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+ ALOGV("reached EOS on output.");
+ filterState->mSawOutputEOS = true;
+ }
+}
+
+static int decode(
+ const sp<ALooper> &looper,
+ const char *path,
+ const sp<Surface> &surface,
+ bool renderSurface,
+ bool useTimestamp,
+ FilterType filterType) {
+
+ static int64_t kTimeout = 500ll;
+
+ sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+ if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
+ fprintf(stderr, "unable to instantiate extractor.\n");
+ return 1;
+ }
+
+ KeyedVector<size_t, CodecState> stateByTrack;
+
+ CodecState *vidState = NULL;
+ for (size_t i = 0; i < extractor->countTracks(); ++i) {
+ sp<AMessage> format;
+ status_t err = extractor->getTrackFormat(i, &format);
+ CHECK(err == OK);
+
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+ bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
+ if (!isVideo) {
+ continue;
+ }
+
+ ALOGV("selecting track %zu", i);
+
+ err = extractor->selectTrack(i);
+ CHECK(err == OK);
+
+ CodecState *state =
+ &stateByTrack.editValueAt(stateByTrack.add(i, CodecState()));
+
+ vidState = state;
+
+ state->mNumBuffersDecoded = 0;
+
+ state->mCodec = MediaCodec::CreateByType(
+ looper, mime.c_str(), false /* encoder */);
+
+ CHECK(state->mCodec != NULL);
+
+ err = state->mCodec->configure(
+ format, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+
+ CHECK(err == OK);
+
+ state->mSignalledInputEOS = false;
+ state->mSawOutputEOS = false;
+
+ break;
+ }
+ CHECK(!stateByTrack.isEmpty());
+ CHECK(vidState != NULL);
+ sp<AMessage> vidFormat;
+ vidState->mCodec->getOutputFormat(&vidFormat);
+
+ // set filter to use ARGB8888
+ vidFormat->setInt32("color-format", OMX_COLOR_Format32bitARGB8888);
+ // set app cache directory path
+ vidFormat->setString("cacheDir", "/system/bin");
+
+ // create RenderScript context for RSFilters
+ RSC::sp<RSC::RS> context = new RSC::RS();
+ context->init("/system/bin");
+
+ sp<RenderScriptWrapper::RSFilterCallback> rsFilter;
+
+ // create renderscript wrapper for RSFilters
+ sp<RenderScriptWrapper> rsWrapper = new RenderScriptWrapper;
+ rsWrapper->mContext = context.get();
+
+ CodecState *filterState = new CodecState();
+ filterState->mNumBuffersDecoded = 0;
+
+ sp<AMessage> params = new AMessage();
+
+ switch (filterType) {
+ case FILTERTYPE_ZERO:
+ {
+ filterState->mCodec = MediaCodec::CreateByComponentName(
+ looper, "android.filter.zerofilter");
+ params->setInt32("invert", kInvert);
+ break;
+ }
+ case FILTERTYPE_INTRINSIC_BLUR:
+ {
+ filterState->mCodec = MediaCodec::CreateByComponentName(
+ looper, "android.filter.intrinsicblur");
+ params->setFloat("blur-radius", kBlurRadius);
+ break;
+ }
+ case FILTERTYPE_SATURATION:
+ {
+ filterState->mCodec = MediaCodec::CreateByComponentName(
+ looper, "android.filter.saturation");
+ params->setFloat("saturation", kSaturation);
+ break;
+ }
+ case FILTERTYPE_RS_SATURATION:
+ {
+ SaturationRSFilter *satFilter = new SaturationRSFilter;
+ satFilter->init(context);
+ rsFilter = satFilter;
+ rsWrapper->mCallback = rsFilter;
+ vidFormat->setObject("rs-wrapper", rsWrapper);
+
+ filterState->mCodec = MediaCodec::CreateByComponentName(
+ looper, "android.filter.RenderScript");
+ break;
+ }
+ case FILTERTYPE_RS_NIGHT_VISION:
+ {
+ NightVisionRSFilter *nightVisionFilter = new NightVisionRSFilter;
+ nightVisionFilter->init(context);
+ rsFilter = nightVisionFilter;
+ rsWrapper->mCallback = rsFilter;
+ vidFormat->setObject("rs-wrapper", rsWrapper);
+
+ filterState->mCodec = MediaCodec::CreateByComponentName(
+ looper, "android.filter.RenderScript");
+ break;
+ }
+ case FILTERTYPE_RS_ARGB_TO_RGBA:
+ {
+ ARGBToRGBARSFilter *argbToRgbaFilter = new ARGBToRGBARSFilter;
+ argbToRgbaFilter->init(context);
+ rsFilter = argbToRgbaFilter;
+ rsWrapper->mCallback = rsFilter;
+ vidFormat->setObject("rs-wrapper", rsWrapper);
+
+ filterState->mCodec = MediaCodec::CreateByComponentName(
+ looper, "android.filter.RenderScript");
+ break;
+ }
+ default:
+ {
+ LOG_ALWAYS_FATAL("mediacodec.cpp error: unrecognized FilterType");
+ break;
+ }
+ }
+ CHECK(filterState->mCodec != NULL);
+
+ status_t err = filterState->mCodec->configure(
+ vidFormat /* format */, surface, NULL /* crypto */, 0 /* flags */);
+ CHECK(err == OK);
+
+ filterState->mSignalledInputEOS = false;
+ filterState->mSawOutputEOS = false;
+
+ int64_t startTimeUs = ALooper::GetNowUs();
+ int64_t startTimeRender = -1;
+
+ for (size_t i = 0; i < stateByTrack.size(); ++i) {
+ CodecState *state = &stateByTrack.editValueAt(i);
+
+ sp<MediaCodec> codec = state->mCodec;
+
+ CHECK_EQ((status_t)OK, codec->start());
+
+ CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
+ CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));
+
+ ALOGV("got %zu input and %zu output buffers",
+ state->mInBuffers.size(), state->mOutBuffers.size());
+ }
+
+ CHECK_EQ((status_t)OK, filterState->mCodec->setParameters(params));
+
+ if (kTestFlush) {
+ status_t flushErr = filterState->mCodec->flush();
+ if (flushErr == OK) {
+ ALOGE("FAIL: Flush before start returned OK");
+ } else {
+ ALOGV("Flush before start returned status %d, usually ENOSYS (-38)",
+ flushErr);
+ }
+ }
+
+ CHECK_EQ((status_t)OK, filterState->mCodec->start());
+ CHECK_EQ((status_t)OK, filterState->mCodec->getInputBuffers(
+ &filterState->mInBuffers));
+ CHECK_EQ((status_t)OK, filterState->mCodec->getOutputBuffers(
+ &filterState->mOutBuffers));
+
+ if (kTestFlush) {
+ status_t flushErr = filterState->mCodec->flush();
+ if (flushErr != OK) {
+ ALOGE("FAIL: Flush after start returned %d, expect OK (0)",
+ flushErr);
+ } else {
+ ALOGV("Flush immediately after start OK");
+ }
+ }
+
+ List<DecodedFrame> decodedFrameIndices;
+
+ // loop until decoder reaches EOS
+ bool sawInputEOS = false;
+ bool sawOutputEOSOnAllTracks = false;
+ while (!sawOutputEOSOnAllTracks) {
+ if (!sawInputEOS) {
+ size_t trackIndex;
+ status_t err = extractor->getSampleTrackIndex(&trackIndex);
+
+ if (err != OK) {
+ ALOGV("saw input eos");
+ sawInputEOS = true;
+ } else {
+ CodecState *state = &stateByTrack.editValueFor(trackIndex);
+
+ size_t index;
+ err = state->mCodec->dequeueInputBuffer(&index, kTimeout);
+
+ if (err == OK) {
+ ALOGV("filling input buffer %zu", index);
+
+ const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+
+ err = extractor->readSampleData(buffer);
+ CHECK(err == OK);
+
+ int64_t timeUs;
+ err = extractor->getSampleTime(&timeUs);
+ CHECK(err == OK);
+
+ uint32_t bufferFlags = 0;
+
+ err = state->mCodec->queueInputBuffer(
+ index, 0 /* offset */, buffer->size(),
+ timeUs, bufferFlags);
+
+ CHECK(err == OK);
+
+ extractor->advance();
+ } else {
+ CHECK_EQ(err, -EAGAIN);
+ }
+ }
+ } else {
+ for (size_t i = 0; i < stateByTrack.size(); ++i) {
+ CodecState *state = &stateByTrack.editValueAt(i);
+
+ if (!state->mSignalledInputEOS) {
+ size_t index;
+ status_t err =
+ state->mCodec->dequeueInputBuffer(&index, kTimeout);
+
+ if (err == OK) {
+ ALOGV("signalling input EOS on track %zu", i);
+
+ err = state->mCodec->queueInputBuffer(
+ index, 0 /* offset */, 0 /* size */,
+ 0ll /* timeUs */, MediaCodec::BUFFER_FLAG_EOS);
+
+ CHECK(err == OK);
+
+ state->mSignalledInputEOS = true;
+ } else {
+ CHECK_EQ(err, -EAGAIN);
+ }
+ }
+ }
+ }
+
+ sawOutputEOSOnAllTracks = true;
+ for (size_t i = 0; i < stateByTrack.size(); ++i) {
+ CodecState *state = &stateByTrack.editValueAt(i);
+
+ if (state->mSawOutputEOS) {
+ continue;
+ } else {
+ sawOutputEOSOnAllTracks = false;
+ }
+
+ DecodedFrame frame;
+ status_t err = state->mCodec->dequeueOutputBuffer(
+ &frame.index, &frame.offset, &frame.size,
+ &frame.presentationTimeUs, &frame.flags, kTimeout);
+
+ if (err == OK) {
+ ALOGV("draining decoded buffer %zu, time = %lld us",
+ frame.index, frame.presentationTimeUs);
+
+ ++(state->mNumBuffersDecoded);
+
+ decodedFrameIndices.push_back(frame);
+
+ if (frame.flags & MediaCodec::BUFFER_FLAG_EOS) {
+ ALOGV("reached EOS on decoder output.");
+ state->mSawOutputEOS = true;
+ }
+
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+ ALOGV("INFO_OUTPUT_BUFFERS_CHANGED");
+ CHECK_EQ((status_t)OK, state->mCodec->getOutputBuffers(
+ &state->mOutBuffers));
+
+ ALOGV("got %zu output buffers", state->mOutBuffers.size());
+ } else if (err == INFO_FORMAT_CHANGED) {
+ sp<AMessage> format;
+ CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));
+
+ ALOGV("INFO_FORMAT_CHANGED: %s",
+ format->debugString().c_str());
+ } else {
+ CHECK_EQ(err, -EAGAIN);
+ }
+
+ tryCopyDecodedBuffer(&decodedFrameIndices, filterState, vidState);
+
+ tryDrainOutputBuffer(
+ filterState, surface, renderSurface,
+ useTimestamp, &startTimeRender);
+ }
+ }
+
+ // after EOS on decoder, let filter reach EOS
+ while (!filterState->mSawOutputEOS) {
+ tryCopyDecodedBuffer(&decodedFrameIndices, filterState, vidState);
+
+ tryDrainOutputBuffer(
+ filterState, surface, renderSurface,
+ useTimestamp, &startTimeRender);
+ }
+
+ int64_t elapsedTimeUs = ALooper::GetNowUs() - startTimeUs;
+
+ for (size_t i = 0; i < stateByTrack.size(); ++i) {
+ CodecState *state = &stateByTrack.editValueAt(i);
+
+ CHECK_EQ((status_t)OK, state->mCodec->release());
+
+ printf("track %zu: %" PRId64 " frames decoded and filtered, "
+ "%.2f fps.\n", i, state->mNumBuffersDecoded,
+ state->mNumBuffersDecoded * 1E6 / elapsedTimeUs);
+ }
+
+ return 0;
+}
+
+} // namespace android
+
+int main(int argc, char **argv) {
+ using namespace android;
+
+ const char *me = argv[0];
+
+ bool useSurface = false;
+ bool renderSurface = false;
+ bool useTimestamp = false;
+ FilterType filterType = FILTERTYPE_ZERO;
+
+ int res;
+ while ((res = getopt(argc, argv, "bcnrszTRSh")) >= 0) {
+ switch (res) {
+ case 'b':
+ {
+ filterType = FILTERTYPE_INTRINSIC_BLUR;
+ break;
+ }
+ case 'c':
+ {
+ filterType = FILTERTYPE_RS_ARGB_TO_RGBA;
+ break;
+ }
+ case 'n':
+ {
+ filterType = FILTERTYPE_RS_NIGHT_VISION;
+ break;
+ }
+ case 'r':
+ {
+ filterType = FILTERTYPE_RS_SATURATION;
+ break;
+ }
+ case 's':
+ {
+ filterType = FILTERTYPE_SATURATION;
+ break;
+ }
+ case 'z':
+ {
+ filterType = FILTERTYPE_ZERO;
+ break;
+ }
+ case 'T':
+ {
+ useTimestamp = true;
+ }
+ // fall through
+ case 'R':
+ {
+ renderSurface = true;
+ }
+ // fall through
+ case 'S':
+ {
+ useSurface = true;
+ break;
+ }
+ case '?':
+ case 'h':
+ default:
+ {
+ usage(me);
+ break;
+ }
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1) {
+ usage(me);
+ }
+
+ ProcessState::self()->startThreadPool();
+
+ DataSource::RegisterDefaultSniffers();
+
+ android::sp<ALooper> looper = new ALooper;
+ looper->start();
+
+ android::sp<SurfaceComposerClient> composerClient;
+ android::sp<SurfaceControl> control;
+ android::sp<Surface> surface;
+
+ if (useSurface) {
+ composerClient = new SurfaceComposerClient;
+ CHECK_EQ((status_t)OK, composerClient->initCheck());
+
+ android::sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
+ ISurfaceComposer::eDisplayIdMain));
+ DisplayInfo info;
+ SurfaceComposerClient::getDisplayInfo(display, &info);
+ ssize_t displayWidth = info.w;
+ ssize_t displayHeight = info.h;
+
+ ALOGV("display is %zd x %zd", displayWidth, displayHeight);
+
+ control = composerClient->createSurface(
+ String8("A Surface"), displayWidth, displayHeight,
+ PIXEL_FORMAT_RGBA_8888, 0);
+
+ CHECK(control != NULL);
+ CHECK(control->isValid());
+
+ SurfaceComposerClient::openGlobalTransaction();
+ CHECK_EQ((status_t)OK, control->setLayer(INT_MAX));
+ CHECK_EQ((status_t)OK, control->show());
+ SurfaceComposerClient::closeGlobalTransaction();
+
+ surface = control->getSurface();
+ CHECK(surface != NULL);
+ }
+
+ decode(looper, argv[0], surface, renderSurface, useTimestamp, filterType);
+
+ if (useSurface) {
+ composerClient->dispose();
+ }
+
+ looper->stop();
+
+ return 0;
+}
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index f4a33e8..461b56c 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -17,6 +17,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "muxer"
#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <utils/Log.h>
#include <binder/ProcessState.h>
@@ -72,8 +75,15 @@ static int muxing(
ALOGV("input file %s, output file %s", path, outputFileName);
ALOGV("useAudio %d, useVideo %d", useAudio, useVideo);
- sp<MediaMuxer> muxer = new MediaMuxer(outputFileName,
+ int fd = open(outputFileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+
+ if (fd < 0) {
+ ALOGE("couldn't open file");
+ return fd;
+ }
+ sp<MediaMuxer> muxer = new MediaMuxer(fd,
MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ close(fd);
size_t trackCount = extractor->countTracks();
// Map the extractor's track index to the muxer's track index.
diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp
index 9f547c7..2ad40bd 100644
--- a/cmds/stagefright/recordvideo.cpp
+++ b/cmds/stagefright/recordvideo.cpp
@@ -17,6 +17,10 @@
#include "SineSource.h"
#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
#include <binder/ProcessState.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/AudioPlayer.h>
@@ -300,7 +304,13 @@ int main(int argc, char **argv) {
client.interface(), enc_meta, true /* createEncoder */, source,
0, preferSoftwareCodec ? OMXCodec::kPreferSoftwareCodecs : 0);
- sp<MPEG4Writer> writer = new MPEG4Writer(fileName);
+ int fd = open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (fd < 0) {
+ fprintf(stderr, "couldn't open file");
+ return 1;
+ }
+ sp<MPEG4Writer> writer = new MPEG4Writer(fd);
+ close(fd);
writer->addSource(encoder);
int64_t start = systemTime();
CHECK_EQ((status_t)OK, writer->start());
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 81edcb4..318b56d 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -19,6 +19,8 @@
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
//#define LOG_NDEBUG 0
#define LOG_TAG "stagefright"
@@ -506,8 +508,13 @@ static void writeSourcesToMP4(
sp<MPEG4Writer> writer =
new MPEG4Writer(gWriteMP4Filename.string());
#else
+ int fd = open(gWriteMP4Filename.string(), O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (fd < 0) {
+ fprintf(stderr, "couldn't open file");
+ return;
+ }
sp<MPEG2TSWriter> writer =
- new MPEG2TSWriter(gWriteMP4Filename.string());
+ new MPEG2TSWriter(fd);
#endif
// at most one minute.
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index fd51b8f..2e1ed6c 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -79,9 +79,7 @@ public:
size_t size; // input/output in bytes == frameCount * frameSize
// on input it is unused
// on output is the number of bytes actually filled
- // FIXME this is redundant with respect to frameCount,
- // and TRANSFER_OBTAIN mode is broken for 8-bit data
- // since we don't define the frame format
+ // FIXME this is redundant with respect to frameCount.
union {
void* raw;
@@ -154,9 +152,9 @@ public:
* streamType: Select the type of audio stream this track is attached to
* (e.g. AUDIO_STREAM_MUSIC).
* sampleRate: Data source sampling rate in Hz.
- * format: Audio format. For mixed tracks, any PCM format supported by server is OK
- * or AUDIO_FORMAT_PCM_8_BIT which is handled on client side. For direct
- * and offloaded tracks, the possible format(s) depends on the output sink.
+ * format: Audio format. For mixed tracks, any PCM format supported by server is OK.
+ * For direct and offloaded tracks, the possible format(s) depends on the
+ * output sink.
* channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
* frameCount: Minimum size of track PCM buffer in frames. This defines the
* application's contribution to the
@@ -193,7 +191,6 @@ public:
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
- * The format must not be 8-bit linear PCM.
* Data to be rendered is passed in a shared memory buffer
* identified by the argument sharedBuffer, which must be non-0.
* The memory should be initialized to the desired data before calling start().
@@ -614,6 +611,7 @@ protected:
void pause(); // suspend thread from execution at next loop boundary
void resume(); // allow thread to execute, if not requested to exit
+ void wake(); // wake to handle changed notification conditions.
private:
void pauseInternal(nsecs_t ns = 0LL);
@@ -628,7 +626,9 @@ protected:
bool mPaused; // whether thread is requested to pause at next loop entry
bool mPausedInt; // whether thread internally requests pause
nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
- bool mIgnoreNextPausedInt; // whether to ignore next mPausedInt request
+ bool mIgnoreNextPausedInt; // skip any internal pause and go immediately
+ // to processAudioBuffer() as state may have changed
+ // since pause time calculated.
};
// body of AudioTrackThread::threadLoop()
@@ -680,7 +680,7 @@ protected:
float mVolume[2];
float mSendLevel;
- mutable uint32_t mSampleRate; // mutable because getSampleRate() can update it.
+ mutable uint32_t mSampleRate; // mutable because getSampleRate() can update it
size_t mFrameCount; // corresponds to current IAudioTrack, value is
// reported back by AudioFlinger to the client
size_t mReqFrameCount; // frame count to request the first or next time
@@ -698,10 +698,7 @@ protected:
const audio_offload_info_t* mOffloadInfo;
audio_attributes_t mAttributes;
- // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data. For 8-bit PCM data, it's
- // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
- size_t mFrameSize; // app-level frame size
- size_t mFrameSizeAF; // AudioFlinger frame size
+ size_t mFrameSize; // frame size in bytes
status_t mStatus;
@@ -732,13 +729,20 @@ protected:
bool mRefreshRemaining; // processAudioBuffer() should refresh
// mRemainingFrames and mRetryOnPartialBuffer
+ // used for static track cbf and restoration
+ int32_t mLoopCount; // last setLoop loopCount; zero means disabled
+ uint32_t mLoopStart; // last setLoop loopStart
+ uint32_t mLoopEnd; // last setLoop loopEnd
+ int32_t mLoopCountNotified; // the last loopCount notified by callback.
+ // mLoopCountNotified counts down, matching
+ // the remaining loop count for static track
+ // playback.
+
// These are private to processAudioBuffer(), and are not protected by a lock
uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
uint32_t mObservedSequence; // last observed value of mSequence
- uint32_t mLoopPeriod; // in frames, zero means looping is disabled
-
uint32_t mMarkerPosition; // in wrapping (overflow) frame units
bool mMarkerReached;
uint32_t mNewPosition; // in frames
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index b1ed7b0..64a3212 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -171,6 +171,8 @@ int EffectGetDescriptor(const effect_uuid_t *pEffectUuid, effect_descriptor_t *p
////////////////////////////////////////////////////////////////////////////////
int EffectIsNullUuid(const effect_uuid_t *pEffectUuid);
+int EffectDumpEffects(int fd);
+
#if __cplusplus
} // extern "C"
#endif
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 67b599a..49a3d61 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -49,7 +49,8 @@ public:
virtual sp<IMediaRecorder> createMediaRecorder() = 0;
virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
- virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0;
+ virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0)
+ = 0;
virtual sp<IOMX> getOMX() = 0;
virtual sp<ICrypto> makeCrypto() = 0;
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 3e67550..509c06b 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -41,7 +41,6 @@ public:
virtual status_t setOutputFormat(int of) = 0;
virtual status_t setVideoEncoder(int ve) = 0;
virtual status_t setAudioEncoder(int ae) = 0;
- virtual status_t setOutputFile(const char* path) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
virtual status_t setVideoSize(int width, int height) = 0;
virtual status_t setVideoFrameRate(int frames_per_second) = 0;
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index d7ac302..f55063e 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -43,7 +43,6 @@ struct MediaRecorderBase {
virtual status_t setCamera(const sp<ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy) = 0;
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
- virtual status_t setOutputFile(const char *path) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
virtual status_t setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;}
virtual status_t setParameters(const String8& params) = 0;
diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h
index 04c5fd0..d423962 100644
--- a/include/media/SingleStateQueue.h
+++ b/include/media/SingleStateQueue.h
@@ -21,6 +21,7 @@
// Non-blocking single-reader / single-writer multi-word atomic load / store
#include <stdint.h>
+#include <cutils/atomic.h>
namespace android {
@@ -31,6 +32,12 @@ public:
class Mutator;
class Observer;
+ enum SSQ_STATUS {
+ SSQ_PENDING, /* = 0 */
+ SSQ_READ,
+ SSQ_DONE,
+ };
+
struct Shared {
// needs to be part of a union so don't define constructor or destructor
@@ -41,28 +48,56 @@ private:
void init() { mAck = 0; mSequence = 0; }
volatile int32_t mAck;
-#if 0
- int mPad[7];
- // cache line boundary
-#endif
volatile int32_t mSequence;
T mValue;
};
class Mutator {
public:
- Mutator(Shared *shared);
- /*virtual*/ ~Mutator() { }
+ Mutator(Shared *shared)
+ : mSequence(0), mShared(shared)
+ {
+ // exactly one of Mutator and Observer must initialize, currently it is Observer
+ // shared->init();
+ }
// push new value onto state queue, overwriting previous value;
// returns a sequence number which can be used with ack()
- int32_t push(const T& value);
-
- // return true if most recent push has been observed
- bool ack();
+ int32_t push(const T& value)
+ {
+ Shared *shared = mShared;
+ int32_t sequence = mSequence;
+ sequence++;
+ android_atomic_acquire_store(sequence, &shared->mSequence);
+ shared->mValue = value;
+ sequence++;
+ android_atomic_release_store(sequence, &shared->mSequence);
+ mSequence = sequence;
+ // consider signalling a futex here, if we know that observer is waiting
+ return sequence;
+ }
+
+ // returns the status of the last state push. This may be a stale value.
+ //
+ // SSQ_PENDING, or 0, means it has not been observed
+ // SSQ_READ means it has been read
+ // SSQ_DONE means it has been acted upon, after Observer::done() is called
+ enum SSQ_STATUS ack() const
+ {
+ // in the case of SSQ_DONE, prevent any subtle data-races of subsequent reads
+ // being performed (out-of-order) before the ack read, should the caller be
+ // depending on sequentiality of reads.
+ const int32_t ack = android_atomic_acquire_load(&mShared->mAck);
+ return ack - mSequence & ~1 ? SSQ_PENDING /* seq differ */ :
+ ack & 1 ? SSQ_DONE : SSQ_READ;
+ }
// return true if a push with specified sequence number or later has been observed
- bool ack(int32_t sequence);
+ bool ack(int32_t sequence) const
+ {
+ // this relies on 2's complement rollover to detect an ancient sequence number
+ return mShared->mAck - sequence >= 0;
+ }
private:
int32_t mSequence;
@@ -71,11 +106,54 @@ private:
class Observer {
public:
- Observer(Shared *shared);
- /*virtual*/ ~Observer() { }
+ Observer(Shared *shared)
+ : mSequence(0), mSeed(1), mShared(shared)
+ {
+ // exactly one of Mutator and Observer must initialize, currently it is Observer
+ shared->init();
+ }
// return true if value has changed
- bool poll(T& value);
+ bool poll(T& value)
+ {
+ Shared *shared = mShared;
+ int32_t before = shared->mSequence;
+ if (before == mSequence) {
+ return false;
+ }
+ for (int tries = 0; ; ) {
+ const int MAX_TRIES = 5;
+ if (before & 1) {
+ if (++tries >= MAX_TRIES) {
+ return false;
+ }
+ before = shared->mSequence;
+ } else {
+ android_memory_barrier();
+ T temp = shared->mValue;
+ int32_t after = android_atomic_release_load(&shared->mSequence);
+ if (after == before) {
+ value = temp;
+ shared->mAck = before;
+ mSequence = before; // mSequence is even after poll success
+ return true;
+ }
+ if (++tries >= MAX_TRIES) {
+ return false;
+ }
+ before = after;
+ }
+ }
+ }
+
+ // (optional) used to indicate to the Mutator that the state that has been polled
+ // has also been acted upon.
+ void done()
+ {
+ const int32_t ack = mShared->mAck + 1;
+ // ensure all previous writes have been performed.
+ android_atomic_release_store(ack, &mShared->mAck); // mSequence is odd after "done"
+ }
private:
int32_t mSequence;
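
Note: the Mutator and Observer methods are now defined inline above, and done() plus the SSQ_STATUS form of ack() are new. A hedged usage sketch follows, assuming the enclosing template is SingleStateQueue<T> as declared elsewhere in this header and that Shared is placed in memory visible to both sides; the LoopState payload and function names are illustrative only.

#include <stdint.h>
#include <media/SingleStateQueue.h>

struct LoopState {               // example payload; any trivially-copyable struct works
    uint32_t loopStart, loopEnd;
    int32_t  loopCount;
};

typedef android::SingleStateQueue<LoopState> LoopStateQueue;

void example(LoopStateQueue::Shared *shared /* in shared memory */) {
    LoopStateQueue::Observer observer(shared);   // reader side; initializes Shared
    LoopStateQueue::Mutator  mutator(shared);    // writer side

    LoopState s = { 0, 48000, 3 };
    mutator.push(s);                             // overwrite-latest, non-blocking

    LoopState polled;
    if (observer.poll(polled)) {                 // true if a newer state was read
        // ... act on the state ...
        observer.done();                         // mutator.ack() will now report SSQ_DONE
    }
}
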
diff --git a/include/media/StringArray.h b/include/media/StringArray.h
index ae47085..48d98bf 100644
--- a/include/media/StringArray.h
+++ b/include/media/StringArray.h
@@ -16,7 +16,7 @@
//
// Sortable array of strings. STL-ish, but STL-free.
-//
+//
#ifndef _LIBS_MEDIA_STRING_ARRAY_H
#define _LIBS_MEDIA_STRING_ARRAY_H
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index b0a62a7..74a6469 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -221,7 +221,6 @@ public:
status_t setOutputFormat(int of);
status_t setVideoEncoder(int ve);
status_t setAudioEncoder(int ae);
- status_t setOutputFile(const char* path);
status_t setOutputFile(int fd, int64_t offset, int64_t length);
status_t setVideoSize(int width, int height);
status_t setVideoFrameRate(int frames_per_second);
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index d422576..d9bbc8d 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -231,7 +231,8 @@ public:
virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
protected:
- NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { }
+ NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0)
+ { }
virtual ~NBAIO_Sink() { }
// Implementations are free to ignore these if they don't need them
@@ -322,7 +323,8 @@ public:
virtual void onTimestamp(const AudioTimestamp& timestamp) { }
protected:
- NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { }
+ NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0)
+ { }
virtual ~NBAIO_Source() { }
// Implementations are free to ignore these if they don't need them
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index bcbbc04..1297b51 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -21,7 +21,7 @@
#include <binder/IMemory.h>
#include <utils/Mutex.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
namespace android {
diff --git a/include/media/stagefright/AACWriter.h b/include/media/stagefright/AACWriter.h
index d22707a..86417a5 100644
--- a/include/media/stagefright/AACWriter.h
+++ b/include/media/stagefright/AACWriter.h
@@ -27,7 +27,6 @@ struct MediaSource;
struct MetaData;
struct AACWriter : public MediaWriter {
- AACWriter(const char *filename);
AACWriter(int fd);
status_t initCheck() const;
diff --git a/include/media/stagefright/AMRWriter.h b/include/media/stagefright/AMRWriter.h
index 392f968..bac878b 100644
--- a/include/media/stagefright/AMRWriter.h
+++ b/include/media/stagefright/AMRWriter.h
@@ -29,7 +29,6 @@ struct MediaSource;
struct MetaData;
struct AMRWriter : public MediaWriter {
- AMRWriter(const char *filename);
AMRWriter(int fd);
status_t initCheck() const;
diff --git a/include/media/stagefright/MPEG2TSWriter.h b/include/media/stagefright/MPEG2TSWriter.h
index 2e2922e..3d7960b 100644
--- a/include/media/stagefright/MPEG2TSWriter.h
+++ b/include/media/stagefright/MPEG2TSWriter.h
@@ -29,7 +29,6 @@ struct ABuffer;
struct MPEG2TSWriter : public MediaWriter {
MPEG2TSWriter(int fd);
- MPEG2TSWriter(const char *filename);
MPEG2TSWriter(
void *cookie,
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 26ce5f9..899b324 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -32,7 +32,6 @@ class MetaData;
class MPEG4Writer : public MediaWriter {
public:
- MPEG4Writer(const char *filename);
MPEG4Writer(int fd);
// Limitations
diff --git a/include/media/stagefright/MediaFilter.h b/include/media/stagefright/MediaFilter.h
new file mode 100644
index 0000000..7b3f700
--- /dev/null
+++ b/include/media/stagefright/MediaFilter.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_FILTER_H_
+#define MEDIA_FILTER_H_
+
+#include <media/stagefright/CodecBase.h>
+
+namespace android {
+
+struct ABuffer;
+struct GraphicBufferListener;
+struct MemoryDealer;
+struct SimpleFilter;
+
+struct MediaFilter : public CodecBase {
+ MediaFilter();
+
+ virtual void setNotificationMessage(const sp<AMessage> &msg);
+
+ virtual void initiateAllocateComponent(const sp<AMessage> &msg);
+ virtual void initiateConfigureComponent(const sp<AMessage> &msg);
+ virtual void initiateCreateInputSurface();
+ virtual void initiateStart();
+ virtual void initiateShutdown(bool keepComponentAllocated = false);
+
+ virtual void signalFlush();
+ virtual void signalResume();
+
+ virtual void signalRequestIDRFrame();
+ virtual void signalSetParameters(const sp<AMessage> &msg);
+ virtual void signalEndOfInputStream();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ struct PortDescription : public CodecBase::PortDescription {
+ virtual size_t countBuffers();
+ virtual IOMX::buffer_id bufferIDAt(size_t index) const;
+ virtual sp<ABuffer> bufferAt(size_t index) const;
+
+ protected:
+ PortDescription();
+
+ private:
+ friend struct MediaFilter;
+
+ Vector<IOMX::buffer_id> mBufferIDs;
+ Vector<sp<ABuffer> > mBuffers;
+
+ void addBuffer(IOMX::buffer_id id, const sp<ABuffer> &buffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
+ };
+
+protected:
+ virtual ~MediaFilter();
+
+private:
+ struct BufferInfo {
+ enum Status {
+ OWNED_BY_US,
+ OWNED_BY_UPSTREAM,
+ };
+
+ IOMX::buffer_id mBufferID;
+ int32_t mGeneration;
+ int32_t mOutputFlags;
+ Status mStatus;
+
+ sp<ABuffer> mData;
+ };
+
+ enum State {
+ UNINITIALIZED,
+ INITIALIZED,
+ CONFIGURED,
+ STARTED,
+ };
+
+ enum {
+ kWhatInputBufferFilled = 'inpF',
+ kWhatOutputBufferDrained = 'outD',
+ kWhatShutdown = 'shut',
+ kWhatFlush = 'flus',
+ kWhatResume = 'resm',
+ kWhatAllocateComponent = 'allo',
+ kWhatConfigureComponent = 'conf',
+ kWhatCreateInputSurface = 'cisf',
+ kWhatSignalEndOfInputStream = 'eois',
+ kWhatStart = 'star',
+ kWhatSetParameters = 'setP',
+ kWhatProcessBuffers = 'proc',
+ };
+
+ enum {
+ kPortIndexInput = 0,
+ kPortIndexOutput = 1
+ };
+
+ // member variables
+ AString mComponentName;
+ State mState;
+ status_t mInputEOSResult;
+ int32_t mWidth, mHeight;
+ int32_t mStride, mSliceHeight;
+ int32_t mColorFormatIn, mColorFormatOut;
+ size_t mMaxInputSize, mMaxOutputSize;
+ int32_t mGeneration;
+ sp<AMessage> mNotify;
+ sp<AMessage> mInputFormat;
+ sp<AMessage> mOutputFormat;
+
+ sp<MemoryDealer> mDealer[2];
+ Vector<BufferInfo> mBuffers[2];
+ Vector<BufferInfo*> mAvailableInputBuffers;
+ Vector<BufferInfo*> mAvailableOutputBuffers;
+ bool mPortEOS[2];
+
+ sp<SimpleFilter> mFilter;
+ sp<GraphicBufferListener> mGraphicBufferListener;
+
+ // helper functions
+ void signalProcessBuffers();
+ void signalError(status_t error);
+
+ status_t allocateBuffersOnPort(OMX_U32 portIndex);
+ BufferInfo *findBufferByID(
+ uint32_t portIndex, IOMX::buffer_id bufferID,
+ ssize_t *index = NULL);
+ void postFillThisBuffer(BufferInfo *info);
+ void postDrainThisBuffer(BufferInfo *info);
+ void postEOS();
+ void sendFormatChange();
+ void requestFillEmptyInput();
+ void processBuffers();
+
+ void onAllocateComponent(const sp<AMessage> &msg);
+ void onConfigureComponent(const sp<AMessage> &msg);
+ void onStart();
+ void onInputBufferFilled(const sp<AMessage> &msg);
+ void onOutputBufferDrained(const sp<AMessage> &msg);
+ void onShutdown(const sp<AMessage> &msg);
+ void onFlush();
+ void onSetParameters(const sp<AMessage> &msg);
+ void onCreateInputSurface();
+ void onInputFrameAvailable();
+ void onSignalEndOfInputStream();
+
+ DISALLOW_EVIL_CONSTRUCTORS(MediaFilter);
+};
+
+} // namespace android
+
+#endif // MEDIA_FILTER_H_
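
The private BufferInfo bookkeeping above follows the usual CodecBase buffer-ownership pattern: a buffer stays OWNED_BY_US until it is posted to the upstream client via postFillThisBuffer()/postDrainThisBuffer(), and flips back when the kWhatInputBufferFilled/kWhatOutputBufferDrained reply names its buffer ID. A minimal standalone sketch of that handoff, using simplified stand-in types rather than MediaFilter's private internals:

    // Illustrative sketch only -- simplified stand-ins, not the MediaFilter internals.
    #include <cstdint>
    #include <vector>

    struct BufferInfo {
        enum Status { OWNED_BY_US, OWNED_BY_UPSTREAM };
        uint32_t mBufferID;
        Status   mStatus;
    };

    // Posting a buffer to the upstream client transfers ownership to it.
    void postToUpstream(BufferInfo &info) {
        info.mStatus = BufferInfo::OWNED_BY_UPSTREAM;
        // ... here MediaFilter would send the fill/drain notification ...
    }

    // The filled/drained reply identifies the buffer by ID and returns ownership.
    BufferInfo *onUpstreamReply(std::vector<BufferInfo> &port, uint32_t bufferID) {
        for (BufferInfo &info : port) {            // findBufferByID() equivalent
            if (info.mBufferID == bufferID) {
                info.mStatus = BufferInfo::OWNED_BY_US;
                return &info;                      // now safe to process or recycle
            }
        }
        return nullptr;                            // unknown ID
    }
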
diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h
index 9da98d9..e6538d1 100644
--- a/include/media/stagefright/MediaMuxer.h
+++ b/include/media/stagefright/MediaMuxer.h
@@ -50,9 +50,6 @@ public:
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
- // Construct the muxer with the output file path.
- MediaMuxer(const char *path, OutputFormat format);
-
// Construct the muxer with the file descriptor. Note that the MediaMuxer
// will close this file at stop().
MediaMuxer(int fd, OutputFormat format);
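
With the path-based constructor gone, callers open the output file themselves and hand over the descriptor; per the comment above, the muxer closes it at stop(). A hypothetical native caller (OUTPUT_FORMAT_MPEG_4 is assumed to be a member of the enum above):

    #include <fcntl.h>
    #include <media/stagefright/MediaMuxer.h>

    using namespace android;

    // The muxer takes over 'fd' and closes it in stop(), so the caller must not.
    sp<MediaMuxer> createMp4Muxer(const char *path) {
        int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);
        if (fd < 0) {
            return NULL;
        }
        return new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
    }
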
diff --git a/include/media/stagefright/RenderScriptWrapper.h b/include/media/stagefright/RenderScriptWrapper.h
new file mode 100644
index 0000000..b42649e
--- /dev/null
+++ b/include/media/stagefright/RenderScriptWrapper.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RENDERSCRIPT_WRAPPER_H_
+#define RENDERSCRIPT_WRAPPER_H_
+
+#include <RenderScript.h>
+
+namespace android {
+
+struct RenderScriptWrapper : public RefBase {
+public:
+ struct RSFilterCallback : public RefBase {
+ public:
+ // called by RSFilter to process each input buffer
+ virtual status_t processBuffers(
+ RSC::Allocation* inBuffer,
+ RSC::Allocation* outBuffer) = 0;
+
+ virtual status_t handleSetParameters(const sp<AMessage> &msg) = 0;
+ };
+
+ sp<RSFilterCallback> mCallback;
+ RSC::sp<RSC::RS> mContext;
+};
+
+} // namespace android
+
+#endif // RENDERSCRIPT_WRAPPER_H_
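
A filter supplies the two pure virtuals above; the wrapper merely bundles the callback with an RS context. A skeleton implementation (illustrative only; a real filter would launch a ScriptC kernel here, e.g. one generated from the .rs files under cmds/stagefright/filters/):

    #include <media/stagefright/RenderScriptWrapper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    struct SkeletonFilterCallback : public RenderScriptWrapper::RSFilterCallback {
        // Called once per buffer; this stub performs no processing at all.
        virtual status_t processBuffers(
                RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
            (void)inBuffer;
            (void)outBuffer;   // a real filter would run its kernel from in to out here
            return OK;
        }

        // No tunable parameters in this sketch.
        virtual status_t handleSetParameters(const sp<AMessage> &msg) {
            (void)msg;
            return OK;
        }
    };
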
diff --git a/include/ndk/NdkMediaCodec.h b/include/ndk/NdkMediaCodec.h
index c07f4c9..4f6a1ef 100644
--- a/include/ndk/NdkMediaCodec.h
+++ b/include/ndk/NdkMediaCodec.h
@@ -142,7 +142,8 @@ media_status_t AMediaCodec_queueSecureInputBuffer(AMediaCodec*,
/**
* Get the index of the next available buffer of processed data.
*/
-ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info, int64_t timeoutUs);
+ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info,
+ int64_t timeoutUs);
AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*);
/**
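
For context, a typical drain loop around this prototype (hypothetical caller; only public NDK symbols are used and error handling is trimmed):

    #include <media/NdkMediaCodec.h>

    // Release every output buffer that is currently ready, without rendering.
    void drainOutputs(AMediaCodec *codec) {
        AMediaCodecBufferInfo info;
        for (;;) {
            ssize_t idx = AMediaCodec_dequeueOutputBuffer(codec, &info, 2000 /* timeoutUs */);
            if (idx >= 0) {
                AMediaCodec_releaseOutputBuffer(codec, idx, false /* render */);
            } else if (idx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
                AMediaFormat *fmt = AMediaCodec_getOutputFormat(codec);
                AMediaFormat_delete(fmt);   // just consume the new format in this sketch
            } else {
                break;   // try-again-later or output-buffers-changed: stop for now
            }
        }
    }
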
diff --git a/include/ndk/NdkMediaExtractor.h b/include/ndk/NdkMediaExtractor.h
index 7a4e702..7324d31 100644
--- a/include/ndk/NdkMediaExtractor.h
+++ b/include/ndk/NdkMediaExtractor.h
@@ -55,12 +55,14 @@ media_status_t AMediaExtractor_delete(AMediaExtractor*);
/**
* Set the file descriptor from which the extractor will read.
*/
-media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset, off64_t length);
+media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset,
+ off64_t length);
/**
* Set the URI from which the extractor will read.
*/
-media_status_t AMediaExtractor_setDataSource(AMediaExtractor*, const char *location); // TODO support headers
+media_status_t AMediaExtractor_setDataSource(AMediaExtractor*, const char *location);
+ // TODO support headers
/**
* Return the number of tracks in the previously specified media file
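
A hypothetical caller of the fd-based setter above, which passes an explicit offset/length window so the extractor never needs a file name:

    #include <fcntl.h>
    #include <unistd.h>
    #include <media/NdkMediaExtractor.h>

    // Open a media file and report its track count; returns false on failure.
    bool countTracks(const char *path, size_t *numTracks) {
        int fd = open(path, O_RDONLY);
        if (fd < 0) {
            return false;
        }
        off64_t length = lseek64(fd, 0, SEEK_END);
        AMediaExtractor *ex = AMediaExtractor_new();
        media_status_t err = AMediaExtractor_setDataSourceFd(ex, fd, 0 /* offset */, length);
        if (err == AMEDIA_OK) {
            *numTracks = AMediaExtractor_getTrackCount(ex);
        }
        AMediaExtractor_delete(ex);
        close(fd);   // the caller still owns fd; the extractor keeps its own reference
        return err == AMEDIA_OK;
    }
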
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 31dff36..7143f1a 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -24,9 +24,8 @@
#include <utils/threads.h>
#include <utils/Log.h>
#include <utils/RefBase.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
#include <media/SingleStateQueue.h>
-#include <private/media/StaticAudioTrackState.h>
namespace android {
@@ -61,15 +60,57 @@ struct AudioTrackSharedStreaming {
volatile uint32_t mUnderrunFrames; // server increments for each unavailable but desired frame
};
+// Represents a single state of an AudioTrack that was created in static mode (shared memory buffer
+// supplied by the client). This state needs to be communicated from the client to server. As this
+// state is too large to be updated atomically without a mutex, and mutexes aren't allowed here, the
+// state is wrapped by a SingleStateQueue.
+struct StaticAudioTrackState {
+ // Do not define constructors, destructors, or virtual methods as this is part of a
+ // union in shared memory and they will not get called properly.
+
+ // These fields should both be size_t, but since they are located in shared memory we
+ // force to 32-bit. The client and server may have different typedefs for size_t.
+
+ // The state carries a sequence counter for loop changes and one for position changes, so
+ // the server can tell which of the two was set more recently (the greater value is newer).
+ // On each change the counter advances to max(mLoopSequence, mPositionSequence) + 1.
+
+ uint32_t mLoopStart;
+ uint32_t mLoopEnd;
+ int32_t mLoopCount;
+ uint32_t mLoopSequence; // a sequence counter to indicate changes to loop
+ uint32_t mPosition;
+ uint32_t mPositionSequence; // a sequence counter to indicate changes to position
+};
+
typedef SingleStateQueue<StaticAudioTrackState> StaticAudioTrackSingleStateQueue;
+struct StaticAudioTrackPosLoop {
+ // Do not define constructors, destructors, or virtual methods as this is part of a
+ // union in shared memory and they will not get called properly.
+
+ // These fields should both be size_t, but since they are located in shared memory we
+ // force to 32-bit. The client and server may have different typedefs for size_t.
+
+ // This struct information is stored in a single state queue to communicate the
+ // static AudioTrack server state to the client while data is consumed.
+ // It is smaller than StaticAudioTrackState to prevent unnecessary information from
+ // being sent.
+
+ uint32_t mBufferPosition;
+ int32_t mLoopCount;
+};
+
+typedef SingleStateQueue<StaticAudioTrackPosLoop> StaticAudioTrackPosLoopQueue;
+
struct AudioTrackSharedStatic {
+ // client requests to the server for loop or position changes.
StaticAudioTrackSingleStateQueue::Shared
mSingleStateQueue;
- // This field should be a size_t, but since it is located in shared memory we
- // force to 32-bit. The client and server may have different typedefs for size_t.
- uint32_t mBufferPosition; // updated asynchronously by server,
- // "for entertainment purposes only"
+ // position info updated asynchronously by server and read by client,
+ // "for entertainment purposes only"
+ StaticAudioTrackPosLoopQueue::Shared
+ mPosLoopQueue;
};
// ----------------------------------------------------------------------------
@@ -96,7 +137,8 @@ struct audio_track_cblk_t
uint32_t mServer; // Number of filled frames consumed by server (mIsOut),
// or filled frames provided by server (!mIsOut).
// It is updated asynchronously by server without a barrier.
- // The value should be used "for entertainment purposes only",
+ // The value should be used
+ // "for entertainment purposes only",
// which means don't make important decisions based on it.
uint32_t mPad1; // unused
@@ -313,8 +355,28 @@ public:
virtual void flush();
#define MIN_LOOP 16 // minimum length of each loop iteration in frames
+
+ // setLoop(), setBufferPosition(), and setBufferPositionAndLoop() set the
+ // static buffer position and looping parameters. These commands are not
+ // synchronous (they do not wait or block); instead they take effect at the
+ // next buffer data read from the server side. However, the client side
+ // getters will read a cached version of the position and loop variables
+ // until the setting takes effect.
+ //
+ // setBufferPositionAndLoop() is equivalent to calling, in order, setLoop() and
+ // setBufferPosition().
+ //
+ // The functions should not be relied upon to do parameter or state checking.
+ // That is done at the AudioTrack level.
+
void setLoop(size_t loopStart, size_t loopEnd, int loopCount);
+ void setBufferPosition(size_t position);
+ void setBufferPositionAndLoop(size_t position, size_t loopStart, size_t loopEnd,
+ int loopCount);
size_t getBufferPosition();
+ // getBufferPositionAndLoopCount() provides the proper snapshot of
+ // position and loopCount together.
+ void getBufferPositionAndLoopCount(size_t *position, int *loopCount);
virtual size_t getMisalignment() {
return 0;
@@ -326,7 +388,9 @@ public:
private:
StaticAudioTrackSingleStateQueue::Mutator mMutator;
- size_t mBufferPosition; // so that getBufferPosition() appears to be synchronous
+ StaticAudioTrackPosLoopQueue::Observer mPosLoopObserver;
+ StaticAudioTrackState mState; // last communicated state to server
+ StaticAudioTrackPosLoop mPosLoop; // snapshot of position and loop.
};
// ----------------------------------------------------------------------------
@@ -447,10 +511,13 @@ public:
virtual uint32_t getUnderrunFrames() const { return 0; }
private:
+ status_t updateStateWithLoop(StaticAudioTrackState *localState,
+ const StaticAudioTrackState &update) const;
+ status_t updateStateWithPosition(StaticAudioTrackState *localState,
+ const StaticAudioTrackState &update) const;
ssize_t pollPosition(); // poll for state queue update, and return current position
StaticAudioTrackSingleStateQueue::Observer mObserver;
- size_t mPosition; // server's current play position in frames, relative to 0
-
+ StaticAudioTrackPosLoopQueue::Mutator mPosLoopMutator;
size_t mFramesReadySafe; // Assuming size_t read/writes are atomic on 32 / 64 bit
// processors, this is a thread-safe version of
// mFramesReady.
@@ -459,7 +526,8 @@ private:
// can cause a track to appear to have a large number
// of frames. INT64_MAX means an infinite loop.
bool mFramesReadyIsCalledByMultipleThreads;
- StaticAudioTrackState mState;
+ StaticAudioTrackState mState; // Server side state. Any updates from client must be
+ // passed by the mObserver SingleStateQueue.
};
// Proxy used by AudioFlinger for servicing AudioRecord
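
The two sequence counters in StaticAudioTrackState are how the server reconstructs ordering: whichever counter is greater holds the newer change, and each setter advances its own counter past the other. A standalone sketch of that arithmetic (mirroring incrementSequence() and the diffSeq test in AudioTrackShared.cpp further down):

    #include <cstdint>

    // Advance 'self' so that it becomes strictly newer than 'other'.
    static uint32_t incrementSequence(uint32_t self, uint32_t other) {
        int32_t diff = (int32_t)(self - other);
        return (diff >= 0 && diff < INT32_MAX) ? self + 1   // already ahead of other
                                               : other + 1; // behind: jump just past other
    }

    // The server applies the older of the two changes first so the newer one wins.
    static bool loopChangeIsOlderThanPosition(uint32_t loopSeq, uint32_t positionSeq) {
        return (int32_t)(loopSeq - positionSeq) < 0;
    }
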
diff --git a/include/private/media/StaticAudioTrackState.h b/include/private/media/StaticAudioTrackState.h
deleted file mode 100644
index d483061..0000000
--- a/include/private/media/StaticAudioTrackState.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef STATIC_AUDIO_TRACK_STATE_H
-#define STATIC_AUDIO_TRACK_STATE_H
-
-namespace android {
-
-// Represents a single state of an AudioTrack that was created in static mode (shared memory buffer
-// supplied by the client). This state needs to be communicated from the client to server. As this
-// state is too large to be updated atomically without a mutex, and mutexes aren't allowed here, the
-// state is wrapped by a SingleStateQueue.
-struct StaticAudioTrackState {
- // do not define constructors, destructors, or virtual methods
-
- // These fields should both be size_t, but since they are located in shared memory we
- // force to 32-bit. The client and server may have different typedefs for size_t.
- uint32_t mLoopStart;
- uint32_t mLoopEnd;
-
- int mLoopCount;
-};
-
-} // namespace android
-
-#endif // STATIC_AUDIO_TRACK_STATE_H
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index 6d30d64..c310fe2 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -28,6 +28,7 @@
static list_elem_t *gEffectList; // list of effect_entry_t: all currently created effects
static list_elem_t *gLibraryList; // list of lib_entry_t: all currently loaded libraries
+static list_elem_t *gSkippedEffects; // list of effects skipped because of duplicate uuid
// list of effect_descriptor and list of sub effects : all currently loaded
// It does not contain effects without sub effects.
static list_sub_elem_t *gSubEffectList;
@@ -63,10 +64,10 @@ static int findEffect(const effect_uuid_t *type,
lib_entry_t **lib,
effect_descriptor_t **desc);
// To search a subeffect in the gSubEffectList
-int findSubEffect(const effect_uuid_t *uuid,
+static int findSubEffect(const effect_uuid_t *uuid,
lib_entry_t **lib,
effect_descriptor_t **desc);
-static void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len);
+static void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len, int indent);
static int stringToUuid(const char *str, effect_uuid_t *uuid);
static int uuidToString(const effect_uuid_t *uuid, char *str, size_t maxLen);
@@ -237,8 +238,8 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor)
}
#if (LOG_NDEBUG == 0)
- char str[256];
- dumpEffectDescriptor(pDescriptor, str, 256);
+ char str[512];
+ dumpEffectDescriptor(pDescriptor, str, sizeof(str), 0 /* indent */);
ALOGV("EffectQueryEffect() desc:%s", str);
#endif
pthread_mutex_unlock(&gLibLock);
@@ -503,15 +504,31 @@ int loadLibrary(cnode *root, const char *name)
audio_effect_library_t *desc;
list_elem_t *e;
lib_entry_t *l;
+ char path[PATH_MAX];
+ char *str;
+ size_t len;
node = config_find(root, PATH_TAG);
if (node == NULL) {
return -EINVAL;
}
+ // audio_effects.conf always specifies 32 bit lib path: convert to 64 bit path if needed
+ strlcpy(path, node->value, PATH_MAX);
+#ifdef __LP64__
+ str = strstr(path, "/lib/");
+ if (str == NULL)
+ return -EINVAL;
+ len = str - path;
+ path[len] = '\0';
+ strlcat(path, "/lib64/", PATH_MAX);
+ strlcat(path, node->value + len + strlen("/lib/"), PATH_MAX);
+#endif
+ if (strlen(path) >= PATH_MAX - 1)
+ return -EINVAL;
- hdl = dlopen(node->value, RTLD_NOW);
+ hdl = dlopen(path, RTLD_NOW);
if (hdl == NULL) {
- ALOGW("loadLibrary() failed to open %s", node->value);
+ ALOGW("loadLibrary() failed to open %s", path);
goto error;
}
@@ -535,7 +552,7 @@ int loadLibrary(cnode *root, const char *name)
// add entry for library in gLibraryList
l = malloc(sizeof(lib_entry_t));
l->name = strndup(name, PATH_MAX);
- l->path = strndup(node->value, PATH_MAX);
+ l->path = strndup(path, PATH_MAX);
l->handle = hdl;
l->desc = desc;
l->effects = NULL;
@@ -547,7 +564,7 @@ int loadLibrary(cnode *root, const char *name)
e->next = gLibraryList;
gLibraryList = e;
pthread_mutex_unlock(&gLibLock);
- ALOGV("getLibrary() linked library %p for path %s", l, node->value);
+ ALOGV("getLibrary() linked library %p for path %s", l, path);
return 0;
@@ -595,8 +612,8 @@ int addSubEffect(cnode *root)
return -EINVAL;
}
#if (LOG_NDEBUG==0)
- char s[256];
- dumpEffectDescriptor(d, s, 256);
+ char s[512];
+ dumpEffectDescriptor(d, s, sizeof(s), 0 /* indent */);
ALOGV("addSubEffect() read descriptor %p:%s",d, s);
#endif
if (EFFECT_API_VERSION_MAJOR(d->apiVersion) !=
@@ -660,6 +677,13 @@ int loadEffect(cnode *root)
ALOGW("loadEffect() invalid uuid %s", node->value);
return -EINVAL;
}
+ lib_entry_t *tmp;
+ bool skip = false;
+ if (findEffect(NULL, &uuid, &tmp, NULL) == 0) {
+ ALOGW("skipping duplicate uuid %s %s", node->value,
+ node->next ? "and its sub-effects" : "");
+ skip = true;
+ }
d = malloc(sizeof(effect_descriptor_t));
if (l->desc->get_descriptor(&uuid, d) != 0) {
@@ -670,8 +694,8 @@ int loadEffect(cnode *root)
return -EINVAL;
}
#if (LOG_NDEBUG==0)
- char s[256];
- dumpEffectDescriptor(d, s, 256);
+ char s[512];
+ dumpEffectDescriptor(d, s, sizeof(s), 0 /* indent */);
ALOGV("loadEffect() read descriptor %p:%s",d, s);
#endif
if (EFFECT_API_VERSION_MAJOR(d->apiVersion) !=
@@ -682,8 +706,14 @@ int loadEffect(cnode *root)
}
e = malloc(sizeof(list_elem_t));
e->object = d;
- e->next = l->effects;
- l->effects = e;
+ if (skip) {
+ e->next = gSkippedEffects;
+ gSkippedEffects = e;
+ return -EINVAL;
+ } else {
+ e->next = l->effects;
+ l->effects = e;
+ }
// After the UUID node in the config_tree, if node->next is valid,
// that would be sub effect node.
@@ -876,22 +906,30 @@ int findEffect(const effect_uuid_t *type,
return ret;
}
-void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len) {
+void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len, int indent) {
char s[256];
+ char ss[256];
+ char idt[indent + 1];
- snprintf(str, len, "\nEffect Descriptor %p:\n", desc);
- strncat(str, "- TYPE: ", len);
- uuidToString(&desc->uuid, s, 256);
- snprintf(str, len, "- UUID: %s\n", s);
- uuidToString(&desc->type, s, 256);
- snprintf(str, len, "- TYPE: %s\n", s);
- sprintf(s, "- apiVersion: %08X\n- flags: %08X\n",
- desc->apiVersion, desc->flags);
- strncat(str, s, len);
- sprintf(s, "- name: %s\n", desc->name);
- strncat(str, s, len);
- sprintf(s, "- implementor: %s\n", desc->implementor);
- strncat(str, s, len);
+ memset(idt, ' ', indent);
+ idt[indent] = 0;
+
+ str[0] = 0;
+
+ snprintf(s, sizeof(s), "%s%s / %s\n", idt, desc->name, desc->implementor);
+ strlcat(str, s, len);
+
+ uuidToString(&desc->uuid, s, sizeof(s));
+ snprintf(ss, sizeof(ss), "%s UUID: %s\n", idt, s);
+ strlcat(str, ss, len);
+
+ uuidToString(&desc->type, s, sizeof(s));
+ snprintf(ss, sizeof(ss), "%s TYPE: %s\n", idt, s);
+ strlcat(str, ss, len);
+
+ sprintf(s, "%s apiVersion: %08X\n%s flags: %08X\n", idt,
+ desc->apiVersion, idt, desc->flags);
+ strlcat(str, s, len);
}
int stringToUuid(const char *str, effect_uuid_t *uuid)
@@ -934,3 +972,40 @@ int uuidToString(const effect_uuid_t *uuid, char *str, size_t maxLen)
return 0;
}
+int EffectDumpEffects(int fd) {
+ char s[512];
+ list_elem_t *e = gLibraryList;
+ lib_entry_t *l = NULL;
+ effect_descriptor_t *d = NULL;
+ int found = 0;
+ int ret = 0;
+
+ while (e) {
+ l = (lib_entry_t *)e->object;
+ list_elem_t *efx = l->effects;
+ dprintf(fd, "Library %s\n", l->name);
+ if (!efx) {
+ dprintf(fd, " (no effects)\n");
+ }
+ while (efx) {
+ d = (effect_descriptor_t *)efx->object;
+ dumpEffectDescriptor(d, s, sizeof(s), 2);
+ dprintf(fd, "%s", s);
+ efx = efx->next;
+ }
+ e = e->next;
+ }
+
+ e = gSkippedEffects;
+ if (e) {
+ dprintf(fd, "Skipped effects\n");
+ while(e) {
+ d = (effect_descriptor_t *)e->object;
+ dumpEffectDescriptor(d, s, sizeof(s), 2 /* indent */);
+ dprintf(fd, "%s", s);
+ e = e->next;
+ }
+ }
+ return ret;
+}
+
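
The __LP64__ branch in loadLibrary() above rewrites the 32-bit library path from audio_effects.conf into its 64-bit sibling before dlopen(). The same string surgery as a standalone sketch (illustrative only, using std::string instead of the PATH_MAX buffer):

    #include <string>

    // Rewrite ".../lib/..." to ".../lib64/..." as loadLibrary() does on LP64 builds.
    // Returns false when the path has no "/lib/" component (the -EINVAL case).
    static bool to64BitLibPath(const std::string &path32, std::string *path64) {
        const std::string needle = "/lib/";
        std::string::size_type pos = path32.find(needle);
        if (pos == std::string::npos) {
            return false;
        }
        *path64 = path32.substr(0, pos) + "/lib64/" + path32.substr(pos + needle.size());
        return true;
    }

    // e.g. "/system/lib/soundfx/libbundlewrapper.so"
    //   -> "/system/lib64/soundfx/libbundlewrapper.so"
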
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 6c585fb..5378bf2 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -61,15 +61,11 @@ LOCAL_SRC_FILES:= \
StringArray.cpp \
AudioPolicy.cpp
-LOCAL_SRC_FILES += ../libnbaio/roundup.c
-
LOCAL_SHARED_LIBRARIES := \
libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
libcamera_client libstagefright_foundation \
libgui libdl libaudioutils libnbaio
-LOCAL_STATIC_LIBRARIES += libinstantssq
-
LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
LOCAL_MODULE:= libmedia
@@ -85,12 +81,3 @@ LOCAL_C_INCLUDES := \
include $(BUILD_SHARED_LIBRARY)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES += SingleStateQueue.cpp
-LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"'
-
-LOCAL_MODULE := libinstantssq
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 9cae21c..7a18cba 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -499,8 +499,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle
OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
gOutputs.add(ioHandle, outputDesc);
- ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %zu "
- "latency %d",
+ ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x "
+ "frameCount %zu latency %d",
outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
outputDesc->frameCount, outputDesc->latency);
} break;
@@ -523,8 +523,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle
if (param2 == NULL) break;
desc = (const OutputDescriptor *)param2;
- ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x channel mask %#x "
- "frameCount %zu latency %d",
+ ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x "
+ "channel mask %#x frameCount %zu latency %d",
ioHandle, desc->samplingRate, desc->format,
desc->channelMask, desc->frameCount, desc->latency);
OutputDescriptor *outputDesc = gOutputs.valueAt(index);
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 735db5c..d4bacc0 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -33,11 +33,16 @@
#define WAIT_PERIOD_MS 10
#define WAIT_STREAM_END_TIMEOUT_SEC 120
-
+static const int kMaxLoopCountNotifications = 32;
namespace android {
// ---------------------------------------------------------------------------
+template <typename T>
+const T &min(const T &x, const T &y) {
+ return x < y ? x : y;
+}
+
static int64_t convertTimespecToUs(const struct timespec &tv)
{
return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
@@ -317,12 +322,6 @@ status_t AudioTrack::set(
uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
- // AudioFlinger does not currently support 8-bit data in shared memory
- if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
- ALOGE("8-bit data in shared memory is not supported");
- return BAD_VALUE;
- }
-
// force direct flag if format is not linear PCM
// or offload was requested
if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
@@ -346,12 +345,9 @@ status_t AudioTrack::set(
} else {
mFrameSize = sizeof(uint8_t);
}
- mFrameSizeAF = mFrameSize;
} else {
ALOG_ASSERT(audio_is_linear_pcm(format));
mFrameSize = channelCount * audio_bytes_per_sample(format);
- mFrameSizeAF = channelCount * audio_bytes_per_sample(
- format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
// createTrack will return an error if PCM format is not supported by server,
// so no need to check for specific PCM formats here
}
@@ -420,7 +416,10 @@ status_t AudioTrack::set(
mStatus = NO_ERROR;
mState = STATE_STOPPED;
mUserData = user;
- mLoopPeriod = 0;
+ mLoopCount = 0;
+ mLoopStart = 0;
+ mLoopEnd = 0;
+ mLoopCountNotified = 0;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
@@ -531,14 +530,12 @@ void AudioTrack::stop()
// the playback head position will reset to 0, so if a marker is set, we need
// to activate it again
mMarkerReached = false;
-#if 0
- // Force flush if a shared buffer is used otherwise audioflinger
- // will not stop before end of buffer is reached.
- // It may be needed to make sure that we stop playback, likely in case looping is on.
+
if (mSharedBuffer != 0) {
- flush_l();
+ // clear buffer position and loop count.
+ mStaticProxy->setBufferPositionAndLoop(0 /* position */,
+ 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
}
-#endif
sp<AudioTrackThread> t = mAudioTrackThread;
if (t != 0) {
@@ -740,10 +737,15 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
- // Setting the loop will reset next notification update period (like setPosition).
- mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
- mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
+ // We do not update the periodic notification point.
+ // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
+ mLoopCount = loopCount;
+ mLoopEnd = loopEnd;
+ mLoopStart = loopStart;
+ mLoopCountNotified = loopCount;
mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
+
+ // Waking the AudioTrackThread is not needed as this cannot be called when active.
}
status_t AudioTrack::setMarkerPosition(uint32_t marker)
@@ -757,6 +759,10 @@ status_t AudioTrack::setMarkerPosition(uint32_t marker)
mMarkerPosition = marker;
mMarkerReached = false;
+ sp<AudioTrackThread> t = mAudioTrackThread;
+ if (t != 0) {
+ t->wake();
+ }
return NO_ERROR;
}
@@ -786,6 +792,10 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
mNewPosition = updateAndGetPosition_l() + updatePeriod;
mUpdatePeriod = updatePeriod;
+ sp<AudioTrackThread> t = mAudioTrackThread;
+ if (t != 0) {
+ t->wake();
+ }
return NO_ERROR;
}
@@ -823,12 +833,11 @@ status_t AudioTrack::setPosition(uint32_t position)
if (mState == STATE_ACTIVE) {
return INVALID_OPERATION;
}
+ // After setting the position, use full update period before notification.
mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
- mLoopPeriod = 0;
- // FIXME Check whether loops and setting position are incompatible in old code.
- // If we use setLoop for both purposes we lose the capability to set the position while looping.
- mStaticProxy->setLoop(position, mFrameCount, 0);
+ mStaticProxy->setBufferPosition(position);
+ // Waking the AudioTrackThread is not needed as this cannot be called when active.
return NO_ERROR;
}
@@ -893,10 +902,18 @@ status_t AudioTrack::reload()
return INVALID_OPERATION;
}
mNewPosition = mUpdatePeriod;
- mLoopPeriod = 0;
- // FIXME The new code cannot reload while keeping a loop specified.
- // Need to check how the old code handled this, and whether it's a significant change.
- mStaticProxy->setLoop(0, mFrameCount, 0);
+ (void) updateAndGetPosition_l();
+ mPosition = 0;
+#if 0
+ // The documentation is not clear on the behavior of reload() and the restoration
+ // of loop count. Historically we have not restored loop count, start, end,
+ // but it makes sense if one desires to repeat playing a particular sound.
+ if (mLoopCount != 0) {
+ mLoopCountNotified = mLoopCount;
+ mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
+ }
+#endif
+ mStaticProxy->setBufferPosition(0);
return NO_ERROR;
}
@@ -1019,12 +1036,12 @@ status_t AudioTrack::createTrack_l()
mNotificationFramesAct = frameCount;
}
} else if (mSharedBuffer != 0) {
-
- // Ensure that buffer alignment matches channel count
- // 8-bit data in shared memory is not currently supported by AudioFlinger
- size_t alignment = audio_bytes_per_sample(
- mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
+ // FIXME: Ensure client side memory buffers need
+ // not have additional alignment beyond sample
+ // (e.g. 16 bit stereo accessed as 32 bit frame).
+ size_t alignment = audio_bytes_per_sample(mFormat);
if (alignment & 1) {
+ // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
alignment = 1;
}
if (mChannelCount > 1) {
@@ -1042,7 +1059,7 @@ status_t AudioTrack::createTrack_l()
// there's no frameCount parameter.
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
- frameCount = mSharedBuffer->size() / mFrameSizeAF;
+ frameCount = mSharedBuffer->size() / mFrameSize;
} else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
@@ -1103,10 +1120,7 @@ status_t AudioTrack::createTrack_l()
// but we will still need the original value also
sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
mSampleRate,
- // AudioFlinger only sees 16-bit PCM
- mFormat == AUDIO_FORMAT_PCM_8_BIT &&
- !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
- AUDIO_FORMAT_PCM_16_BIT : mFormat,
+ mFormat,
mChannelMask,
&temp,
&trackFlags,
@@ -1230,9 +1244,9 @@ status_t AudioTrack::createTrack_l()
// update proxy
if (mSharedBuffer == 0) {
mStaticProxy.clear();
- mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+ mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
} else {
- mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+ mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
mProxy = mStaticProxy;
}
@@ -1352,7 +1366,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *re
} while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
audioBuffer->frameCount = buffer.mFrameCount;
- audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
+ audioBuffer->size = buffer.mFrameCount * mFrameSize;
audioBuffer->raw = buffer.mRaw;
if (nonContig != NULL) {
*nonContig = buffer.mNonContig;
@@ -1366,7 +1380,7 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer)
return;
}
- size_t stepCount = audioBuffer->size / mFrameSizeAF;
+ size_t stepCount = audioBuffer->size / mFrameSize;
if (stepCount == 0) {
return;
}
@@ -1432,14 +1446,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
}
size_t toWrite;
- if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
- // Divide capacity by 2 to take expansion into account
- toWrite = audioBuffer.size >> 1;
- memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
- } else {
- toWrite = audioBuffer.size;
- memcpy(audioBuffer.i8, buffer, toWrite);
- }
+ toWrite = audioBuffer.size;
+ memcpy(audioBuffer.i8, buffer, toWrite);
buffer = ((const char *) buffer) + toWrite;
userSize -= toWrite;
written += toWrite;
@@ -1559,9 +1567,8 @@ nsecs_t AudioTrack::processAudioBuffer()
// that the upper layers can recreate the track
if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
status_t status = restoreTrack_l("processAudioBuffer");
- mLock.unlock();
- // Run again immediately, but with a new IAudioTrack
- return 0;
+ // after restoration, continue below to make sure that the loop and buffer events
+ // are notified because they have been cleared from mCblk->mFlags above.
}
}
@@ -1610,7 +1617,6 @@ nsecs_t AudioTrack::processAudioBuffer()
}
// Cache other fields that will be needed soon
- uint32_t loopPeriod = mLoopPeriod;
uint32_t sampleRate = mSampleRate;
uint32_t notificationFrames = mNotificationFramesAct;
if (mRefreshRemaining) {
@@ -1622,8 +1628,30 @@ nsecs_t AudioTrack::processAudioBuffer()
uint32_t sequence = mSequence;
sp<AudioTrackClientProxy> proxy = mProxy;
+ // Determine the number of new loop callback(s) that will be needed, while locked.
+ int loopCountNotifications = 0;
+ uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
+
+ if (mLoopCount > 0) {
+ int loopCount;
+ size_t bufferPosition;
+ mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+ loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
+ loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
+ mLoopCountNotified = loopCount; // discard any excess notifications
+ } else if (mLoopCount < 0) {
+ // FIXME: We're not accurate with notification count and position with infinite looping
+ // since loopCount from server side will always return -1 (we could decrement it).
+ size_t bufferPosition = mStaticProxy->getBufferPosition();
+ loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
+ loopPeriod = mLoopEnd - bufferPosition;
+ } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
+ size_t bufferPosition = mStaticProxy->getBufferPosition();
+ loopPeriod = mFrameCount - bufferPosition;
+ }
+
// These fields don't need to be cached, because they are assigned only by set():
- // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
+ // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
// mFlags is also assigned by createTrack_l(), but not the bit we care about.
mLock.unlock();
@@ -1662,10 +1690,9 @@ nsecs_t AudioTrack::processAudioBuffer()
if (newUnderrun) {
mCbf(EVENT_UNDERRUN, mUserData, NULL);
}
- // FIXME we will miss loops if loop cycle was signaled several times since last call
- // to processAudioBuffer()
- if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
+ while (loopCountNotifications > 0) {
mCbf(EVENT_LOOP_END, mUserData, NULL);
+ --loopCountNotifications;
}
if (flags & CBLK_BUFFER_END) {
mCbf(EVENT_BUFFER_END, mUserData, NULL);
@@ -1701,10 +1728,11 @@ nsecs_t AudioTrack::processAudioBuffer()
minFrames = markerPosition - position;
}
if (loopPeriod > 0 && loopPeriod < minFrames) {
+ // loopPeriod is already adjusted for actual position.
minFrames = loopPeriod;
}
- if (updatePeriod > 0 && updatePeriod < minFrames) {
- minFrames = updatePeriod;
+ if (updatePeriod > 0) {
+ minFrames = min(minFrames, uint32_t(newPosition - position));
}
// If > 0, poll periodically to recover from a stuck server. A good value is 2.
@@ -1767,13 +1795,6 @@ nsecs_t AudioTrack::processAudioBuffer()
}
}
- // Divide buffer size by 2 to take into account the expansion
- // due to 8 to 16 bit conversion: the callback must fill only half
- // of the destination buffer
- if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
- audioBuffer.size >>= 1;
- }
-
size_t reqSize = audioBuffer.size;
mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
size_t writtenSize = audioBuffer.size;
@@ -1793,13 +1814,7 @@ nsecs_t AudioTrack::processAudioBuffer()
return WAIT_PERIOD_MS * 1000000LL;
}
- if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
- // 8 to 16 bit conversion, note that source and destination are the same address
- memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
- audioBuffer.size <<= 1;
- }
-
- size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
+ size_t releasedFrames = audioBuffer.size / mFrameSize;
audioBuffer.frameCount = releasedFrames;
mRemainingFrames -= releasedFrames;
if (misalignment >= releasedFrames) {
@@ -1856,7 +1871,11 @@ status_t AudioTrack::restoreTrack_l(const char *from)
}
// save the old static buffer position
- size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
+ size_t bufferPosition = 0;
+ int loopCount = 0;
+ if (mStaticProxy != 0) {
+ mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+ }
// If a new IAudioTrack is successfully created, createTrack_l() will modify the
// following member variables: mAudioTrack, mCblkMemory and mCblk.
@@ -1865,30 +1884,26 @@ status_t AudioTrack::restoreTrack_l(const char *from)
result = createTrack_l();
// take the frames that will be lost by track recreation into account in saved position
+ // For streaming tracks, this is the amount we obtained from the user/client
+ // (not the number actually consumed at the server - those are already lost).
(void) updateAndGetPosition_l();
- mPosition = mReleased;
+ if (mStaticProxy != 0) {
+ mPosition = mReleased;
+ }
if (result == NO_ERROR) {
- // continue playback from last known position, but
- // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
- if (mStaticProxy != NULL) {
- mLoopPeriod = 0;
- mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
- }
- // FIXME How do we simulate the fact that all frames present in the buffer at the time of
- // track destruction have been played? This is critical for SoundPool implementation
- // This must be broken, and needs to be tested/debugged.
-#if 0
- // restore write index and set other indexes to reflect empty buffer status
- if (!strcmp(from, "start")) {
- // Make sure that a client relying on callback events indicating underrun or
- // the actual amount of audio frames played (e.g SoundPool) receives them.
- if (mSharedBuffer == 0) {
- // restart playback even if buffer is not completely filled.
- android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+ // Continue playback from last known position and restore loop.
+ if (mStaticProxy != 0) {
+ if (loopCount != 0) {
+ mStaticProxy->setBufferPositionAndLoop(bufferPosition,
+ mLoopStart, mLoopEnd, loopCount);
+ } else {
+ mStaticProxy->setBufferPosition(bufferPosition);
+ if (bufferPosition == mFrameCount) {
+ ALOGD("restoring track at end of static buffer");
+ }
}
}
-#endif
if (mState == STATE_ACTIVE) {
result = mAudioTrack->start();
}
@@ -2148,8 +2163,8 @@ bool AudioTrack::AudioTrackThread::threadLoop()
case NS_NEVER:
return false;
case NS_WHENEVER:
- // FIXME increase poll interval, or make event-driven
- ns = 1000000000LL;
+ // Event driven: call wake() when callback notification conditions change.
+ ns = INT64_MAX;
// fall through
default:
LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
@@ -2182,6 +2197,17 @@ void AudioTrack::AudioTrackThread::resume()
}
}
+void AudioTrack::AudioTrackThread::wake()
+{
+ AutoMutex _l(mMyLock);
+ if (!mPaused && mPausedInt && mPausedNs > 0) {
+ // audio track is active and internally paused with timeout.
+ mIgnoreNextPausedInt = true;
+ mPausedInt = false;
+ mMyCond.signal();
+ }
+}
+
void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
{
AutoMutex _l(mMyLock);
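
The loop-callback change above replaces the old single CBLK_LOOP_CYCLE flag check, which could drop callbacks, with a count: the client compares the loop count it last reported against the count the server has reached and emits the difference, capped at kMaxLoopCountNotifications, as EVENT_LOOP_END callbacks. A toy illustration of that computation (the values are made up):

    #include <algorithm>
    #include <cstdio>

    // EVENT_LOOP_END callbacks still owed to the app, given the loop count we
    // last reported and the (decreasing) count the server has reached since.
    static int pendingLoopNotifications(int lastNotified, int serverLoopCount,
                                        int maxPerPass = 32 /* kMaxLoopCountNotifications */) {
        return std::min(lastNotified - serverLoopCount, maxPerPass);
    }

    int main() {
        // setLoop(start, end, 5) was called; the server has since wrapped twice,
        // so its remaining count dropped from 5 to 3: two callbacks are due.
        printf("%d\n", pendingLoopNotifications(5, 3));   // prints 2
        return 0;
    }
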
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index ff24475..08241e2 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -28,7 +28,21 @@ namespace android {
// used to clamp a value to size_t. TODO: move to another file.
template <typename T>
size_t clampToSize(T x) {
- return x > SIZE_MAX ? SIZE_MAX : x < 0 ? 0 : (size_t) x;
+ return sizeof(T) > sizeof(size_t) && x > (T) SIZE_MAX ? SIZE_MAX : x < 0 ? 0 : (size_t) x;
+}
+
+// incrementSequence is used to determine the next sequence value
+// for the loop and position sequence counters. It should return
+// a value between "other" + 1 and "other" + INT32_MAX, the choice of
+// which needs to be the "least recently used" sequence value for "self".
+// In general, this means (new_self) returned is max(self, other) + 1.
+
+static uint32_t incrementSequence(uint32_t self, uint32_t other) {
+ int32_t diff = self - other;
+ if (diff >= 0 && diff < INT32_MAX) {
+ return self + 1; // we're already ahead of other.
+ }
+ return other + 1; // we're behind, so move just ahead of other.
}
audio_track_cblk_t::audio_track_cblk_t()
@@ -485,8 +499,11 @@ end:
StaticAudioTrackClientProxy::StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers,
size_t frameCount, size_t frameSize)
: AudioTrackClientProxy(cblk, buffers, frameCount, frameSize),
- mMutator(&cblk->u.mStatic.mSingleStateQueue), mBufferPosition(0)
+ mMutator(&cblk->u.mStatic.mSingleStateQueue),
+ mPosLoopObserver(&cblk->u.mStatic.mPosLoopQueue)
{
+ memset(&mState, 0, sizeof(mState));
+ memset(&mPosLoop, 0, sizeof(mPosLoop));
}
void StaticAudioTrackClientProxy::flush()
@@ -501,30 +518,72 @@ void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int
// FIXME Should return an error status
return;
}
- StaticAudioTrackState newState;
- newState.mLoopStart = (uint32_t) loopStart;
- newState.mLoopEnd = (uint32_t) loopEnd;
- newState.mLoopCount = loopCount;
- size_t bufferPosition;
- if (loopCount == 0 || (bufferPosition = getBufferPosition()) >= loopEnd) {
- bufferPosition = loopStart;
+ mState.mLoopStart = (uint32_t) loopStart;
+ mState.mLoopEnd = (uint32_t) loopEnd;
+ mState.mLoopCount = loopCount;
+ mState.mLoopSequence = incrementSequence(mState.mLoopSequence, mState.mPositionSequence);
+ // set patch-up variables until the mState is acknowledged by the ServerProxy.
+ // observed buffer position and loop count will freeze until then to give the
+ // illusion of a synchronous change.
+ getBufferPositionAndLoopCount(NULL, NULL);
+ // preserve behavior to restart at mState.mLoopStart if position exceeds mState.mLoopEnd.
+ if (mState.mLoopCount != 0 && mPosLoop.mBufferPosition >= mState.mLoopEnd) {
+ mPosLoop.mBufferPosition = mState.mLoopStart;
}
- mBufferPosition = bufferPosition; // snapshot buffer position until loop is acknowledged.
- (void) mMutator.push(newState);
+ mPosLoop.mLoopCount = mState.mLoopCount;
+ (void) mMutator.push(mState);
+}
+
+void StaticAudioTrackClientProxy::setBufferPosition(size_t position)
+{
+ // This can only happen on a 64-bit client
+ if (position > UINT32_MAX) {
+ // FIXME Should return an error status
+ return;
+ }
+ mState.mPosition = (uint32_t) position;
+ mState.mPositionSequence = incrementSequence(mState.mPositionSequence, mState.mLoopSequence);
+ // set patch-up variables until the mState is acknowledged by the ServerProxy.
+ // observed buffer position and loop count will freeze until then to give the
+ // illusion of a synchronous change.
+ if (mState.mLoopCount > 0) { // only check if loop count is changing
+ getBufferPositionAndLoopCount(NULL, NULL); // get last position
+ }
+ mPosLoop.mBufferPosition = position;
+ if (position >= mState.mLoopEnd) {
+ // no ongoing loop is possible if position is greater than loopEnd.
+ mPosLoop.mLoopCount = 0;
+ }
+ (void) mMutator.push(mState);
+}
+
+void StaticAudioTrackClientProxy::setBufferPositionAndLoop(size_t position, size_t loopStart,
+ size_t loopEnd, int loopCount)
+{
+ setLoop(loopStart, loopEnd, loopCount);
+ setBufferPosition(position);
}
size_t StaticAudioTrackClientProxy::getBufferPosition()
{
- size_t bufferPosition;
- if (mMutator.ack()) {
- bufferPosition = (size_t) mCblk->u.mStatic.mBufferPosition;
- if (bufferPosition > mFrameCount) {
- bufferPosition = mFrameCount;
- }
- } else {
- bufferPosition = mBufferPosition;
+ getBufferPositionAndLoopCount(NULL, NULL);
+ return mPosLoop.mBufferPosition;
+}
+
+void StaticAudioTrackClientProxy::getBufferPositionAndLoopCount(
+ size_t *position, int *loopCount)
+{
+ if (mMutator.ack() == StaticAudioTrackSingleStateQueue::SSQ_DONE) {
+ if (mPosLoopObserver.poll(mPosLoop)) {
+ ; // a valid mPosLoop should be available if ackDone is true.
+ }
+ }
+ if (position != NULL) {
+ *position = mPosLoop.mBufferPosition;
+ }
+ if (loopCount != NULL) {
+ *loopCount = mPosLoop.mLoopCount;
}
- return bufferPosition;
}
// ---------------------------------------------------------------------------
@@ -560,7 +619,8 @@ status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
ssize_t filled = rear - newFront;
// Rather than shutting down on a corrupt flush, just treat it as a full flush
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
- ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, filled %d=%#x",
+ ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
+ "filled %d=%#x",
mFlush, flush, front, rear, mask, newFront, filled, filled);
newFront = rear;
}
@@ -739,13 +799,12 @@ void AudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
size_t frameCount, size_t frameSize)
: AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),
- mObserver(&cblk->u.mStatic.mSingleStateQueue), mPosition(0),
+ mObserver(&cblk->u.mStatic.mSingleStateQueue),
+ mPosLoopMutator(&cblk->u.mStatic.mPosLoopQueue),
mFramesReadySafe(frameCount), mFramesReady(frameCount),
mFramesReadyIsCalledByMultipleThreads(false)
{
- mState.mLoopStart = 0;
- mState.mLoopEnd = 0;
- mState.mLoopCount = 0;
+ memset(&mState, 0, sizeof(mState));
}
void StaticAudioTrackServerProxy::framesReadyIsCalledByMultipleThreads()
@@ -762,55 +821,97 @@ size_t StaticAudioTrackServerProxy::framesReady()
return mFramesReadySafe;
}
-ssize_t StaticAudioTrackServerProxy::pollPosition()
+status_t StaticAudioTrackServerProxy::updateStateWithLoop(
+ StaticAudioTrackState *localState, const StaticAudioTrackState &update) const
{
- size_t position = mPosition;
- StaticAudioTrackState state;
- if (mObserver.poll(state)) {
+ if (localState->mLoopSequence != update.mLoopSequence) {
bool valid = false;
- size_t loopStart = state.mLoopStart;
- size_t loopEnd = state.mLoopEnd;
- if (state.mLoopCount == 0) {
- if (loopStart > mFrameCount) {
- loopStart = mFrameCount;
- }
- // ignore loopEnd
- mPosition = position = loopStart;
- mFramesReady = mFrameCount - mPosition;
- mState.mLoopCount = 0;
+ const size_t loopStart = update.mLoopStart;
+ const size_t loopEnd = update.mLoopEnd;
+ size_t position = localState->mPosition;
+ if (update.mLoopCount == 0) {
valid = true;
- } else if (state.mLoopCount >= -1) {
+ } else if (update.mLoopCount >= -1) {
if (loopStart < loopEnd && loopEnd <= mFrameCount &&
loopEnd - loopStart >= MIN_LOOP) {
// If the current position is greater than the end of the loop
// we "wrap" to the loop start. This might cause an audible pop.
if (position >= loopEnd) {
- mPosition = position = loopStart;
- }
- if (state.mLoopCount == -1) {
- mFramesReady = INT64_MAX;
- } else {
- // mFramesReady is 64 bits to handle the effective number of frames
- // that the static audio track contains, including loops.
- // TODO: Later consider fixing overflow, but does not seem needed now
- // as will not overflow if loopStart and loopEnd are Java "ints".
- mFramesReady = int64_t(state.mLoopCount) * (loopEnd - loopStart)
- + mFrameCount - mPosition;
+ position = loopStart;
}
- mState = state;
valid = true;
}
}
- if (!valid || mPosition > mFrameCount) {
+ if (!valid || position > mFrameCount) {
+ return NO_INIT;
+ }
+ localState->mPosition = position;
+ localState->mLoopCount = update.mLoopCount;
+ localState->mLoopEnd = loopEnd;
+ localState->mLoopStart = loopStart;
+ localState->mLoopSequence = update.mLoopSequence;
+ }
+ return OK;
+}
+
+status_t StaticAudioTrackServerProxy::updateStateWithPosition(
+ StaticAudioTrackState *localState, const StaticAudioTrackState &update) const
+{
+ if (localState->mPositionSequence != update.mPositionSequence) {
+ if (update.mPosition > mFrameCount) {
+ return NO_INIT;
+ } else if (localState->mLoopCount != 0 && update.mPosition >= localState->mLoopEnd) {
+ localState->mLoopCount = 0; // disable loop count if position is beyond loop end.
+ }
+ localState->mPosition = update.mPosition;
+ localState->mPositionSequence = update.mPositionSequence;
+ }
+ return OK;
+}
+
+ssize_t StaticAudioTrackServerProxy::pollPosition()
+{
+ StaticAudioTrackState state;
+ if (mObserver.poll(state)) {
+ StaticAudioTrackState trystate = mState;
+ bool result;
+ const int32_t diffSeq = state.mLoopSequence - state.mPositionSequence;
+
+ if (diffSeq < 0) {
+ result = updateStateWithLoop(&trystate, state) == OK &&
+ updateStateWithPosition(&trystate, state) == OK;
+ } else {
+ result = updateStateWithPosition(&trystate, state) == OK &&
+ updateStateWithLoop(&trystate, state) == OK;
+ }
+ if (!result) {
+ mObserver.done();
+ // caution: no update occurs so server state will be inconsistent with client state.
ALOGE("%s client pushed an invalid state, shutting down", __func__);
mIsShutdown = true;
return (ssize_t) NO_INIT;
}
+ mState = trystate;
+ if (mState.mLoopCount == -1) {
+ mFramesReady = INT64_MAX;
+ } else if (mState.mLoopCount == 0) {
+ mFramesReady = mFrameCount - mState.mPosition;
+ } else if (mState.mLoopCount > 0) {
+ // TODO: Later consider fixing overflow, but does not seem needed now
+ // as will not overflow if loopStart and loopEnd are Java "ints".
+ mFramesReady = int64_t(mState.mLoopCount) * (mState.mLoopEnd - mState.mLoopStart)
+ + mFrameCount - mState.mPosition;
+ }
mFramesReadySafe = clampToSize(mFramesReady);
// This may overflow, but client is not supposed to rely on it
- mCblk->u.mStatic.mBufferPosition = (uint32_t) position;
+ StaticAudioTrackPosLoop posLoop;
+
+ posLoop.mLoopCount = (int32_t) mState.mLoopCount;
+ posLoop.mBufferPosition = (uint32_t) mState.mPosition;
+ mPosLoopMutator.push(posLoop);
+ mObserver.done(); // safe to read mStatic variables.
}
- return (ssize_t) position;
+ return (ssize_t) mState.mPosition;
}
status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush __unused)
@@ -849,7 +950,7 @@ status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush
}
// As mFramesReady is the total remaining frames in the static audio track,
// it is always larger or equal to avail.
- LOG_ALWAYS_FATAL_IF(mFramesReady < avail);
+ LOG_ALWAYS_FATAL_IF(mFramesReady < (int64_t) avail);
buffer->mNonContig = mFramesReady == INT64_MAX ? SIZE_MAX : clampToSize(mFramesReady - avail);
mUnreleased = avail;
return NO_ERROR;
@@ -858,7 +959,7 @@ status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush
void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
{
size_t stepCount = buffer->mFrameCount;
- LOG_ALWAYS_FATAL_IF(!(stepCount <= mFramesReady));
+ LOG_ALWAYS_FATAL_IF(!((int64_t) stepCount <= mFramesReady));
LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
if (stepCount == 0) {
// prevent accidental re-use of buffer
@@ -868,11 +969,12 @@ void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
}
mUnreleased -= stepCount;
audio_track_cblk_t* cblk = mCblk;
- size_t position = mPosition;
+ size_t position = mState.mPosition;
size_t newPosition = position + stepCount;
int32_t setFlags = 0;
if (!(position <= newPosition && newPosition <= mFrameCount)) {
- ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position, mFrameCount);
+ ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position,
+ mFrameCount);
newPosition = mFrameCount;
} else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
newPosition = mState.mLoopStart;
@@ -885,7 +987,7 @@ void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
if (newPosition == mFrameCount) {
setFlags |= CBLK_BUFFER_END;
}
- mPosition = newPosition;
+ mState.mPosition = newPosition;
if (mFramesReady != INT64_MAX) {
mFramesReady -= stepCount;
}
@@ -893,7 +995,10 @@ void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
cblk->mServer += stepCount;
// This may overflow, but client is not supposed to rely on it
- cblk->u.mStatic.mBufferPosition = (uint32_t) newPosition;
+ StaticAudioTrackPosLoop posLoop;
+ posLoop.mBufferPosition = mState.mPosition;
+ posLoop.mLoopCount = mState.mLoopCount;
+ mPosLoopMutator.push(posLoop);
if (setFlags != 0) {
(void) android_atomic_or(setFlags, &cblk->mFlags);
// this would be a good place to wake a futex
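
The new StaticAudioTrackPosLoopQueue traffic is one-directional: the server pushes a position/loop snapshot whenever either value moves, and the client polls it lazily, keeping its cached copy when nothing new has arrived. A minimal sketch of the two ends, assuming only the push()/poll() calls already used above:

    #include <private/media/AudioTrackShared.h>

    using namespace android;

    // Server side: publish the current snapshot; push() overwrites any unread value.
    static void publishPosLoop(StaticAudioTrackPosLoopQueue::Mutator &mutator,
                               uint32_t position, int32_t loopCount) {
        StaticAudioTrackPosLoop posLoop;
        posLoop.mBufferPosition = position;
        posLoop.mLoopCount = loopCount;
        mutator.push(posLoop);
    }

    // Client side: refresh 'cached' if the server pushed something new, else keep it.
    static void refreshPosLoop(StaticAudioTrackPosLoopQueue::Observer &observer,
                               StaticAudioTrackPosLoop &cached) {
        if (!observer.poll(cached)) {
            // no new value since the last poll; 'cached' is left unchanged
        }
    }
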
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index a733b68..9181f86 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -46,7 +46,6 @@ enum {
SET_OUTPUT_FORMAT,
SET_VIDEO_ENCODER,
SET_AUDIO_ENCODER,
- SET_OUTPUT_FILE_PATH,
SET_OUTPUT_FILE_FD,
SET_VIDEO_SIZE,
SET_VIDEO_FRAMERATE,
@@ -158,16 +157,6 @@ public:
return reply.readInt32();
}
- status_t setOutputFile(const char* path)
- {
- ALOGV("setOutputFile(%s)", path);
- Parcel data, reply;
- data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
- data.writeCString(path);
- remote()->transact(SET_OUTPUT_FILE_PATH, data, &reply);
- return reply.readInt32();
- }
-
status_t setOutputFile(int fd, int64_t offset, int64_t length) {
ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
Parcel data, reply;
@@ -300,7 +289,8 @@ IMPLEMENT_META_INTERFACE(MediaRecorder, "android.media.IMediaRecorder");
// ----------------------------------------------------------------------
status_t BnMediaRecorder::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+ uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags)
{
switch (code) {
case RELEASE: {
@@ -390,13 +380,6 @@ status_t BnMediaRecorder::onTransact(
return NO_ERROR;
} break;
- case SET_OUTPUT_FILE_PATH: {
- ALOGV("SET_OUTPUT_FILE_PATH");
- CHECK_INTERFACE(IMediaRecorder, data, reply);
- const char* path = data.readCString();
- reply->writeInt32(setOutputFile(path));
- return NO_ERROR;
- } break;
case SET_OUTPUT_FILE_FD: {
ALOGV("SET_OUTPUT_FILE_FD");
CHECK_INTERFACE(IMediaRecorder, data, reply);
@@ -445,7 +428,8 @@ status_t BnMediaRecorder::onTransact(
case SET_PREVIEW_SURFACE: {
ALOGV("SET_PREVIEW_SURFACE");
CHECK_INTERFACE(IMediaRecorder, data, reply);
- sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
+ sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(
+ data.readStrongBinder());
reply->writeInt32(setPreviewSurface(surface));
return NO_ERROR;
} break;
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 721d8d7..271be0c 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -408,7 +408,8 @@ int JetPlayer::queueSegment(int segmentNum, int libNum, int repeatCount, int tra
ALOGV("JetPlayer::queueSegment segmentNum=%d, libNum=%d, repeatCount=%d, transpose=%d",
segmentNum, libNum, repeatCount, transpose);
Mutex::Autolock lock(mMutex);
- return JET_QueueSegment(mEasData, segmentNum, libNum, repeatCount, transpose, muteFlags, userID);
+ return JET_QueueSegment(mEasData, segmentNum, libNum, repeatCount, transpose, muteFlags,
+ userID);
}
//-------------------------------------------------------------------------------------------------
@@ -449,7 +450,8 @@ void JetPlayer::dump()
void JetPlayer::dumpJetStatus(S_JET_STATUS* pJetStatus)
{
if (pJetStatus!=NULL)
- ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d paused=%d",
+ ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d "
+ "paused=%d",
pJetStatus->currentUserID, pJetStatus->segmentRepeatCount,
pJetStatus->numQueuedSegments, pJetStatus->paused);
else
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index e2e6042..47f9258 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -163,7 +163,8 @@ MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap UNUSED
}
/*static*/ int
-MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings, const char *name)
+MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings,
+ const char *name)
{
int tag = -1;
for (size_t i = 0; i < nMappings; ++i) {
@@ -295,9 +296,8 @@ MediaProfiles::createAudioEncoderCap(const char **atts)
CHECK(codec != -1);
MediaProfiles::AudioEncoderCap *cap =
- new MediaProfiles::AudioEncoderCap(static_cast<audio_encoder>(codec), atoi(atts[5]), atoi(atts[7]),
- atoi(atts[9]), atoi(atts[11]), atoi(atts[13]),
- atoi(atts[15]));
+ new MediaProfiles::AudioEncoderCap(static_cast<audio_encoder>(codec), atoi(atts[5]),
+ atoi(atts[7]), atoi(atts[9]), atoi(atts[11]), atoi(atts[13]), atoi(atts[15]));
logAudioEncoderCap(*cap);
return cap;
}
@@ -330,7 +330,8 @@ MediaProfiles::createCamcorderProfile(int cameraId, const char **atts, Vector<in
!strcmp("fileFormat", atts[2]) &&
!strcmp("duration", atts[4]));
- const size_t nProfileMappings = sizeof(sCamcorderQualityNameMap)/sizeof(sCamcorderQualityNameMap[0]);
+ const size_t nProfileMappings = sizeof(sCamcorderQualityNameMap)/
+ sizeof(sCamcorderQualityNameMap[0]);
const int quality = findTagForName(sCamcorderQualityNameMap, nProfileMappings, atts[1]);
CHECK(quality != -1);
@@ -722,16 +723,20 @@ MediaProfiles::createDefaultCamcorderTimeLapse480pProfile(camcorder_quality qual
MediaProfiles::createDefaultCamcorderTimeLapseLowProfiles(
MediaProfiles::CamcorderProfile **lowTimeLapseProfile,
MediaProfiles::CamcorderProfile **lowSpecificTimeLapseProfile) {
- *lowTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(CAMCORDER_QUALITY_TIME_LAPSE_LOW);
- *lowSpecificTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(CAMCORDER_QUALITY_TIME_LAPSE_QCIF);
+ *lowTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(
+ CAMCORDER_QUALITY_TIME_LAPSE_LOW);
+ *lowSpecificTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(
+ CAMCORDER_QUALITY_TIME_LAPSE_QCIF);
}
/*static*/ void
MediaProfiles::createDefaultCamcorderTimeLapseHighProfiles(
MediaProfiles::CamcorderProfile **highTimeLapseProfile,
MediaProfiles::CamcorderProfile **highSpecificTimeLapseProfile) {
- *highTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(CAMCORDER_QUALITY_TIME_LAPSE_HIGH);
- *highSpecificTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(CAMCORDER_QUALITY_TIME_LAPSE_480P);
+ *highTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(
+ CAMCORDER_QUALITY_TIME_LAPSE_HIGH);
+ *highSpecificTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(
+ CAMCORDER_QUALITY_TIME_LAPSE_480P);
}
/*static*/ MediaProfiles::CamcorderProfile*
@@ -809,7 +814,8 @@ MediaProfiles::createDefaultCamcorderProfiles(MediaProfiles *profiles)
// high camcorder time lapse profiles.
MediaProfiles::CamcorderProfile *highTimeLapseProfile, *highSpecificTimeLapseProfile;
- createDefaultCamcorderTimeLapseHighProfiles(&highTimeLapseProfile, &highSpecificTimeLapseProfile);
+ createDefaultCamcorderTimeLapseHighProfiles(&highTimeLapseProfile,
+ &highSpecificTimeLapseProfile);
profiles->mCamcorderProfiles.add(highTimeLapseProfile);
profiles->mCamcorderProfiles.add(highSpecificTimeLapseProfile);
diff --git a/media/libmedia/SingleStateQueue.cpp b/media/libmedia/SingleStateQueue.cpp
deleted file mode 100644
index c241184..0000000
--- a/media/libmedia/SingleStateQueue.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <new>
-#include <cutils/atomic.h>
-#include <media/SingleStateQueue.h>
-
-namespace android {
-
-template<typename T> SingleStateQueue<T>::Mutator::Mutator(Shared *shared)
- : mSequence(0), mShared((Shared *) shared)
-{
- // exactly one of Mutator and Observer must initialize, currently it is Observer
- //shared->init();
-}
-
-template<typename T> int32_t SingleStateQueue<T>::Mutator::push(const T& value)
-{
- Shared *shared = mShared;
- int32_t sequence = mSequence;
- sequence++;
- android_atomic_acquire_store(sequence, &shared->mSequence);
- shared->mValue = value;
- sequence++;
- android_atomic_release_store(sequence, &shared->mSequence);
- mSequence = sequence;
- // consider signalling a futex here, if we know that observer is waiting
- return sequence;
-}
-
-template<typename T> bool SingleStateQueue<T>::Mutator::ack()
-{
- return mShared->mAck - mSequence == 0;
-}
-
-template<typename T> bool SingleStateQueue<T>::Mutator::ack(int32_t sequence)
-{
- // this relies on 2's complement rollover to detect an ancient sequence number
- return mShared->mAck - sequence >= 0;
-}
-
-template<typename T> SingleStateQueue<T>::Observer::Observer(Shared *shared)
- : mSequence(0), mSeed(1), mShared((Shared *) shared)
-{
- // exactly one of Mutator and Observer must initialize, currently it is Observer
- shared->init();
-}
-
-template<typename T> bool SingleStateQueue<T>::Observer::poll(T& value)
-{
- Shared *shared = mShared;
- int32_t before = shared->mSequence;
- if (before == mSequence) {
- return false;
- }
- for (int tries = 0; ; ) {
- const int MAX_TRIES = 5;
- if (before & 1) {
- if (++tries >= MAX_TRIES) {
- return false;
- }
- before = shared->mSequence;
- } else {
- android_memory_barrier();
- T temp = shared->mValue;
- int32_t after = android_atomic_release_load(&shared->mSequence);
- if (after == before) {
- value = temp;
- shared->mAck = before;
- mSequence = before;
- return true;
- }
- if (++tries >= MAX_TRIES) {
- return false;
- }
- before = after;
- }
- }
-}
-
-#if 0
-template<typename T> SingleStateQueue<T>::SingleStateQueue(void /*Shared*/ *shared)
-{
- ((Shared *) shared)->init();
-}
-#endif
-
-} // namespace android
-
-// hack for gcc
-#ifdef SINGLE_STATE_QUEUE_INSTANTIATIONS
-#include SINGLE_STATE_QUEUE_INSTANTIATIONS
-#endif
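The removed SingleStateQueue is a single-writer/single-reader seqlock: the writer bumps a sequence counter to an odd value, writes the payload, then bumps it to an even value, while the reader copies the payload and keeps it only if the sequence was even and unchanged across the copy. A minimal standalone sketch of that protocol in portable C++11 (class and member names are illustrative, and the memory-order details are simplified relative to the cutils atomics the removed file used):

    #include <atomic>
    #include <cstdint>

    // Sketch of the even/odd sequence protocol from the removed SingleStateQueue.
    // Odd sequence = writer mid-update; even = payload is stable.
    template <typename T>
    struct SingleStateBox {
        std::atomic<int32_t> mSeq{0};
        T mValue{};

        void push(const T &v) {                                    // single writer
            int32_t s = mSeq.load(std::memory_order_relaxed) + 1;
            mSeq.store(s, std::memory_order_relaxed);              // mark dirty (odd)
            std::atomic_thread_fence(std::memory_order_seq_cst);   // keep marker before payload
            mValue = v;
            mSeq.store(s + 1, std::memory_order_release);          // mark clean (even)
        }

        bool poll(T &out) const {                                  // single reader
            for (int tries = 0; tries < 5; ++tries) {              // bounded retries, like the original
                int32_t before = mSeq.load(std::memory_order_acquire);
                if (before & 1) {
                    continue;                                      // writer in progress
                }
                T copy = mValue;                                   // may be torn; validated below
                std::atomic_thread_fence(std::memory_order_acquire);
                if (mSeq.load(std::memory_order_relaxed) == before) {
                    out = copy;
                    return true;
                }
            }
            return false;
        }
    };

This sketch omits the original's tracking of the last-observed sequence (used to report "no new value") and the ack() handshake; it only illustrates the publish/validate cycle.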
diff --git a/media/libmedia/SingleStateQueueInstantiations.cpp b/media/libmedia/SingleStateQueueInstantiations.cpp
deleted file mode 100644
index 0265c8c..0000000
--- a/media/libmedia/SingleStateQueueInstantiations.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/SingleStateQueue.h>
-#include <private/media/StaticAudioTrackState.h>
-#include <media/AudioTimestamp.h>
-
-// FIXME hack for gcc
-
-namespace android {
-
-template class SingleStateQueue<StaticAudioTrackState>; // typedef StaticAudioTrackSingleStateQueue
-template class SingleStateQueue<AudioTimestamp>; // typedef AudioTimestampSingleStateQueue
-
-}
diff --git a/media/libmedia/StringArray.cpp b/media/libmedia/StringArray.cpp
index 5f5b57a..477e3fd 100644
--- a/media/libmedia/StringArray.cpp
+++ b/media/libmedia/StringArray.cpp
@@ -16,7 +16,7 @@
//
// Sortable array of strings. STL-ish, but STL-free.
-//
+//
#include <stdlib.h>
#include <string.h>
diff --git a/media/libmedia/docs/Makefile b/media/libmedia/docs/Makefile
new file mode 100644
index 0000000..bddbc9b
--- /dev/null
+++ b/media/libmedia/docs/Makefile
@@ -0,0 +1,2 @@
+paused.png : paused.dot
+ dot -Tpng < $< > $@
diff --git a/media/libmedia/docs/paused.dot b/media/libmedia/docs/paused.dot
new file mode 100644
index 0000000..11e1777
--- /dev/null
+++ b/media/libmedia/docs/paused.dot
@@ -0,0 +1,85 @@
+digraph paused {
+initial [label="INITIAL\n\
+mIgnoreNextPausedInt = false\n\
+mPaused = false\n\
+mPausedInt = false"];
+
+resume_body [label="mIgnoreNextPausedInt = true\nif (mPaused || mPausedInt)"];
+resume_paused [label="mPaused = false\nmPausedInt = false\nsignal()"];
+resume_paused -> resume_merged;
+resume_merged [label="return"];
+
+Application -> ATstop;
+ATstop [label="AudioTrack::stop()"];
+ATstop -> pause;
+Application -> ATpause;
+ATpause [label="AudioTrack::pause()"];
+ATpause -> pause;
+ATstart -> resume;
+ATstart [label="AudioTrack::start()"];
+destructor [label="~AudioTrack()"];
+destructor -> requestExit;
+requestExit [label="AudioTrackThread::requestExit()"];
+requestExit -> resume;
+Application -> ATsetMarkerPosition
+ATsetMarkerPosition [label="AudioTrack::setMarkerPosition()\n[sets marker variables]"];
+ATsetMarkerPosition -> ATTwake
+Application -> ATsetPositionUpdatePeriod
+ATsetPositionUpdatePeriod [label="AudioTrack::setPositionUpdatePeriod()\n[sets update period variables]"];
+ATsetPositionUpdatePeriod -> ATTwake
+Application -> ATstart;
+
+resume [label="AudioTrackThread::resume()"];
+resume -> resume_body;
+
+resume_body -> resume_paused [label="true"];
+resume_body -> resume_merged [label="false"];
+
+ATTwake [label="AudioTrackThread::wake()\nif (!mPaused && mPausedInt && mPausedNs > 0)"];
+ATTwake-> ATTWake_wakeable [label="true"];
+ATTWake_wakeable [label="mIgnoreNextPausedInt = true\nmPausedInt = false\nsignal()"];
+ATTwake-> ATTWake_cannotwake [label="false"]
+ATTWake_cannotwake [label="ignore"];
+
+pause [label="mPaused = true"];
+pause -> return;
+
+threadLoop [label="AudioTrackThread::threadLoop()\nENTRY"];
+threadLoop -> threadLoop_1;
+threadLoop_1 [label="if (mPaused)"];
+threadLoop_1 -> threadLoop_1_true [label="true"];
+threadLoop_1 -> threadLoop_2 [label="false"];
+threadLoop_1_true [label="wait()\nreturn true"];
+threadLoop_2 [label="if (mIgnoreNextPausedInt)"];
+threadLoop_2 -> threadLoop_2_true [label="true"];
+threadLoop_2 -> threadLoop_3 [label="false"];
+threadLoop_2_true [label="mIgnoreNextPausedInt = false\nmPausedInt = false"];
+threadLoop_2_true -> threadLoop_3;
+threadLoop_3 [label="if (mPausedInt)"];
+threadLoop_3 -> threadLoop_3_true [label="true"];
+threadLoop_3 -> threadLoop_4 [label="false"];
+threadLoop_3_true [label="wait()\nmPausedInt = false\nreturn true"];
+threadLoop_4 [label="if (exitPending)"];
+threadLoop_4 -> threadLoop_4_true [label="true"];
+threadLoop_4 -> threadLoop_5 [label="false"];
+threadLoop_4_true [label="return false"];
+threadLoop_5 [label="ns = processAudioBuffer()"];
+threadLoop_5 -> threadLoop_6;
+threadLoop_6 [label="case ns"];
+threadLoop_6 -> threadLoop_6_0 [label="0"];
+threadLoop_6 -> threadLoop_6_NS_INACTIVE [label="NS_INACTIVE"];
+threadLoop_6 -> threadLoop_6_NS_NEVER [label="NS_NEVER"];
+threadLoop_6 -> threadLoop_6_NS_WHENEVER [label="NS_WHENEVER"];
+threadLoop_6 -> threadLoop_6_default [label="default"];
+threadLoop_6_default [label="if (ns < 0)"];
+threadLoop_6_default -> threadLoop_6_default_true [label="true"];
+threadLoop_6_default -> threadLoop_6_default_false [label="false"];
+threadLoop_6_default_true [label="FATAL"];
+threadLoop_6_default_false [label="pauseInternal(ns) [wake()-able]\nmPausedInt = true\nmPausedNs = ns\nreturn true"];
+threadLoop_6_0 [label="return true"];
+threadLoop_6_NS_INACTIVE [label="pauseInternal()\nmPausedInt = true\nmPausedNs = 0\nreturn true"];

+threadLoop_6_NS_NEVER [label="return false"];
+threadLoop_6_NS_WHENEVER [label="ns = 1s"];
+threadLoop_6_NS_WHENEVER -> threadLoop_6_default_false;
+
+}
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 05c89ed..432ecda 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -240,7 +240,7 @@ status_t MediaPlayer::setVideoSurfaceTexture(
// must call with lock held
status_t MediaPlayer::prepareAsync_l()
{
- if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
+ if ( (mPlayer != 0) && ( mCurrentState & (MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
mPlayer->setAudioStreamType(mStreamType);
if (mAudioAttributesParcel != NULL) {
mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
@@ -414,7 +414,8 @@ status_t MediaPlayer::getCurrentPosition(int *msec)
status_t MediaPlayer::getDuration_l(int *msec)
{
ALOGV("getDuration_l");
- bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE));
+ bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED |
+ MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE));
if (mPlayer != 0 && isValidState) {
int durationMs;
status_t ret = mPlayer->getDuration(&durationMs);
@@ -443,7 +444,8 @@ status_t MediaPlayer::getDuration(int *msec)
status_t MediaPlayer::seekTo_l(int msec)
{
ALOGV("seekTo %d", msec);
- if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
+ if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
+ MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
if ( msec < 0 ) {
ALOGW("Attempt to seek to invalid position: %d", msec);
msec = 0;
@@ -477,7 +479,8 @@ status_t MediaPlayer::seekTo_l(int msec)
return NO_ERROR;
}
}
- ALOGE("Attempt to perform seekTo in wrong state: mPlayer=%p, mCurrentState=%u", mPlayer.get(), mCurrentState);
+ ALOGE("Attempt to perform seekTo in wrong state: mPlayer=%p, mCurrentState=%u", mPlayer.get(),
+ mCurrentState);
return INVALID_OPERATION;
}
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 1952b86..973e156 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -264,32 +264,6 @@ status_t MediaRecorder::setAudioEncoder(int ae)
return ret;
}
-status_t MediaRecorder::setOutputFile(const char* path)
-{
- ALOGV("setOutputFile(%s)", path);
- if (mMediaRecorder == NULL) {
- ALOGE("media recorder is not initialized yet");
- return INVALID_OPERATION;
- }
- if (mIsOutputFileSet) {
- ALOGE("output file has already been set");
- return INVALID_OPERATION;
- }
- if (!(mCurrentState & MEDIA_RECORDER_DATASOURCE_CONFIGURED)) {
- ALOGE("setOutputFile called in an invalid state(%d)", mCurrentState);
- return INVALID_OPERATION;
- }
-
- status_t ret = mMediaRecorder->setOutputFile(path);
- if (OK != ret) {
- ALOGV("setOutputFile failed: %d", ret);
- mCurrentState = MEDIA_RECORDER_ERROR;
- return ret;
- }
- mIsOutputFileSet = true;
- return ret;
-}
-
status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
{
ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 194abbb..4d4de9b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -154,17 +154,6 @@ status_t MediaRecorderClient::setAudioEncoder(int ae)
return mRecorder->setAudioEncoder((audio_encoder)ae);
}
-status_t MediaRecorderClient::setOutputFile(const char* path)
-{
- ALOGV("setOutputFile(%s)", path);
- Mutex::Autolock lock(mLock);
- if (mRecorder == NULL) {
- ALOGE("recorder is not initialized");
- return NO_INIT;
- }
- return mRecorder->setOutputFile(path);
-}
-
status_t MediaRecorderClient::setOutputFile(int fd, int64_t offset, int64_t length)
{
ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index a65ec9f..a444b6c 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -38,7 +38,6 @@ public:
virtual status_t setOutputFormat(int of);
virtual status_t setVideoEncoder(int ve);
virtual status_t setAudioEncoder(int ae);
- virtual status_t setOutputFile(const char* path);
virtual status_t setOutputFile(int fd, int64_t offset,
int64_t length);
virtual status_t setVideoSize(int width, int height);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 86639cb..5c16920 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -241,14 +241,6 @@ status_t StagefrightRecorder::setPreviewSurface(const sp<IGraphicBufferProducer>
return OK;
}
-status_t StagefrightRecorder::setOutputFile(const char * /* path */) {
- ALOGE("setOutputFile(const char*) must not be called");
- // We don't actually support this at all, as the media_server process
- // no longer has permissions to create files.
-
- return -EPERM;
-}
-
status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
ALOGV("setOutputFile: %d, %lld, %lld", fd, offset, length);
// These don't make any sense, do they?
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 54c38d3..a6eba39 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -53,7 +53,6 @@ struct StagefrightRecorder : public MediaRecorderBase {
virtual status_t setVideoFrameRate(int frames_per_second);
virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
- virtual status_t setOutputFile(const char *path);
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
virtual status_t setParameters(const String8& params);
virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index bc79fdb..abfa4d3 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -351,6 +351,14 @@ status_t NuPlayerDriver::seekTo(int msec) {
case STATE_PREPARED:
case STATE_STOPPED_AND_PREPARED:
{
+ int curpos = 0;
+ if (mPositionUs > 0) {
+ curpos = (mPositionUs + 500ll) / 1000;
+ }
+ if (curpos == msec) {
+ // nothing to do, and doing something anyway could result in deadlock (b/15323063)
+ break;
+ }
mStartupSeekTimeUs = seekTimeUs;
// pretend that the seek completed. It will actually happen when starting playback.
// TODO: actually perform the seek here, so the player is ready to go at the new
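The added guard avoids re-issuing a seek when the player is already at the requested position, rounding the microsecond position to the nearest millisecond before comparing. A trivial illustration of the rounding (values made up):

    // Illustrative only: how the no-op seek check rounds microseconds to milliseconds.
    int64_t positionUs = 1234567;                  // current playback position in us
    int curposMs = (positionUs + 500ll) / 1000;    // rounds to nearest ms -> 1235
    // A subsequent seekTo(1235) is then treated as a no-op, avoiding the b/15323063 deadlock.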
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 9707c4a..1353f28 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -11,7 +11,6 @@ LOCAL_SRC_FILES := \
MonoPipeReader.cpp \
Pipe.cpp \
PipeReader.cpp \
- roundup.c \
SourceAudioBufferProvider.cpp
LOCAL_SRC_FILES += NBLog.cpp
@@ -27,12 +26,13 @@ LOCAL_SRC_FILES += NBLog.cpp
LOCAL_MODULE := libnbaio
LOCAL_SHARED_LIBRARIES := \
+ libaudioutils \
libbinder \
libcommon_time_client \
libcutils \
libutils \
liblog
-LOCAL_STATIC_LIBRARIES += libinstantssq
+LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 0b65861..129e9ef 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -27,7 +27,7 @@
#include <utils/Trace.h>
#include <media/AudioBufferProvider.h>
#include <media/nbaio/MonoPipe.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
namespace android {
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index de82229..e4d3ed8 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -39,7 +39,7 @@ ssize_t MonoPipeReader::availableToRead()
return NEGOTIATE;
}
ssize_t ret = android_atomic_acquire_load(&mPipe->mRear) - mPipe->mFront;
- ALOG_ASSERT((0 <= ret) && (ret <= mMaxFrames));
+ ALOG_ASSERT((0 <= ret) && ((size_t) ret <= mPipe->mMaxFrames));
return ret;
}
diff --git a/media/libnbaio/Pipe.cpp b/media/libnbaio/Pipe.cpp
index 6e0ec8c..13f211d 100644
--- a/media/libnbaio/Pipe.cpp
+++ b/media/libnbaio/Pipe.cpp
@@ -21,7 +21,7 @@
#include <cutils/compiler.h>
#include <utils/Log.h>
#include <media/nbaio/Pipe.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
namespace android {
diff --git a/media/libnbaio/roundup.c b/media/libnbaio/roundup.c
deleted file mode 100644
index 1d552d1..0000000
--- a/media/libnbaio/roundup.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/nbaio/roundup.h>
-
-unsigned roundup(unsigned v)
-{
- // __builtin_clz is undefined for zero input
- if (v == 0) {
- v = 1;
- }
- int lz = __builtin_clz((int) v);
- unsigned rounded = ((unsigned) 0x80000000) >> lz;
- // 0x80000001 and higher are actually rounded _down_ to prevent overflow
- if (v > rounded && lz > 0) {
- rounded <<= 1;
- }
- return rounded;
-}
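roundup() now comes from audio_utils (hence the new libaudioutils dependency and the <audio_utils/roundup.h> includes); the deleted copy rounds a count up to the next power of two via the leading-zero count, except that values above 0x80000000 round down to avoid 32-bit overflow. A hedged, self-contained equivalent with a few spot checks:

    #include <cassert>
    #include <cstdint>

    // Same behaviour as the removed roundup(): smallest power of two >= v,
    // except that values above 0x80000000 are clamped down to 0x80000000.
    static uint32_t roundup_pow2(uint32_t v) {
        if (v == 0) v = 1;                        // __builtin_clz(0) is undefined
        int lz = __builtin_clz(v);
        uint32_t rounded = 0x80000000u >> lz;     // highest set bit of v
        if (v > rounded && lz > 0) rounded <<= 1;
        return rounded;
    }

    int main() {
        assert(roundup_pow2(0) == 1);
        assert(roundup_pow2(1) == 1);
        assert(roundup_pow2(3) == 4);
        assert(roundup_pow2(1024) == 1024);
        assert(roundup_pow2(1025) == 2048);
        assert(roundup_pow2(0x80000001u) == 0x80000000u);  // rounded down, not up
        return 0;
    }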
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 2e41d80..9d90dbd 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -36,33 +36,19 @@
namespace android {
-AACWriter::AACWriter(const char *filename)
- : mFd(-1),
- mInitCheck(NO_INIT),
- mStarted(false),
- mPaused(false),
- mResumed(false),
- mChannelCount(-1),
- mSampleRate(-1),
- mAACProfile(OMX_AUDIO_AACObjectLC) {
-
- ALOGV("AACWriter Constructor");
-
- mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
- if (mFd >= 0) {
- mInitCheck = OK;
- }
-}
-
AACWriter::AACWriter(int fd)
: mFd(dup(fd)),
mInitCheck(mFd < 0? NO_INIT: OK),
mStarted(false),
mPaused(false),
mResumed(false),
+ mThread(0),
+ mEstimatedSizeBytes(0),
+ mEstimatedDurationUs(0),
mChannelCount(-1),
mSampleRate(-1),
- mAACProfile(OMX_AUDIO_AACObjectLC) {
+ mAACProfile(OMX_AUDIO_AACObjectLC),
+ mFrameDurationUs(0) {
}
AACWriter::~AACWriter() {
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 9aa7d95..f53d7f0 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -31,19 +31,6 @@
namespace android {
-AMRWriter::AMRWriter(const char *filename)
- : mFd(-1),
- mInitCheck(NO_INIT),
- mStarted(false),
- mPaused(false),
- mResumed(false) {
-
- mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
- if (mFd >= 0) {
- mInitCheck = OK;
- }
-}
-
AMRWriter::AMRWriter(int fd)
: mFd(dup(fd)),
mInitCheck(mFd < 0? NO_INIT: OK),
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2629afc..6d9bbae 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -101,6 +101,7 @@ LOCAL_STATIC_LIBRARIES := \
libstagefright_color_conversion \
libstagefright_aacenc \
libstagefright_matroska \
+ libstagefright_mediafilter \
libstagefright_webm \
libstagefright_timedtext \
libvpx \
@@ -108,13 +109,14 @@ LOCAL_STATIC_LIBRARIES := \
libstagefright_mpeg2ts \
libstagefright_id3 \
libFLAC \
- libmedia_helper
+ libmedia_helper \
LOCAL_SHARED_LIBRARIES += \
libstagefright_enc_common \
libstagefright_avc_common \
libstagefright_foundation \
- libdl
+ libdl \
+ libRScpp \
LOCAL_CFLAGS += -Wno-multichar
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index a7ca3da..f0db76b 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FileSource"
+#include <utils/Log.h>
+
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/FileSource.h>
#include <sys/types.h>
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index 9856f92..4359fb9 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -480,19 +480,6 @@ MPEG2TSWriter::MPEG2TSWriter(int fd)
init();
}
-MPEG2TSWriter::MPEG2TSWriter(const char *filename)
- : mFile(fopen(filename, "wb")),
- mWriteCookie(NULL),
- mWriteFunc(NULL),
- mStarted(false),
- mNumSourcesDone(0),
- mNumTSPacketsWritten(0),
- mNumTSPacketsBeforeMeta(0),
- mPATContinuityCounter(0),
- mPMTContinuityCounter(0) {
- init();
-}
-
MPEG2TSWriter::MPEG2TSWriter(
void *cookie,
ssize_t (*write)(void *cookie, const void *data, size_t size))
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 9f20b1d..beb6f20 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -345,31 +345,6 @@ private:
Track &operator=(const Track &);
};
-MPEG4Writer::MPEG4Writer(const char *filename)
- : mFd(-1),
- mInitCheck(NO_INIT),
- mIsRealTimeRecording(true),
- mUse4ByteNalLength(true),
- mUse32BitOffset(true),
- mIsFileSizeLimitExplicitlyRequested(false),
- mPaused(false),
- mStarted(false),
- mWriterThreadStarted(false),
- mOffset(0),
- mMdatOffset(0),
- mEstimatedMoovBoxSize(0),
- mInterleaveDurationUs(1000000),
- mLatitudex10000(0),
- mLongitudex10000(0),
- mAreGeoTagsAvailable(false),
- mStartTimeOffsetMs(-1) {
-
- mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
- if (mFd >= 0) {
- mInitCheck = OK;
- }
-}
-
MPEG4Writer::MPEG4Writer(int fd)
: mFd(dup(fd)),
mInitCheck(mFd < 0? NO_INIT: OK),
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c2381b4..8fa07b2 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -36,6 +36,7 @@
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaFilter.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/NativeWindowWrapper.h>
#include <private/android_filesystem_config.h>
@@ -189,7 +190,16 @@ status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
// quickly, violating the OpenMAX specs, until that is remedied
// we need to invest in an extra looper to free the main event
// queue.
- mCodec = new ACodec;
+
+ if (nameIsType || !strncasecmp(name.c_str(), "omx.", 4)) {
+ mCodec = new ACodec;
+ } else if (!nameIsType
+ && !strncasecmp(name.c_str(), "android.filter.", 15)) {
+ mCodec = new MediaFilter;
+ } else {
+ return NAME_NOT_FOUND;
+ }
+
bool needDedicatedLooper = false;
if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) {
needDedicatedLooper = true;
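With this change a component name beginning with "android.filter." is routed to the new MediaFilter implementation instead of ACodec, while type-based creation and "omx.*" names keep using ACodec; any other name now fails with NAME_NOT_FOUND. A hedged sketch of how a client reaches the filter path (fragment assuming the usual stagefright headers; the filter name is illustrative, not a guaranteed component name):

    // Sketch: creating a filter "codec" by component name.
    sp<ALooper> looper = new ALooper;
    looper->start();

    sp<MediaCodec> filter = MediaCodec::CreateByComponentName(
            looper, "android.filter.zerofilter" /* illustrative name */);
    if (filter == NULL) {
        // neither an OMX component nor an android.filter.* component
    }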
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index c7c6f34..b13877d 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -38,21 +38,6 @@
namespace android {
-MediaMuxer::MediaMuxer(const char *path, OutputFormat format)
- : mFormat(format),
- mState(UNINITIALIZED) {
- if (format == OUTPUT_FORMAT_MPEG_4) {
- mWriter = new MPEG4Writer(path);
- } else if (format == OUTPUT_FORMAT_WEBM) {
- mWriter = new WebmWriter(path);
- }
-
- if (mWriter != NULL) {
- mFileMeta = new MetaData;
- mState = INITIALIZED;
- }
-}
-
MediaMuxer::MediaMuxer(int fd, OutputFormat format)
: mFormat(format),
mState(UNINITIALIZED) {
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 8a95643..6e6a78a 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -38,7 +38,10 @@ SoftVPX::SoftVPX(
NULL /* profileLevels */, 0 /* numProfileLevels */,
320 /* width */, 240 /* height */, callbacks, appData, component),
mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
+ mEOSStatus(INPUT_DATA_AVAILABLE),
mCtx(NULL),
+ mFrameParallelMode(false),
+ mTimeStampIdx(0),
mImg(NULL) {
// arbitrary from avc/hevc as vpx does not specify a min compression ratio
const size_t kMinCompressionRatio = mMode == MODE_VP8 ? 2 : 4;
@@ -51,9 +54,7 @@ SoftVPX::SoftVPX(
}
SoftVPX::~SoftVPX() {
- vpx_codec_destroy((vpx_codec_ctx_t *)mCtx);
- delete (vpx_codec_ctx_t *)mCtx;
- mCtx = NULL;
+ destroyDecoder();
}
static int GetCPUCoreCount() {
@@ -73,12 +74,19 @@ status_t SoftVPX::initDecoder() {
mCtx = new vpx_codec_ctx_t;
vpx_codec_err_t vpx_err;
vpx_codec_dec_cfg_t cfg;
+ vpx_codec_flags_t flags;
memset(&cfg, 0, sizeof(vpx_codec_dec_cfg_t));
+ memset(&flags, 0, sizeof(vpx_codec_flags_t));
cfg.threads = GetCPUCoreCount();
+
+ if (mFrameParallelMode) {
+ flags |= VPX_CODEC_USE_FRAME_THREADING;
+ }
+
if ((vpx_err = vpx_codec_dec_init(
(vpx_codec_ctx_t *)mCtx,
mMode == MODE_VP8 ? &vpx_codec_vp8_dx_algo : &vpx_codec_vp9_dx_algo,
- &cfg, 0))) {
+ &cfg, flags))) {
ALOGE("on2 decoder failed to initialize. (%d)", vpx_err);
return UNKNOWN_ERROR;
}
@@ -86,86 +94,155 @@ status_t SoftVPX::initDecoder() {
return OK;
}
+status_t SoftVPX::destroyDecoder() {
+ vpx_codec_destroy((vpx_codec_ctx_t *)mCtx);
+ delete (vpx_codec_ctx_t *)mCtx;
+ mCtx = NULL;
+ return OK;
+}
+
+bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+ BufferInfo *outInfo = NULL;
+ OMX_BUFFERHEADERTYPE *outHeader = NULL;
+ vpx_codec_iter_t iter = NULL;
+
+ if (flushDecoder && mFrameParallelMode) {
+ // Flush decoder by passing NULL data ptr and 0 size.
+ // Ideally, this should never fail.
+ if (vpx_codec_decode((vpx_codec_ctx_t *)mCtx, NULL, 0, NULL, 0)) {
+ ALOGE("Failed to flush on2 decoder.");
+ return false;
+ }
+ }
+
+ if (!display) {
+ if (!flushDecoder) {
+ ALOGE("Invalid operation.");
+ return false;
+ }
+ // Drop all the decoded frames in decoder.
+ while ((mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter))) {
+ }
+ return true;
+ }
+
+ while (!outQueue.empty()) {
+ if (mImg == NULL) {
+ mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
+ if (mImg == NULL) {
+ break;
+ }
+ }
+ uint32_t width = mImg->d_w;
+ uint32_t height = mImg->d_h;
+ outInfo = *outQueue.begin();
+ outHeader = outInfo->mHeader;
+ CHECK_EQ(mImg->fmt, IMG_FMT_I420);
+ handlePortSettingsChange(portWillReset, width, height);
+ if (*portWillReset) {
+ return true;
+ }
+
+ outHeader->nOffset = 0;
+ outHeader->nFilledLen = (width * height * 3) / 2;
+ outHeader->nFlags = 0;
+ outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
+
+ uint8_t *dst = outHeader->pBuffer;
+ const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
+ const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
+ const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
+ size_t srcYStride = mImg->stride[PLANE_Y];
+ size_t srcUStride = mImg->stride[PLANE_U];
+ size_t srcVStride = mImg->stride[PLANE_V];
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+
+ mImg = NULL;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ }
+
+ if (!eos) {
+ return true;
+ }
+
+ if (!outQueue.empty()) {
+ outInfo = *outQueue.begin();
+ outQueue.erase(outQueue.begin());
+ outHeader = outInfo->mHeader;
+ outHeader->nTimeStamp = 0;
+ outHeader->nFilledLen = 0;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ outInfo->mOwnedByUs = false;
+ notifyFillBufferDone(outHeader);
+ mEOSStatus = OUTPUT_FRAMES_FLUSHED;
+ }
+ return true;
+}
+
void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
- if (mOutputPortSettingsChange != NONE) {
+ if (mOutputPortSettingsChange != NONE || mEOSStatus == OUTPUT_FRAMES_FLUSHED) {
return;
}
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
bool EOSseen = false;
+ vpx_codec_err_t err;
+ bool portWillReset = false;
+
+ while ((mEOSStatus == INPUT_EOS_SEEN || !inQueue.empty())
+ && !outQueue.empty()) {
+        // Output the pending frames left over from the last port reset or decoder flush.
+ if (mEOSStatus == INPUT_EOS_SEEN || mImg != NULL) {
+ if (!outputBuffers(
+ mEOSStatus == INPUT_EOS_SEEN, true /* display */,
+ mEOSStatus == INPUT_EOS_SEEN, &portWillReset)) {
+ ALOGE("on2 decoder failed to output frame.");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ if (portWillReset || mEOSStatus == OUTPUT_FRAMES_FLUSHED ||
+ mEOSStatus == INPUT_EOS_SEEN) {
+ return;
+ }
+ }
- while (!inQueue.empty() && !outQueue.empty()) {
BufferInfo *inInfo = *inQueue.begin();
OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-
if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ mEOSStatus = INPUT_EOS_SEEN;
EOSseen = true;
- if (inHeader->nFilledLen == 0) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
-
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
- return;
- }
}
- if (mImg == NULL) {
- if (vpx_codec_decode(
- (vpx_codec_ctx_t *)mCtx,
- inHeader->pBuffer + inHeader->nOffset,
- inHeader->nFilledLen,
- NULL,
- 0)) {
- ALOGE("on2 decoder failed to decode frame.");
-
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
- }
- vpx_codec_iter_t iter = NULL;
- mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
+ if (inHeader->nFilledLen > 0 &&
+ vpx_codec_decode((vpx_codec_ctx_t *)mCtx,
+ inHeader->pBuffer + inHeader->nOffset,
+ inHeader->nFilledLen,
+ &mTimeStamps[mTimeStampIdx], 0)) {
+ ALOGE("on2 decoder failed to decode frame.");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
}
+ mTimeStampIdx = (mTimeStampIdx + 1) % kNumBuffers;
- if (mImg != NULL) {
- CHECK_EQ(mImg->fmt, IMG_FMT_I420);
-
- uint32_t width = mImg->d_w;
- uint32_t height = mImg->d_h;
- bool portWillReset = false;
- handlePortSettingsChange(&portWillReset, width, height);
- if (portWillReset) {
- return;
- }
-
- outHeader->nOffset = 0;
- outHeader->nFilledLen = (width * height * 3) / 2;
- outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
- outHeader->nTimeStamp = inHeader->nTimeStamp;
-
- uint8_t *dst = outHeader->pBuffer;
- const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
- const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
- const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
- size_t srcYStride = mImg->stride[PLANE_Y];
- size_t srcUStride = mImg->stride[PLANE_U];
- size_t srcVStride = mImg->stride[PLANE_V];
- copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
-
- mImg = NULL;
- outInfo->mOwnedByUs = false;
- outQueue.erase(outQueue.begin());
- outInfo = NULL;
- notifyFillBufferDone(outHeader);
- outHeader = NULL;
+ if (!outputBuffers(
+ EOSseen /* flushDecoder */, true /* display */, EOSseen, &portWillReset)) {
+ ALOGE("on2 decoder failed to output frame.");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ if (portWillReset) {
+ return;
}
inInfo->mOwnedByUs = false;
@@ -176,6 +253,30 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
}
}
+void SoftVPX::onPortFlushCompleted(OMX_U32 portIndex) {
+ if (portIndex == kInputPortIndex) {
+ bool portWillReset = false;
+ if (!outputBuffers(
+ true /* flushDecoder */, false /* display */, false /* eos */, &portWillReset)) {
+ ALOGE("Failed to flush decoder.");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ mEOSStatus = INPUT_DATA_AVAILABLE;
+ }
+}
+
+void SoftVPX::onReset() {
+ bool portWillReset = false;
+ if (!outputBuffers(
+ true /* flushDecoder */, false /* display */, false /* eos */, &portWillReset)) {
+ ALOGW("Failed to flush decoder. Try to hard reset decoder");
+ destroyDecoder();
+ initDecoder();
+ }
+ mEOSStatus = INPUT_DATA_AVAILABLE;
+}
+
} // namespace android
android::SoftOMXComponent *createSoftOMXComponent(
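With frame-parallel decoding, output frames no longer come back in lockstep with inputs, so the timestamp can no longer be copied straight from inHeader to outHeader; the patch stashes each input timestamp in a small ring (mTimeStamps, indexed by mTimeStampIdx), passes its address as user_priv to vpx_codec_decode(), and reads it back from mImg->user_priv when the frame is emitted. A stripped-down sketch of that pattern (not the component code; names and the buffer count are illustrative):

    #include "vpx/vpx_decoder.h"
    #include <cstdint>

    // Sketch: carrying per-frame metadata through libvpx via user_priv.
    static const size_t kNumBuffers = 4;   // mirrors the component's buffer count
    int64_t timeStamps[kNumBuffers];
    size_t timeStampIdx = 0;

    // On each input buffer: remember the timestamp and hand its address to the decoder.
    void queueInput(vpx_codec_ctx_t *ctx, const uint8_t *data, unsigned int size, int64_t ptsUs) {
        timeStamps[timeStampIdx] = ptsUs;
        // user_priv is carried through the decoder untouched and comes back on the image.
        vpx_codec_decode(ctx, data, size, &timeStamps[timeStampIdx], 0 /* deadline */);
        timeStampIdx = (timeStampIdx + 1) % kNumBuffers;
    }

    // When draining output frames: recover the timestamp of the frame actually produced.
    void drainOutput(vpx_codec_ctx_t *ctx) {
        vpx_codec_iter_t iter = NULL;
        vpx_image_t *img;
        while ((img = vpx_codec_get_frame(ctx, &iter)) != NULL) {
            int64_t ptsUs = *(int64_t *)img->user_priv;  // timestamp of the source frame
            // ... hand the frame plus ptsUs to the output port ...
            (void)ptsUs;
        }
    }

The ring only works because at most kNumBuffers frames are in flight at once, which the component guarantees through its port buffer counts.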
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 8f68693..8ccbae2 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -38,6 +38,8 @@ protected:
virtual ~SoftVPX();
virtual void onQueueFilled(OMX_U32 portIndex);
+ virtual void onPortFlushCompleted(OMX_U32 portIndex);
+ virtual void onReset();
private:
enum {
@@ -49,11 +51,21 @@ private:
MODE_VP9
} mMode;
- void *mCtx;
+ enum {
+ INPUT_DATA_AVAILABLE, // VPX component is ready to decode data.
+ INPUT_EOS_SEEN, // VPX component saw EOS and is flushing On2 decoder.
+ OUTPUT_FRAMES_FLUSHED // VPX component finished flushing On2 decoder.
+ } mEOSStatus;
+ void *mCtx;
+ bool mFrameParallelMode; // Frame parallel is only supported by VP9 decoder.
+ OMX_TICKS mTimeStamps[kNumBuffers];
+ uint8_t mTimeStampIdx;
vpx_image_t *mImg;
status_t initDecoder();
+ status_t destroyDecoder();
+ bool outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset);
DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
};
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 1899b40..6474906 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -98,32 +98,48 @@ void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) {
mCropWidth = mCropRight - mCropLeft + 1;
mCropHeight = mCropBottom - mCropTop + 1;
- int halFormat;
- size_t bufWidth, bufHeight;
-
- switch (mColorFormat) {
- case OMX_COLOR_FormatYUV420Planar:
- case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
- {
- if (!runningInEmulator()) {
+ // by default convert everything to RGB565
+ int halFormat = HAL_PIXEL_FORMAT_RGB_565;
+ size_t bufWidth = mCropWidth;
+ size_t bufHeight = mCropHeight;
+
+    // hardware has YV12 and RGBA8888 support, so convert known formats
+ if (!runningInEmulator()) {
+ switch (mColorFormat) {
+ case OMX_COLOR_FormatYUV420Planar:
+ case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
+ {
halFormat = HAL_PIXEL_FORMAT_YV12;
bufWidth = (mCropWidth + 1) & ~1;
bufHeight = (mCropHeight + 1) & ~1;
break;
}
-
- // fall through.
+ case OMX_COLOR_Format24bitRGB888:
+ {
+ halFormat = HAL_PIXEL_FORMAT_RGB_888;
+ bufWidth = (mCropWidth + 1) & ~1;
+ bufHeight = (mCropHeight + 1) & ~1;
+ break;
+ }
+ case OMX_COLOR_Format32bitARGB8888:
+ case OMX_COLOR_Format32BitRGBA8888:
+ {
+ halFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+ bufWidth = (mCropWidth + 1) & ~1;
+ bufHeight = (mCropHeight + 1) & ~1;
+ break;
+ }
+ default:
+ {
+ break;
+ }
}
+ }
- default:
- halFormat = HAL_PIXEL_FORMAT_RGB_565;
- bufWidth = mCropWidth;
- bufHeight = mCropHeight;
-
- mConverter = new ColorConverter(
- mColorFormat, OMX_COLOR_Format16bitRGB565);
- CHECK(mConverter->isValid());
- break;
+ if (halFormat == HAL_PIXEL_FORMAT_RGB_565) {
+ mConverter = new ColorConverter(
+ mColorFormat, OMX_COLOR_Format16bitRGB565);
+ CHECK(mConverter->isValid());
}
CHECK(mNativeWindow != NULL);
@@ -200,6 +216,8 @@ void SoftwareRenderer::render(
CHECK_EQ(0, mapper.lock(
buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));
+ // TODO move the other conversions also into ColorConverter, and
+ // fix cropping issues (when mCropLeft/Top != 0 or mWidth != mCropWidth)
if (mConverter) {
mConverter->convert(
data,
@@ -210,7 +228,8 @@ void SoftwareRenderer::render(
0, 0, mCropWidth - 1, mCropHeight - 1);
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
const uint8_t *src_y = (const uint8_t *)data;
- const uint8_t *src_u = (const uint8_t *)data + mWidth * mHeight;
+ const uint8_t *src_u =
+ (const uint8_t *)data + mWidth * mHeight;
const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
uint8_t *dst_y = (uint8_t *)dst;
@@ -236,14 +255,10 @@ void SoftwareRenderer::render(
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
- } else {
- CHECK_EQ(mColorFormat, OMX_TI_COLOR_FormatYUV420PackedSemiPlanar);
-
- const uint8_t *src_y =
- (const uint8_t *)data;
-
- const uint8_t *src_uv =
- (const uint8_t *)data + mWidth * (mHeight - mCropTop / 2);
+ } else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar) {
+ const uint8_t *src_y = (const uint8_t *)data;
+ const uint8_t *src_uv = (const uint8_t *)data
+ + mWidth * (mHeight - mCropTop / 2);
uint8_t *dst_y = (uint8_t *)dst;
@@ -271,6 +286,40 @@ void SoftwareRenderer::render(
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
+ } else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
+ uint8_t* srcPtr = (uint8_t*)data;
+ uint8_t* dstPtr = (uint8_t*)dst;
+
+ for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
+ memcpy(dstPtr, srcPtr, mCropWidth * 3);
+ srcPtr += mWidth * 3;
+ dstPtr += buf->stride * 3;
+ }
+ } else if (mColorFormat == OMX_COLOR_Format32bitARGB8888) {
+ uint8_t *srcPtr, *dstPtr;
+
+ for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
+ srcPtr = (uint8_t*)data + mWidth * 4 * y;
+ dstPtr = (uint8_t*)dst + buf->stride * 4 * y;
+ for (size_t x = 0; x < (size_t)mCropWidth; ++x) {
+ uint8_t a = *srcPtr++;
+ for (size_t i = 0; i < 3; ++i) { // copy RGB
+ *dstPtr++ = *srcPtr++;
+ }
+ *dstPtr++ = a; // alpha last (ARGB to RGBA)
+ }
+ }
+ } else if (mColorFormat == OMX_COLOR_Format32BitRGBA8888) {
+ uint8_t* srcPtr = (uint8_t*)data;
+ uint8_t* dstPtr = (uint8_t*)dst;
+
+ for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
+ memcpy(dstPtr, srcPtr, mCropWidth * 4);
+ srcPtr += mWidth * 4;
+ dstPtr += buf->stride * 4;
+ }
+ } else {
+ LOG_ALWAYS_FATAL("bad color format %#x", mColorFormat);
}
CHECK_EQ(0, mapper.unlock(buf->handle));
diff --git a/media/libstagefright/filters/Android.mk b/media/libstagefright/filters/Android.mk
new file mode 100644
index 0000000..36ab444
--- /dev/null
+++ b/media/libstagefright/filters/Android.mk
@@ -0,0 +1,27 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ ColorConvert.cpp \
+ GraphicBufferListener.cpp \
+ IntrinsicBlurFilter.cpp \
+ MediaFilter.cpp \
+ RSFilter.cpp \
+ SaturationFilter.cpp \
+ saturationARGB.rs \
+ SimpleFilter.cpp \
+ ZeroFilter.cpp
+
+LOCAL_C_INCLUDES := \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/rs/cpp \
+ $(TOP)/frameworks/rs \
+
+intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
+LOCAL_C_INCLUDES += $(intermediates)
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_MODULE:= libstagefright_mediafilter
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/filters/ColorConvert.cpp b/media/libstagefright/filters/ColorConvert.cpp
new file mode 100644
index 0000000..a5039f9
--- /dev/null
+++ b/media/libstagefright/filters/ColorConvert.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ColorConvert.h"
+
+#ifndef max
+#define max(a,b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef min
+#define min(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+namespace android {
+
+void YUVToRGB(
+ int32_t y, int32_t u, int32_t v,
+ int32_t* r, int32_t* g, int32_t* b) {
+ y -= 16;
+ u -= 128;
+ v -= 128;
+
+ *b = 1192 * y + 2066 * u;
+ *g = 1192 * y - 833 * v - 400 * u;
+ *r = 1192 * y + 1634 * v;
+
+ *r = min(262143, max(0, *r));
+ *g = min(262143, max(0, *g));
+ *b = min(262143, max(0, *b));
+
+ *r >>= 10;
+ *g >>= 10;
+ *b >>= 10;
+}
+
+void convertYUV420spToARGB(
+ uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+ uint8_t *dest) {
+ const int32_t bytes_per_pixel = 2;
+
+ for (int32_t i = 0; i < height; i++) {
+ for (int32_t j = 0; j < width; j++) {
+ int32_t y = *(pY + i * width + j);
+ int32_t u = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
+ int32_t v = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
+
+ int32_t r, g, b;
+ YUVToRGB(y, u, v, &r, &g, &b);
+
+ *dest++ = 0xFF;
+ *dest++ = r;
+ *dest++ = g;
+ *dest++ = b;
+ }
+ }
+}
+
+void convertYUV420spToRGB888(
+ uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+ uint8_t *dest) {
+ const int32_t bytes_per_pixel = 2;
+
+ for (int32_t i = 0; i < height; i++) {
+ for (int32_t j = 0; j < width; j++) {
+ int32_t y = *(pY + i * width + j);
+ int32_t u = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
+ int32_t v = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
+
+ int32_t r, g, b;
+ YUVToRGB(y, u, v, &r, &g, &b);
+
+ *dest++ = r;
+ *dest++ = g;
+ *dest++ = b;
+ }
+ }
+}
+
+// HACK - not even slightly optimized
+// TODO: remove when RGBA support is added to SoftwareRenderer
+void convertRGBAToARGB(
+ uint8_t *src, int32_t width, int32_t height, uint32_t stride,
+ uint8_t *dest) {
+ for (size_t i = 0; i < height; ++i) {
+ for (size_t j = 0; j < width; ++j) {
+ uint8_t r = *src++;
+ uint8_t g = *src++;
+ uint8_t b = *src++;
+ uint8_t a = *src++;
+ *dest++ = a;
+ *dest++ = r;
+ *dest++ = g;
+ *dest++ = b;
+ }
+ src += (stride - width) * 4;
+ }
+}
+
+} // namespace android
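The integer coefficients in YUVToRGB() are the standard BT.601 limited-range conversion scaled by 1024: 1192/1024 ~ 1.164, 1634/1024 ~ 1.596, 833/1024 ~ 0.813, 400/1024 ~ 0.391 and 2066/1024 ~ 2.018; the clamp at 262143 (i.e. (256 << 10) - 1) keeps the result in 0..255 after the final >> 10. A quick hedged check that mid-grey maps back to mid-grey:

    #include <cassert>
    #include <cstdint>

    // Spot check of the fixed-point YUV->RGB math used in ColorConvert.cpp.
    int main() {
        int32_t y = 128, u = 128, v = 128;   // mid-grey in YUV
        y -= 16; u -= 128; v -= 128;

        int32_t r = 1192 * y + 1634 * v;             // ~ 1.164*Y' + 1.596*V'
        int32_t g = 1192 * y - 833 * v - 400 * u;    // ~ 1.164*Y' - 0.813*V' - 0.391*U'
        int32_t b = 1192 * y + 2066 * u;             // ~ 1.164*Y' + 2.018*U'

        // clamp to [0, (256 << 10) - 1] then drop the 10 fractional bits
        auto clamp = [](int32_t x) { return x < 0 ? 0 : (x > 262143 ? 262143 : x); };
        r = clamp(r) >> 10; g = clamp(g) >> 10; b = clamp(b) >> 10;

        assert(r == 130 && g == 130 && b == 130);    // (128 - 16) * 1.164 ~= 130
        return 0;
    }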
diff --git a/media/libstagefright/filters/ColorConvert.h b/media/libstagefright/filters/ColorConvert.h
new file mode 100644
index 0000000..13faa02
--- /dev/null
+++ b/media/libstagefright/filters/ColorConvert.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COLOR_CONVERT_H_
+#define COLOR_CONVERT_H_
+
+#include <inttypes.h>
+
+namespace android {
+
+void YUVToRGB(
+ int32_t y, int32_t u, int32_t v,
+ int32_t* r, int32_t* g, int32_t* b);
+
+void convertYUV420spToARGB(
+ uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+ uint8_t *dest);
+
+void convertYUV420spToRGB888(
+ uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+ uint8_t *dest);
+
+// TODO: remove when RGBA support is added to SoftwareRenderer
+void convertRGBAToARGB(
+ uint8_t *src, int32_t width, int32_t height, uint32_t stride,
+ uint8_t *dest);
+
+} // namespace android
+
+#endif // COLOR_CONVERT_H_
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
new file mode 100644
index 0000000..fa38192
--- /dev/null
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "GraphicBufferListener"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include "GraphicBufferListener.h"
+
+namespace android {
+
+status_t GraphicBufferListener::init(
+ const sp<AMessage> &notify,
+ size_t bufferWidth, size_t bufferHeight, size_t bufferCount) {
+ mNotify = notify;
+
+ String8 name("GraphicBufferListener");
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+ mConsumer->setConsumerName(name);
+ mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
+ mConsumer->setConsumerUsageBits(GRALLOC_USAGE_SW_READ_OFTEN);
+
+ status_t err = mConsumer->setMaxAcquiredBufferCount(bufferCount);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+ bufferCount, err);
+ return err;
+ }
+
+ wp<BufferQueue::ConsumerListener> listener =
+ static_cast<BufferQueue::ConsumerListener*>(this);
+ sp<BufferQueue::ProxyConsumerListener> proxy =
+ new BufferQueue::ProxyConsumerListener(listener);
+
+ err = mConsumer->consumerConnect(proxy, false);
+ if (err != NO_ERROR) {
+ ALOGE("Error connecting to BufferQueue: %s (%d)",
+ strerror(-err), err);
+ return err;
+ }
+
+ ALOGV("init() successful.");
+
+ return OK;
+}
+
+void GraphicBufferListener::onFrameAvailable(const BufferItem& /* item */) {
+ ALOGV("onFrameAvailable() called");
+
+ {
+ Mutex::Autolock autoLock(mMutex);
+ mNumFramesAvailable++;
+ }
+
+ sp<AMessage> notify = mNotify->dup();
+    notify->setWhat(kWhatFrameAvailable);
+    notify->post();
+}
+
+void GraphicBufferListener::onBuffersReleased() {
+ ALOGV("onBuffersReleased() called");
+ // nothing to do
+}
+
+void GraphicBufferListener::onSidebandStreamChanged() {
+ ALOGW("GraphicBufferListener cannot consume sideband streams.");
+ // nothing to do
+}
+
+BufferQueue::BufferItem GraphicBufferListener::getBufferItem() {
+ BufferQueue::BufferItem item;
+
+ {
+ Mutex::Autolock autoLock(mMutex);
+ if (mNumFramesAvailable <= 0) {
+ ALOGE("getBuffer() called with no frames available");
+ return item;
+ }
+ mNumFramesAvailable--;
+ }
+
+ status_t err = mConsumer->acquireBuffer(&item, 0);
+ if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+ // shouldn't happen, since we track num frames available
+ ALOGE("frame was not available");
+ item.mBuf = -1;
+ return item;
+ } else if (err != OK) {
+ ALOGE("acquireBuffer returned err=%d", err);
+ item.mBuf = -1;
+ return item;
+ }
+
+ // Wait for it to become available.
+ err = item.mFence->waitForever("GraphicBufferListener::getBufferItem");
+ if (err != OK) {
+ ALOGW("failed to wait for buffer fence: %d", err);
+ // keep going
+ }
+
+ // If this is the first time we're seeing this buffer, add it to our
+ // slot table.
+ if (item.mGraphicBuffer != NULL) {
+ ALOGV("setting mBufferSlot %d", item.mBuf);
+ mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+ }
+
+ return item;
+}
+
+sp<GraphicBuffer> GraphicBufferListener::getBuffer(
+ BufferQueue::BufferItem item) {
+ sp<GraphicBuffer> buf;
+ if (item.mBuf < 0 || item.mBuf >= BufferQueue::NUM_BUFFER_SLOTS) {
+ ALOGE("getBuffer() received invalid BufferItem: mBuf==%d", item.mBuf);
+ return buf;
+ }
+
+ buf = mBufferSlot[item.mBuf];
+ CHECK(buf.get() != NULL);
+
+ return buf;
+}
+
+status_t GraphicBufferListener::releaseBuffer(
+ BufferQueue::BufferItem item) {
+ if (item.mBuf < 0 || item.mBuf >= BufferQueue::NUM_BUFFER_SLOTS) {
+ ALOGE("getBuffer() received invalid BufferItem: mBuf==%d", item.mBuf);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+
+ return OK;
+}
+
+} // namespace android
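GraphicBufferListener is the consumer side of a BufferQueue: the producer end returned by getIGraphicBufferProducer() is handed to whoever feeds the filter, each queued buffer posts a kWhatFrameAvailable message to the notify target, and that handler then pulls the frame with getBufferItem()/getBuffer() and returns it with releaseBuffer(). A hedged sketch of the consumer-side handling (the handler and the mListener field are illustrative, not part of this patch):

    // Sketch of an AHandler draining frames from a GraphicBufferListener.
    // Assumes mListener was set up with init(notify, width, height, count)
    // and that notify targets this handler.
    void onMessageReceived(const sp<AMessage> &msg) {
        switch (msg->what()) {
            case GraphicBufferListener::kWhatFrameAvailable:
            {
                BufferQueue::BufferItem item = mListener->getBufferItem();
                if (item.mBuf < 0) {
                    break;                           // acquire failed, nothing to do
                }
                sp<GraphicBuffer> buf = mListener->getBuffer(item);
                // ... lock buf, copy/convert the pixels into a codec buffer ...
                mListener->releaseBuffer(item);      // hand the slot back to the producer
                break;
            }
            default:
                break;
        }
    }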
diff --git a/media/libstagefright/filters/GraphicBufferListener.h b/media/libstagefright/filters/GraphicBufferListener.h
new file mode 100644
index 0000000..b3e0ee3
--- /dev/null
+++ b/media/libstagefright/filters/GraphicBufferListener.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRAPHIC_BUFFER_LISTENER_H_
+#define GRAPHIC_BUFFER_LISTENER_H_
+
+#include <gui/BufferQueue.h>
+
+namespace android {
+
+struct AMessage;
+
+struct GraphicBufferListener : public BufferQueue::ConsumerListener {
+public:
+ GraphicBufferListener() {};
+
+ status_t init(
+ const sp<AMessage> &notify,
+ size_t bufferWidth, size_t bufferHeight, size_t bufferCount);
+
+ virtual void onFrameAvailable(const BufferItem& item);
+ virtual void onBuffersReleased();
+ virtual void onSidebandStreamChanged();
+
+ // Returns the handle to the producer side of the BufferQueue. Buffers
+ // queued on this will be received by GraphicBufferListener.
+ sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
+ return mProducer;
+ }
+
+ BufferQueue::BufferItem getBufferItem();
+ sp<GraphicBuffer> getBuffer(BufferQueue::BufferItem item);
+ status_t releaseBuffer(BufferQueue::BufferItem item);
+
+ enum {
+ kWhatFrameAvailable = 'frav',
+ };
+
+private:
+ sp<AMessage> mNotify;
+ size_t mNumFramesAvailable;
+
+ mutable Mutex mMutex;
+
+ // Our BufferQueue interfaces. mProducer is passed to the producer through
+ // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
+ // the buffers queued by the producer.
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
+
+ // Cache of GraphicBuffers from the buffer queue.
+ sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
+};
+
+} // namespace android
+
+#endif  // GRAPHIC_BUFFER_LISTENER_H_
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.cpp b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
new file mode 100644
index 0000000..cbcf699
--- /dev/null
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IntrinsicBlurFilter"
+
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "IntrinsicBlurFilter.h"
+
+namespace android {
+
+status_t IntrinsicBlurFilter::configure(const sp<AMessage> &msg) {
+ status_t err = SimpleFilter::configure(msg);
+ if (err != OK) {
+ return err;
+ }
+
+ if (!msg->findString("cacheDir", &mCacheDir)) {
+ ALOGE("Failed to find cache directory in config message.");
+ return NAME_NOT_FOUND;
+ }
+
+ return OK;
+}
+
+status_t IntrinsicBlurFilter::start() {
+ // TODO: use a single RS context object for entire application
+ mRS = new RSC::RS();
+
+ if (!mRS->init(mCacheDir.c_str())) {
+ ALOGE("Failed to initialize RenderScript context.");
+ return NO_INIT;
+ }
+
+ // 32-bit elements for ARGB8888
+ RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
+
+ RSC::Type::Builder tb(mRS, e);
+ tb.setX(mWidth);
+ tb.setY(mHeight);
+ RSC::sp<const RSC::Type> t = tb.create();
+
+ mAllocIn = RSC::Allocation::createTyped(mRS, t);
+ mAllocOut = RSC::Allocation::createTyped(mRS, t);
+
+ mBlur = RSC::ScriptIntrinsicBlur::create(mRS, e);
+ mBlur->setRadius(mBlurRadius);
+ mBlur->setInput(mAllocIn);
+
+ return OK;
+}
+
+void IntrinsicBlurFilter::reset() {
+ mBlur.clear();
+ mAllocOut.clear();
+ mAllocIn.clear();
+ mRS.clear();
+}
+
+status_t IntrinsicBlurFilter::setParameters(const sp<AMessage> &msg) {
+ sp<AMessage> params;
+ CHECK(msg->findMessage("params", &params));
+
+ float blurRadius;
+ if (params->findFloat("blur-radius", &blurRadius)) {
+ mBlurRadius = blurRadius;
+ }
+
+ return OK;
+}
+
+status_t IntrinsicBlurFilter::processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
+ mBlur->forEach(mAllocOut);
+ mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/IntrinsicBlurFilter.h
new file mode 100644
index 0000000..4707ab7
--- /dev/null
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INTRINSIC_BLUR_FILTER_H_
+#define INTRINSIC_BLUR_FILTER_H_
+
+#include "RenderScript.h"
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct IntrinsicBlurFilter : public SimpleFilter {
+public:
+ IntrinsicBlurFilter() : mBlurRadius(1.f) {};
+
+ virtual status_t configure(const sp<AMessage> &msg);
+ virtual status_t start();
+ virtual void reset();
+ virtual status_t setParameters(const sp<AMessage> &msg);
+ virtual status_t processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+ virtual ~IntrinsicBlurFilter() {};
+
+private:
+ AString mCacheDir;
+ RSC::sp<RSC::RS> mRS;
+ RSC::sp<RSC::Allocation> mAllocIn;
+ RSC::sp<RSC::Allocation> mAllocOut;
+ RSC::sp<RSC::ScriptIntrinsicBlur> mBlur;
+ float mBlurRadius;
+};
+
+} // namespace android
+
+#endif // INTRINSIC_BLUR_FILTER_H_
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
new file mode 100644
index 0000000..c5289b6
--- /dev/null
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -0,0 +1,816 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaFilter"
+
+#include <inttypes.h>
+#include <utils/Trace.h>
+
+#include <binder/MemoryDealer.h>
+
+#include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaFilter.h>
+
+#include "ColorConvert.h"
+#include "GraphicBufferListener.h"
+#include "IntrinsicBlurFilter.h"
+#include "RSFilter.h"
+#include "SaturationFilter.h"
+#include "ZeroFilter.h"
+
+namespace android {
+
+// number of buffers allocated on each of the input and output ports
+static const size_t kBufferCountActual = 4;
+
+MediaFilter::MediaFilter()
+ : mState(UNINITIALIZED),
+ mGeneration(0),
+ mGraphicBufferListener(NULL) {
+}
+
+MediaFilter::~MediaFilter() {
+}
+
+//////////////////// PUBLIC FUNCTIONS //////////////////////////////////////////
+
+void MediaFilter::setNotificationMessage(const sp<AMessage> &msg) {
+ mNotify = msg;
+}
+
+void MediaFilter::initiateAllocateComponent(const sp<AMessage> &msg) {
+ msg->setWhat(kWhatAllocateComponent);
+ msg->setTarget(id());
+ msg->post();
+}
+
+void MediaFilter::initiateConfigureComponent(const sp<AMessage> &msg) {
+ msg->setWhat(kWhatConfigureComponent);
+ msg->setTarget(id());
+ msg->post();
+}
+
+void MediaFilter::initiateCreateInputSurface() {
+ (new AMessage(kWhatCreateInputSurface, id()))->post();
+}
+
+void MediaFilter::initiateStart() {
+ (new AMessage(kWhatStart, id()))->post();
+}
+
+void MediaFilter::initiateShutdown(bool keepComponentAllocated) {
+ sp<AMessage> msg = new AMessage(kWhatShutdown, id());
+ msg->setInt32("keepComponentAllocated", keepComponentAllocated);
+ msg->post();
+}
+
+void MediaFilter::signalFlush() {
+ (new AMessage(kWhatFlush, id()))->post();
+}
+
+void MediaFilter::signalResume() {
+ (new AMessage(kWhatResume, id()))->post();
+}
+
+// nothing to do
+void MediaFilter::signalRequestIDRFrame() {
+ return;
+}
+
+void MediaFilter::signalSetParameters(const sp<AMessage> &params) {
+ sp<AMessage> msg = new AMessage(kWhatSetParameters, id());
+ msg->setMessage("params", params);
+ msg->post();
+}
+
+void MediaFilter::signalEndOfInputStream() {
+ (new AMessage(kWhatSignalEndOfInputStream, id()))->post();
+}
+
+void MediaFilter::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatAllocateComponent:
+ {
+ onAllocateComponent(msg);
+ break;
+ }
+ case kWhatConfigureComponent:
+ {
+ onConfigureComponent(msg);
+ break;
+ }
+ case kWhatStart:
+ {
+ onStart();
+ break;
+ }
+ case kWhatProcessBuffers:
+ {
+ processBuffers();
+ break;
+ }
+ case kWhatInputBufferFilled:
+ {
+ onInputBufferFilled(msg);
+ break;
+ }
+ case kWhatOutputBufferDrained:
+ {
+ onOutputBufferDrained(msg);
+ break;
+ }
+ case kWhatShutdown:
+ {
+ onShutdown(msg);
+ break;
+ }
+ case kWhatFlush:
+ {
+ onFlush();
+ break;
+ }
+ case kWhatResume:
+ {
+ // nothing to do
+ break;
+ }
+ case kWhatSetParameters:
+ {
+ onSetParameters(msg);
+ break;
+ }
+ case kWhatCreateInputSurface:
+ {
+ onCreateInputSurface();
+ break;
+ }
+ case GraphicBufferListener::kWhatFrameAvailable:
+ {
+ onInputFrameAvailable();
+ break;
+ }
+ case kWhatSignalEndOfInputStream:
+ {
+ onSignalEndOfInputStream();
+ break;
+ }
+ default:
+ {
+ ALOGE("Message not handled:\n%s", msg->debugString().c_str());
+ break;
+ }
+ }
+}
+
+//////////////////// PORT DESCRIPTION //////////////////////////////////////////
+
+MediaFilter::PortDescription::PortDescription() {
+}
+
+void MediaFilter::PortDescription::addBuffer(
+ IOMX::buffer_id id, const sp<ABuffer> &buffer) {
+ mBufferIDs.push_back(id);
+ mBuffers.push_back(buffer);
+}
+
+size_t MediaFilter::PortDescription::countBuffers() {
+ return mBufferIDs.size();
+}
+
+IOMX::buffer_id MediaFilter::PortDescription::bufferIDAt(size_t index) const {
+ return mBufferIDs.itemAt(index);
+}
+
+sp<ABuffer> MediaFilter::PortDescription::bufferAt(size_t index) const {
+ return mBuffers.itemAt(index);
+}
+
+//////////////////// HELPER FUNCTIONS //////////////////////////////////////////
+
+void MediaFilter::signalProcessBuffers() {
+ (new AMessage(kWhatProcessBuffers, id()))->post();
+}
+
+void MediaFilter::signalError(status_t error) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatError);
+ notify->setInt32("err", error);
+ notify->post();
+}
+
+status_t MediaFilter::allocateBuffersOnPort(OMX_U32 portIndex) {
+ CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+ const bool isInput = portIndex == kPortIndexInput;
+ const size_t bufferSize = isInput ? mMaxInputSize : mMaxOutputSize;
+
+ CHECK(mDealer[portIndex] == NULL);
+ CHECK(mBuffers[portIndex].isEmpty());
+
+ ALOGV("Allocating %zu buffers of size %zu on %s port",
+ kBufferCountActual, bufferSize,
+ isInput ? "input" : "output");
+
+ size_t totalSize = kBufferCountActual * bufferSize;
+
+ mDealer[portIndex] = new MemoryDealer(totalSize, "MediaFilter");
+
+ for (size_t i = 0; i < kBufferCountActual; ++i) {
+ sp<IMemory> mem = mDealer[portIndex]->allocate(bufferSize);
+ CHECK(mem.get() != NULL);
+
+ BufferInfo info;
+ info.mStatus = BufferInfo::OWNED_BY_US;
+ info.mBufferID = i;
+ info.mGeneration = mGeneration;
+ info.mOutputFlags = 0;
+ info.mData = new ABuffer(mem->pointer(), bufferSize);
+ info.mData->meta()->setInt64("timeUs", 0);
+
+ mBuffers[portIndex].push_back(info);
+
+ if (!isInput) {
+ mAvailableOutputBuffers.push(
+ &mBuffers[portIndex].editItemAt(i));
+ }
+ }
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
+
+ notify->setInt32("portIndex", portIndex);
+
+ sp<PortDescription> desc = new PortDescription;
+
+ for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+ const BufferInfo &info = mBuffers[portIndex][i];
+
+ desc->addBuffer(info.mBufferID, info.mData);
+ }
+
+ notify->setObject("portDesc", desc);
+ notify->post();
+
+ return OK;
+}
+
+MediaFilter::BufferInfo* MediaFilter::findBufferByID(
+ uint32_t portIndex, IOMX::buffer_id bufferID,
+ ssize_t *index) {
+ for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+ BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
+
+ if (info->mBufferID == bufferID) {
+ if (index != NULL) {
+ *index = i;
+ }
+ return info;
+ }
+ }
+
+ TRESPASS();
+
+ return NULL;
+}
+
+void MediaFilter::postFillThisBuffer(BufferInfo *info) {
+ ALOGV("postFillThisBuffer on buffer %d", info->mBufferID);
+ if (mPortEOS[kPortIndexInput]) {
+ return;
+ }
+
+ CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
+
+ info->mGeneration = mGeneration;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
+ notify->setInt32("buffer-id", info->mBufferID);
+
+ info->mData->meta()->clear();
+ notify->setBuffer("buffer", info->mData);
+
+ sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, id());
+ reply->setInt32("buffer-id", info->mBufferID);
+
+ notify->setMessage("reply", reply);
+
+ info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
+ notify->post();
+}
+
+void MediaFilter::postDrainThisBuffer(BufferInfo *info) {
+ CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
+
+ info->mGeneration = mGeneration;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
+ notify->setInt32("buffer-id", info->mBufferID);
+ notify->setInt32("flags", info->mOutputFlags);
+ notify->setBuffer("buffer", info->mData);
+
+ sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, id());
+ reply->setInt32("buffer-id", info->mBufferID);
+
+ notify->setMessage("reply", reply);
+
+ notify->post();
+
+ info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
+}
+
+void MediaFilter::postEOS() {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatEOS);
+ notify->setInt32("err", ERROR_END_OF_STREAM);
+ notify->post();
+
+ ALOGV("Sent kWhatEOS.");
+}
+
+void MediaFilter::sendFormatChange() {
+ sp<AMessage> notify = mNotify->dup();
+
+ notify->setInt32("what", kWhatOutputFormatChanged);
+
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
+ notify->setString("mime", mime.c_str());
+
+ notify->setInt32("stride", mStride);
+ notify->setInt32("slice-height", mSliceHeight);
+ notify->setInt32("color-format", mColorFormatOut);
+ notify->setRect("crop", 0, 0, mStride - 1, mSliceHeight - 1);
+ notify->setInt32("width", mWidth);
+ notify->setInt32("height", mHeight);
+
+ notify->post();
+}
+
+void MediaFilter::requestFillEmptyInput() {
+ if (mPortEOS[kPortIndexInput]) {
+ return;
+ }
+
+ for (size_t i = 0; i < mBuffers[kPortIndexInput].size(); ++i) {
+ BufferInfo *info = &mBuffers[kPortIndexInput].editItemAt(i);
+
+ if (info->mStatus == BufferInfo::OWNED_BY_US) {
+ postFillThisBuffer(info);
+ }
+ }
+}
+
+void MediaFilter::processBuffers() {
+ if (mAvailableInputBuffers.empty() || mAvailableOutputBuffers.empty()) {
+ ALOGV("Skipping process (buffers unavailable)");
+ return;
+ }
+
+ if (mPortEOS[kPortIndexOutput]) {
+ // TODO notify caller of queueInput error when it is supported
+ // in MediaCodec
+ ALOGW("Tried to process a buffer after EOS.");
+ return;
+ }
+
+ BufferInfo *inputInfo = mAvailableInputBuffers[0];
+ mAvailableInputBuffers.removeAt(0);
+ BufferInfo *outputInfo = mAvailableOutputBuffers[0];
+ mAvailableOutputBuffers.removeAt(0);
+
+ status_t err;
+ err = mFilter->processBuffers(inputInfo->mData, outputInfo->mData);
+ if (err != (status_t)OK) {
+ outputInfo->mData->meta()->setInt32("err", err);
+ }
+
+ int64_t timeUs;
+ CHECK(inputInfo->mData->meta()->findInt64("timeUs", &timeUs));
+ outputInfo->mData->meta()->setInt64("timeUs", timeUs);
+ outputInfo->mOutputFlags = 0;
+ int32_t eos = 0;
+ if (inputInfo->mData->meta()->findInt32("eos", &eos) && eos != 0) {
+ outputInfo->mOutputFlags |= OMX_BUFFERFLAG_EOS;
+ mPortEOS[kPortIndexOutput] = true;
+ outputInfo->mData->meta()->setInt32("eos", eos);
+ postEOS();
+ ALOGV("Output stream saw EOS.");
+ }
+
+ ALOGV("Processed input buffer %u [%zu], output buffer %u [%zu]",
+ inputInfo->mBufferID, inputInfo->mData->size(),
+ outputInfo->mBufferID, outputInfo->mData->size());
+
+ if (mGraphicBufferListener != NULL) {
+ delete inputInfo;
+ } else {
+ postFillThisBuffer(inputInfo);
+ }
+ postDrainThisBuffer(outputInfo);
+
+ // prevent any corner case where buffers could get stuck in queue
+ signalProcessBuffers();
+}
+
+void MediaFilter::onAllocateComponent(const sp<AMessage> &msg) {
+ CHECK_EQ(mState, UNINITIALIZED);
+
+ CHECK(msg->findString("componentName", &mComponentName));
+ const char* name = mComponentName.c_str();
+ if (!strcasecmp(name, "android.filter.zerofilter")) {
+ mFilter = new ZeroFilter;
+ } else if (!strcasecmp(name, "android.filter.saturation")) {
+ mFilter = new SaturationFilter;
+ } else if (!strcasecmp(name, "android.filter.intrinsicblur")) {
+ mFilter = new IntrinsicBlurFilter;
+ } else if (!strcasecmp(name, "android.filter.RenderScript")) {
+ mFilter = new RSFilter;
+ } else {
+ ALOGE("Unrecognized filter name: %s", name);
+ signalError(NAME_NOT_FOUND);
+ return;
+ }
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatComponentAllocated);
+ // HACK - need "OMX.google" to use MediaCodec's software renderer
+ notify->setString("componentName", "OMX.google.MediaFilter");
+ notify->post();
+ mState = INITIALIZED;
+ ALOGV("Handled kWhatAllocateComponent.");
+}
+
+void MediaFilter::onConfigureComponent(const sp<AMessage> &msg) {
+ // TODO: generalize to allow audio filters as well as video
+
+ CHECK_EQ(mState, INITIALIZED);
+
+ // get params - at least mime, width & height
+ AString mime;
+ CHECK(msg->findString("mime", &mime));
+ if (strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_RAW)) {
+ ALOGE("Bad mime: %s", mime.c_str());
+ signalError(BAD_VALUE);
+ return;
+ }
+
+ CHECK(msg->findInt32("width", &mWidth));
+ CHECK(msg->findInt32("height", &mHeight));
+ if (!msg->findInt32("stride", &mStride)) {
+ mStride = mWidth;
+ }
+ if (!msg->findInt32("slice-height", &mSliceHeight)) {
+ mSliceHeight = mHeight;
+ }
+
+ mMaxInputSize = mWidth * mHeight * 4; // room for ARGB8888
+ int32_t maxInputSize;
+ if (msg->findInt32("max-input-size", &maxInputSize)
+ && (size_t)maxInputSize > mMaxInputSize) {
+ mMaxInputSize = maxInputSize;
+ }
+
+ if (!msg->findInt32("color-format", &mColorFormatIn)) {
+ // default to OMX_COLOR_Format32bitARGB8888
+ mColorFormatIn = OMX_COLOR_Format32bitARGB8888;
+ msg->setInt32("color-format", mColorFormatIn);
+ }
+ mColorFormatOut = mColorFormatIn;
+
+ mMaxOutputSize = mWidth * mHeight * 4; // room for ARGB8888
+
+ AString cacheDir;
+ if (!msg->findString("cacheDir", &cacheDir)) {
+ ALOGE("Failed to find cache directory in config message.");
+ signalError(NAME_NOT_FOUND);
+ return;
+ }
+
+ status_t err;
+ err = mFilter->configure(msg);
+ if (err != (status_t)OK) {
+ ALOGE("Failed to configure filter component, err %d", err);
+ signalError(err);
+ return;
+ }
+
+ mInputFormat = new AMessage();
+ mInputFormat->setString("mime", mime.c_str());
+ mInputFormat->setInt32("stride", mStride);
+ mInputFormat->setInt32("slice-height", mSliceHeight);
+ mInputFormat->setInt32("color-format", mColorFormatIn);
+    mInputFormat->setRect("crop", 0, 0, mStride - 1, mSliceHeight - 1);
+ mInputFormat->setInt32("width", mWidth);
+ mInputFormat->setInt32("height", mHeight);
+
+ mOutputFormat = new AMessage();
+ mOutputFormat->setString("mime", mime.c_str());
+ mOutputFormat->setInt32("stride", mStride);
+ mOutputFormat->setInt32("slice-height", mSliceHeight);
+ mOutputFormat->setInt32("color-format", mColorFormatOut);
+    mOutputFormat->setRect("crop", 0, 0, mStride - 1, mSliceHeight - 1);
+ mOutputFormat->setInt32("width", mWidth);
+ mOutputFormat->setInt32("height", mHeight);
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatComponentConfigured);
+ notify->setString("componentName", "MediaFilter");
+ notify->setMessage("input-format", mInputFormat);
+ notify->setMessage("output-format", mOutputFormat);
+ notify->post();
+ mState = CONFIGURED;
+ ALOGV("Handled kWhatConfigureComponent.");
+
+ sendFormatChange();
+}
+
+void MediaFilter::onStart() {
+ CHECK_EQ(mState, CONFIGURED);
+
+ allocateBuffersOnPort(kPortIndexInput);
+
+ allocateBuffersOnPort(kPortIndexOutput);
+
+ status_t err = mFilter->start();
+ if (err != (status_t)OK) {
+ ALOGE("Failed to start filter component, err %d", err);
+ signalError(err);
+ return;
+ }
+
+ mPortEOS[kPortIndexInput] = false;
+ mPortEOS[kPortIndexOutput] = false;
+ mInputEOSResult = OK;
+ mState = STARTED;
+
+ requestFillEmptyInput();
+ ALOGV("Handled kWhatStart.");
+}
+
+void MediaFilter::onInputBufferFilled(const sp<AMessage> &msg) {
+ IOMX::buffer_id bufferID;
+ CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+ BufferInfo *info = findBufferByID(kPortIndexInput, bufferID);
+
+ if (mState != STARTED) {
+ // we're not running, so we'll just keep that buffer...
+ info->mStatus = BufferInfo::OWNED_BY_US;
+ return;
+ }
+
+ if (info->mGeneration != mGeneration) {
+ ALOGV("Caught a stale input buffer [ID %d]", bufferID);
+ // buffer is stale (taken before a flush/shutdown) - repost it
+ CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
+ postFillThisBuffer(info);
+ return;
+ }
+
+ CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
+ info->mStatus = BufferInfo::OWNED_BY_US;
+
+ sp<ABuffer> buffer;
+ int32_t err = OK;
+ bool eos = false;
+
+ if (!msg->findBuffer("buffer", &buffer)) {
+ // these are unfilled buffers returned by client
+ CHECK(msg->findInt32("err", &err));
+
+ if (err == OK) {
+ // buffers with no errors are returned on MediaCodec.flush
+ ALOGV("saw unfilled buffer (MediaCodec.flush)");
+ postFillThisBuffer(info);
+ return;
+ } else {
+ ALOGV("saw error %d instead of an input buffer", err);
+ eos = true;
+ }
+
+ buffer.clear();
+ }
+
+ int32_t isCSD;
+ if (buffer != NULL && buffer->meta()->findInt32("csd", &isCSD)
+ && isCSD != 0) {
+ // ignore codec-specific data buffers
+ ALOGW("MediaFilter received a codec-specific data buffer");
+ postFillThisBuffer(info);
+ return;
+ }
+
+ int32_t tmp;
+ if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
+ eos = true;
+ err = ERROR_END_OF_STREAM;
+ }
+
+ mAvailableInputBuffers.push_back(info);
+ processBuffers();
+
+ if (eos) {
+ mPortEOS[kPortIndexInput] = true;
+ mInputEOSResult = err;
+ }
+
+ ALOGV("Handled kWhatInputBufferFilled. [ID %u]", bufferID);
+}
+
+void MediaFilter::onOutputBufferDrained(const sp<AMessage> &msg) {
+ IOMX::buffer_id bufferID;
+ CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+ BufferInfo *info = findBufferByID(kPortIndexOutput, bufferID);
+
+ if (mState != STARTED) {
+ // we're not running, so we'll just keep that buffer...
+ info->mStatus = BufferInfo::OWNED_BY_US;
+ return;
+ }
+
+ if (info->mGeneration != mGeneration) {
+ ALOGV("Caught a stale output buffer [ID %d]", bufferID);
+ // buffer is stale (taken before a flush/shutdown) - keep it
+ CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
+ return;
+ }
+
+ CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
+ info->mStatus = BufferInfo::OWNED_BY_US;
+
+ mAvailableOutputBuffers.push_back(info);
+
+ processBuffers();
+
+ ALOGV("Handled kWhatOutputBufferDrained. [ID %u]",
+ bufferID);
+}
+
+void MediaFilter::onShutdown(const sp<AMessage> &msg) {
+ mGeneration++;
+
+ if (mState != UNINITIALIZED) {
+ mFilter->reset();
+ }
+
+ int32_t keepComponentAllocated;
+ CHECK(msg->findInt32("keepComponentAllocated", &keepComponentAllocated));
+ if (!keepComponentAllocated || mState == UNINITIALIZED) {
+ mState = UNINITIALIZED;
+ } else {
+ mState = INITIALIZED;
+ }
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
+ notify->post();
+}
+
+void MediaFilter::onFlush() {
+ mGeneration++;
+
+ mAvailableInputBuffers.clear();
+ for (size_t i = 0; i < mBuffers[kPortIndexInput].size(); ++i) {
+ BufferInfo *info = &mBuffers[kPortIndexInput].editItemAt(i);
+ info->mStatus = BufferInfo::OWNED_BY_US;
+ }
+ mAvailableOutputBuffers.clear();
+ for (size_t i = 0; i < mBuffers[kPortIndexOutput].size(); ++i) {
+ BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+ info->mStatus = BufferInfo::OWNED_BY_US;
+ mAvailableOutputBuffers.push_back(info);
+ }
+
+ mPortEOS[kPortIndexInput] = false;
+ mPortEOS[kPortIndexOutput] = false;
+ mInputEOSResult = OK;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatFlushCompleted);
+ notify->post();
+ ALOGV("Posted kWhatFlushCompleted");
+
+ // MediaCodec returns all input buffers after flush, so in
+ // onInputBufferFilled we call postFillThisBuffer on them
+}
+
+void MediaFilter::onSetParameters(const sp<AMessage> &msg) {
+ CHECK(mState != STARTED);
+
+ status_t err = mFilter->setParameters(msg);
+ if (err != (status_t)OK) {
+ ALOGE("setParameters returned err %d", err);
+ }
+}
+
+void MediaFilter::onCreateInputSurface() {
+ CHECK(mState == CONFIGURED);
+
+ mGraphicBufferListener = new GraphicBufferListener;
+
+ sp<AMessage> notify = new AMessage();
+ notify->setTarget(id());
+ status_t err = mGraphicBufferListener->init(
+ notify, mStride, mSliceHeight, kBufferCountActual);
+
+ if (err != OK) {
+ ALOGE("Failed to init mGraphicBufferListener: %d", err);
+ signalError(err);
+ return;
+ }
+
+ sp<AMessage> reply = mNotify->dup();
+ reply->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
+ reply->setObject(
+ "input-surface",
+ new BufferProducerWrapper(
+ mGraphicBufferListener->getIGraphicBufferProducer()));
+ reply->post();
+}
+
+void MediaFilter::onInputFrameAvailable() {
+ BufferQueue::BufferItem item = mGraphicBufferListener->getBufferItem();
+ sp<GraphicBuffer> buf = mGraphicBufferListener->getBuffer(item);
+
+ // get pointer to graphic buffer
+ void* bufPtr;
+ buf->lock(GraphicBuffer::USAGE_SW_READ_OFTEN, &bufPtr);
+
+ // HACK - there is no OMX_COLOR_FORMATTYPE value for RGBA, so the format
+ // conversion is hardcoded until we add this.
+ // TODO: check input format and convert only if necessary
+ // copy RGBA graphic buffer into temporary ARGB input buffer
+ BufferInfo *inputInfo = new BufferInfo;
+ inputInfo->mData = new ABuffer(buf->getWidth() * buf->getHeight() * 4);
+ ALOGV("Copying surface data into temp buffer.");
+ convertRGBAToARGB(
+ (uint8_t*)bufPtr, buf->getWidth(), buf->getHeight(),
+ buf->getStride(), inputInfo->mData->data());
+ inputInfo->mBufferID = item.mBuf;
+ inputInfo->mGeneration = mGeneration;
+ inputInfo->mOutputFlags = 0;
+ inputInfo->mStatus = BufferInfo::OWNED_BY_US;
+ inputInfo->mData->meta()->setInt64("timeUs", item.mTimestamp / 1000);
+
+ mAvailableInputBuffers.push_back(inputInfo);
+
+ mGraphicBufferListener->releaseBuffer(item);
+
+ signalProcessBuffers();
+}
+
+void MediaFilter::onSignalEndOfInputStream() {
+ // if using input surface, need to send an EOS output buffer
+ if (mGraphicBufferListener != NULL) {
+ Vector<BufferInfo> *outputBufs = &mBuffers[kPortIndexOutput];
+ BufferInfo* eosBuf;
+ bool foundBuf = false;
+ for (size_t i = 0; i < kBufferCountActual; i++) {
+ eosBuf = &outputBufs->editItemAt(i);
+ if (eosBuf->mStatus == BufferInfo::OWNED_BY_US) {
+ foundBuf = true;
+ break;
+ }
+ }
+
+ if (!foundBuf) {
+ ALOGE("onSignalEndOfInputStream failed to find an output buffer");
+ return;
+ }
+
+ eosBuf->mOutputFlags = OMX_BUFFERFLAG_EOS;
+ eosBuf->mGeneration = mGeneration;
+ eosBuf->mData->setRange(0, 0);
+ postDrainThisBuffer(eosBuf);
+ ALOGV("Posted EOS on output buffer %zu", eosBuf->mBufferID);
+ }
+
+ mPortEOS[kPortIndexOutput] = true;
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
+ notify->post();
+
+ ALOGV("Output stream saw EOS.");
+}
+
+} // namespace android
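
Usage sketch (not part of this patch): the component name and message keys that MediaFilter's onAllocateComponent()/onConfigureComponent() above accept. Looper wiring, the notification handler and the fill/drain buffer exchange are omitted; kWhatCodecNotify, clientHandlerId and codecLooper are placeholder names.

    // Sketch: allocate, configure and start the saturation filter component.
    sp<MediaFilter> filter = new MediaFilter;
    codecLooper->registerHandler(filter);              // codecLooper: an ALooper, assumed running
    filter->setNotificationMessage(new AMessage(kWhatCodecNotify, clientHandlerId));

    sp<AMessage> allocMsg = new AMessage;
    allocMsg->setString("componentName", "android.filter.saturation");
    filter->initiateAllocateComponent(allocMsg);

    sp<AMessage> format = new AMessage;
    format->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW);   // raw video only, per onConfigureComponent()
    format->setInt32("width", 1280);                        // hypothetical frame size
    format->setInt32("height", 720);
    format->setString("cacheDir", "/data/local/tmp");       // RenderScript cache dir, hypothetical
    filter->initiateConfigureComponent(format);

    sp<AMessage> params = new AMessage;
    params->setFloat("saturation", 0.5f);
    filter->signalSetParameters(params);    // before initiateStart(); onSetParameters CHECKs state

    filter->initiateStart();
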
diff --git a/media/libstagefright/filters/RSFilter.cpp b/media/libstagefright/filters/RSFilter.cpp
new file mode 100644
index 0000000..b569945
--- /dev/null
+++ b/media/libstagefright/filters/RSFilter.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RSFilter"
+
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "RSFilter.h"
+
+namespace android {
+
+RSFilter::RSFilter() {
+
+}
+
+RSFilter::~RSFilter() {
+
+}
+
+status_t RSFilter::configure(const sp<AMessage> &msg) {
+ status_t err = SimpleFilter::configure(msg);
+ if (err != OK) {
+ return err;
+ }
+
+ if (!msg->findString("cacheDir", &mCacheDir)) {
+ ALOGE("Failed to find cache directory in config message.");
+ return NAME_NOT_FOUND;
+ }
+
+ sp<RenderScriptWrapper> wrapper;
+ if (!msg->findObject("rs-wrapper", (sp<RefBase>*)&wrapper)) {
+ ALOGE("Failed to find RenderScriptWrapper in config message.");
+ return NAME_NOT_FOUND;
+ }
+
+ mRS = wrapper->mContext;
+ mCallback = wrapper->mCallback;
+
+ return OK;
+}
+
+status_t RSFilter::start() {
+ // 32-bit elements for ARGB8888
+ RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
+
+ RSC::Type::Builder tb(mRS, e);
+ tb.setX(mWidth);
+ tb.setY(mHeight);
+ RSC::sp<const RSC::Type> t = tb.create();
+
+ mAllocIn = RSC::Allocation::createTyped(mRS, t);
+ mAllocOut = RSC::Allocation::createTyped(mRS, t);
+
+ return OK;
+}
+
+void RSFilter::reset() {
+ mCallback.clear();
+ mAllocOut.clear();
+ mAllocIn.clear();
+ mRS.clear();
+}
+
+status_t RSFilter::setParameters(const sp<AMessage> &msg) {
+ return mCallback->handleSetParameters(msg);
+}
+
+status_t RSFilter::processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
+ mCallback->processBuffers(mAllocIn.get(), mAllocOut.get());
+ mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
+
+ return OK;
+}
+
+} // namespace android
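
Usage sketch (not part of this patch): RSFilter delegates all processing to a client-supplied callback carried in the "rs-wrapper" object. The RSFilterCallback virtuals below are inferred from the call sites above (processBuffers(Allocation*, Allocation*) and handleSetParameters()), and rsContext stands for an already-initialized RSC::RS.

    // Sketch: a custom RenderScript callback packaged for RSFilter::configure().
    struct MyRSCallback : public RenderScriptWrapper::RSFilterCallback {
        virtual status_t processBuffers(
                RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
            // run an application-defined kernel from inBuffer into outBuffer here
            return OK;
        }
        virtual status_t handleSetParameters(const sp<AMessage> &msg __unused) {
            return OK;   // accept runtime parameter updates here
        }
    };

    sp<RenderScriptWrapper> wrapper = new RenderScriptWrapper;
    wrapper->mContext = rsContext;            // RSC::sp<RSC::RS>, assumed initialized
    wrapper->mCallback = new MyRSCallback;

    sp<AMessage> config = new AMessage;       // plus width/height/color-format/cacheDir as usual
    config->setObject("rs-wrapper", wrapper);
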
diff --git a/media/libstagefright/filters/RSFilter.h b/media/libstagefright/filters/RSFilter.h
new file mode 100644
index 0000000..c5b5074
--- /dev/null
+++ b/media/libstagefright/filters/RSFilter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RS_FILTER_H_
+#define RS_FILTER_H_
+
+#include <media/stagefright/RenderScriptWrapper.h>
+#include <RenderScript.h>
+
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct AString;
+
+struct RSFilter : public SimpleFilter {
+public:
+ RSFilter();
+
+ virtual status_t configure(const sp<AMessage> &msg);
+ virtual status_t start();
+ virtual void reset();
+ virtual status_t setParameters(const sp<AMessage> &msg);
+ virtual status_t processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+ virtual ~RSFilter();
+
+private:
+ AString mCacheDir;
+ sp<RenderScriptWrapper::RSFilterCallback> mCallback;
+ RSC::sp<RSC::RS> mRS;
+ RSC::sp<RSC::Allocation> mAllocIn;
+ RSC::sp<RSC::Allocation> mAllocOut;
+};
+
+} // namespace android
+
+#endif // RS_FILTER_H_
diff --git a/media/libstagefright/filters/SaturationFilter.cpp b/media/libstagefright/filters/SaturationFilter.cpp
new file mode 100644
index 0000000..ba5f75a
--- /dev/null
+++ b/media/libstagefright/filters/SaturationFilter.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SaturationFilter"
+
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "SaturationFilter.h"
+
+namespace android {
+
+status_t SaturationFilter::configure(const sp<AMessage> &msg) {
+ status_t err = SimpleFilter::configure(msg);
+ if (err != OK) {
+ return err;
+ }
+
+ if (!msg->findString("cacheDir", &mCacheDir)) {
+ ALOGE("Failed to find cache directory in config message.");
+ return NAME_NOT_FOUND;
+ }
+
+ return OK;
+}
+
+status_t SaturationFilter::start() {
+ // TODO: use a single RS context object for entire application
+ mRS = new RSC::RS();
+
+ if (!mRS->init(mCacheDir.c_str())) {
+ ALOGE("Failed to initialize RenderScript context.");
+ return NO_INIT;
+ }
+
+ // 32-bit elements for ARGB8888
+ RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
+
+ RSC::Type::Builder tb(mRS, e);
+ tb.setX(mWidth);
+ tb.setY(mHeight);
+ RSC::sp<const RSC::Type> t = tb.create();
+
+ mAllocIn = RSC::Allocation::createTyped(mRS, t);
+ mAllocOut = RSC::Allocation::createTyped(mRS, t);
+
+ mScript = new ScriptC_saturationARGB(mRS);
+
+ mScript->set_gSaturation(mSaturation);
+
+ return OK;
+}
+
+void SaturationFilter::reset() {
+ mScript.clear();
+ mAllocOut.clear();
+ mAllocIn.clear();
+ mRS.clear();
+}
+
+status_t SaturationFilter::setParameters(const sp<AMessage> &msg) {
+ sp<AMessage> params;
+ CHECK(msg->findMessage("params", &params));
+
+ float saturation;
+ if (params->findFloat("saturation", &saturation)) {
+ mSaturation = saturation;
+ }
+
+ return OK;
+}
+
+status_t SaturationFilter::processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
+ mScript->forEach_root(mAllocIn, mAllocOut);
+ mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/filters/SaturationFilter.h b/media/libstagefright/filters/SaturationFilter.h
new file mode 100644
index 0000000..0545021
--- /dev/null
+++ b/media/libstagefright/filters/SaturationFilter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SATURATION_FILTER_H_
+#define SATURATION_FILTER_H_
+
+#include <RenderScript.h>
+
+#include "ScriptC_saturationARGB.h"
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct SaturationFilter : public SimpleFilter {
+public:
+ SaturationFilter() : mSaturation(1.f) {};
+
+ virtual status_t configure(const sp<AMessage> &msg);
+ virtual status_t start();
+ virtual void reset();
+ virtual status_t setParameters(const sp<AMessage> &msg);
+ virtual status_t processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+ virtual ~SaturationFilter() {};
+
+private:
+ AString mCacheDir;
+ RSC::sp<RSC::RS> mRS;
+ RSC::sp<RSC::Allocation> mAllocIn;
+ RSC::sp<RSC::Allocation> mAllocOut;
+ RSC::sp<ScriptC_saturationARGB> mScript;
+ float mSaturation;
+};
+
+} // namespace android
+
+#endif // SATURATION_FILTER_H_
diff --git a/media/libstagefright/filters/SimpleFilter.cpp b/media/libstagefright/filters/SimpleFilter.cpp
new file mode 100644
index 0000000..6c1ca2c
--- /dev/null
+++ b/media/libstagefright/filters/SimpleFilter.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "SimpleFilter.h"
+
+namespace android {
+
+status_t SimpleFilter::configure(const sp<AMessage> &msg) {
+ CHECK(msg->findInt32("width", &mWidth));
+ CHECK(msg->findInt32("height", &mHeight));
+ if (!msg->findInt32("stride", &mStride)) {
+ mStride = mWidth;
+ }
+ if (!msg->findInt32("slice-height", &mSliceHeight)) {
+ mSliceHeight = mHeight;
+ }
+ CHECK(msg->findInt32("color-format", &mColorFormatIn));
+ mColorFormatOut = mColorFormatIn;
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/filters/SimpleFilter.h b/media/libstagefright/filters/SimpleFilter.h
new file mode 100644
index 0000000..4cd37ef
--- /dev/null
+++ b/media/libstagefright/filters/SimpleFilter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SIMPLE_FILTER_H_
+#define SIMPLE_FILTER_H_
+
+#include <stdint.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+
+struct SimpleFilter : public RefBase {
+public:
+ SimpleFilter() : mWidth(0), mHeight(0), mStride(0), mSliceHeight(0),
+ mColorFormatIn(0), mColorFormatOut(0) {};
+
+ virtual status_t configure(const sp<AMessage> &msg);
+
+ virtual status_t start() = 0;
+ virtual void reset() = 0;
+ virtual status_t setParameters(const sp<AMessage> &msg) = 0;
+ virtual status_t processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) = 0;
+
+protected:
+ int32_t mWidth, mHeight;
+ int32_t mStride, mSliceHeight;
+ int32_t mColorFormatIn, mColorFormatOut;
+
+ virtual ~SimpleFilter() {};
+};
+
+} // namespace android
+
+#endif // SIMPLE_FILTER_H_
diff --git a/media/libstagefright/filters/ZeroFilter.cpp b/media/libstagefright/filters/ZeroFilter.cpp
new file mode 100644
index 0000000..3f1243c
--- /dev/null
+++ b/media/libstagefright/filters/ZeroFilter.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ZeroFilter"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "ZeroFilter.h"
+
+namespace android {
+
+status_t ZeroFilter::setParameters(const sp<AMessage> &msg) {
+ sp<AMessage> params;
+ CHECK(msg->findMessage("params", &params));
+
+ int32_t invert;
+ if (params->findInt32("invert", &invert)) {
+ mInvertData = (invert != 0);
+ }
+
+ return OK;
+}
+
+status_t ZeroFilter::processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+    // assumes input and output buffers are identically sized, since this is a copy filter
+ if (mInvertData) {
+ uint32_t* src = (uint32_t*)srcBuffer->data();
+ uint32_t* dest = (uint32_t*)outBuffer->data();
+ for (size_t i = 0; i < srcBuffer->size() / 4; ++i) {
+ *(dest++) = *(src++) ^ 0xFFFFFFFF;
+ }
+ } else {
+ memcpy(outBuffer->data(), srcBuffer->data(), srcBuffer->size());
+ }
+ outBuffer->setRange(0, srcBuffer->size());
+
+ return OK;
+}
+
+} // namespace android
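
Usage sketch (not part of this patch): ZeroFilter's only knob is "invert", toggled through the nested "params" message that setParameters() above unwraps; zeroFilter stands for an already-configured sp<ZeroFilter>.

    sp<AMessage> params = new AMessage;
    params->setInt32("invert", 1);            // non-zero enables bitwise inversion
    sp<AMessage> msg = new AMessage;
    msg->setMessage("params", params);
    zeroFilter->setParameters(msg);
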
diff --git a/media/libstagefright/filters/ZeroFilter.h b/media/libstagefright/filters/ZeroFilter.h
new file mode 100644
index 0000000..bd34dfb
--- /dev/null
+++ b/media/libstagefright/filters/ZeroFilter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ZERO_FILTER_H_
+#define ZERO_FILTER_H_
+
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct ZeroFilter : public SimpleFilter {
+public:
+ ZeroFilter() : mInvertData(false) {};
+
+ virtual status_t start() { return OK; };
+ virtual void reset() {};
+ virtual status_t setParameters(const sp<AMessage> &msg);
+ virtual status_t processBuffers(
+ const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+ virtual ~ZeroFilter() {};
+
+private:
+ bool mInvertData;
+};
+
+} // namespace android
+
+#endif // ZERO_FILTER_H_
diff --git a/media/libstagefright/filters/saturation.rs b/media/libstagefright/filters/saturation.rs
new file mode 100644
index 0000000..2c867ac
--- /dev/null
+++ b/media/libstagefright/filters/saturation.rs
@@ -0,0 +1,40 @@
+// Sample script for RGB888 support (compare to saturationARGB.rs)
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+
+// global variables (parameters accessible to application code)
+float gSaturation = 1.0f;
+
+void root(const uchar3 *v_in, uchar3 *v_out) {
+ // scale 0-255 uchar to 0-1.0 float
+ float3 in = {v_in->r * 0.003921569f, v_in->g * 0.003921569f,
+ v_in->b * 0.003921569f};
+
+ // apply saturation filter
+ float3 result = dot(in, gMonoMult);
+ result = mix(result, in, gSaturation);
+
+ // convert to uchar, copied from rsPackColorTo8888
+ v_out->x = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+ v_out->y = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+ v_out->z = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
diff --git a/media/libstagefright/filters/saturationARGB.rs b/media/libstagefright/filters/saturationARGB.rs
new file mode 100644
index 0000000..1de9dd8
--- /dev/null
+++ b/media/libstagefright/filters/saturationARGB.rs
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+
+// global variables (parameters accessible to application code)
+float gSaturation = 1.0f;
+
+void root(const uchar4 *v_in, uchar4 *v_out) {
+ v_out->x = v_in->x; // don't modify A
+
+ // get RGB, scale 0-255 uchar to 0-1.0 float
+ float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
+ v_in->w * 0.003921569f};
+
+ // apply saturation filter
+ float3 result = dot(rgb, gMonoMult);
+ result = mix(result, rgb, gSaturation);
+
+ v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+ v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+ v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
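
Note (not part of this patch): the RenderScript compiler reflects saturationARGB.rs into the generated ScriptC_saturationARGB class used by SaturationFilter above; the gSaturation global becomes set_gSaturation() and root() becomes forEach_root(). A condensed sketch of that reflected API, assuming rs is an initialized RSC::sp<RSC::RS> and allocIn/allocOut are ARGB8888 Allocations:

    RSC::sp<ScriptC_saturationARGB> script = new ScriptC_saturationARGB(rs);
    script->set_gSaturation(1.5f);              // binds the 'gSaturation' global above
    script->forEach_root(allocIn, allocOut);    // runs root() over every uchar4 cell
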
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index a605595..4cf3819 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -617,8 +617,6 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() {
// having to interpolate.
// The final AAC frame may well extend into the next RangeInfo but
// that's ok.
- // TODO: the logic commented above is skipped because codec cannot take
- // arbitrary sized input buffers;
size_t offset = 0;
while (offset < info.mLength) {
if (offset + 7 > mBuffer->size()) {
@@ -683,12 +681,9 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() {
size_t headerSize __unused = protection_absent ? 7 : 9;
offset += aac_frame_length;
- // TODO: move back to concatenation when codec can support arbitrary input buffers.
- // For now only queue a single buffer
- break;
}
- int64_t timeUs = fetchTimestampAAC(offset);
+ int64_t timeUs = fetchTimestamp(offset);
sp<ABuffer> accessUnit = new ABuffer(offset);
memcpy(accessUnit->data(), mBuffer->data(), offset);
@@ -735,45 +730,6 @@ int64_t ElementaryStreamQueue::fetchTimestamp(size_t size) {
return timeUs;
}
-// TODO: avoid interpolating timestamps once codec supports arbitrary sized input buffers
-int64_t ElementaryStreamQueue::fetchTimestampAAC(size_t size) {
- int64_t timeUs = -1;
- bool first = true;
-
- size_t samplesize = size;
- while (size > 0) {
- CHECK(!mRangeInfos.empty());
-
- RangeInfo *info = &*mRangeInfos.begin();
-
- if (first) {
- timeUs = info->mTimestampUs;
- first = false;
- }
-
- if (info->mLength > size) {
- int32_t sampleRate;
- CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
- info->mLength -= size;
- size_t numSamples = 1024 * size / samplesize;
- info->mTimestampUs += numSamples * 1000000ll / sampleRate;
- size = 0;
- } else {
- size -= info->mLength;
-
- mRangeInfos.erase(mRangeInfos.begin());
- info = NULL;
- }
-
- }
-
- if (timeUs == 0ll) {
- ALOGV("Returning 0 timestamp");
- }
-
- return timeUs;
-}
-
struct NALPosition {
size_t nalOffset;
size_t nalSize;
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index eb4b1c9..45b4624 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -77,7 +77,6 @@ private:
// consume a logical (compressed) access unit of size "size",
// returns its timestamp in us (or -1 if no time information).
int64_t fetchTimestamp(size_t size);
- int64_t fetchTimestampAAC(size_t size);
DISALLOW_EVIL_CONSTRUCTORS(ElementaryStreamQueue);
};
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 069961b..737f144 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -80,38 +80,6 @@ WebmWriter::WebmWriter(int fd)
mCuePoints);
}
-WebmWriter::WebmWriter(const char *filename)
- : mInitCheck(NO_INIT),
- mTimeCodeScale(1000000),
- mStartTimestampUs(0),
- mStartTimeOffsetMs(0),
- mSegmentOffset(0),
- mSegmentDataStart(0),
- mInfoOffset(0),
- mInfoSize(0),
- mTracksOffset(0),
- mCuesOffset(0),
- mPaused(false),
- mStarted(false),
- mIsFileSizeLimitExplicitlyRequested(false),
- mIsRealTimeRecording(false),
- mStreamableFile(true),
- mEstimatedCuesSize(0) {
- mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
- if (mFd >= 0) {
- ALOGV("fd %d; flags: %o", mFd, fcntl(mFd, F_GETFL, 0));
- mInitCheck = OK;
- }
- mStreams[kAudioIndex] = WebmStream(kAudioType, "Audio", &WebmWriter::audioTrack);
- mStreams[kVideoIndex] = WebmStream(kVideoType, "Video", &WebmWriter::videoTrack);
- mSinkThread = new WebmFrameSinkThread(
- mFd,
- mSegmentDataStart,
- mStreams[kVideoIndex].mSink,
- mStreams[kAudioIndex].mSink,
- mCuePoints);
-}
-
// static
sp<WebmElement> WebmWriter::videoTrack(const sp<MetaData>& md) {
int32_t width, height;
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
index 36b6965..4ad770e 100644
--- a/media/libstagefright/webm/WebmWriter.h
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -37,7 +37,6 @@ namespace android {
class WebmWriter : public MediaWriter {
public:
WebmWriter(int fd);
- WebmWriter(const char *filename);
~WebmWriter() { reset(); }
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 3a280f0..f1b84ad 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -11,7 +11,7 @@ endif
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- main_mediaserver.cpp
+ main_mediaserver.cpp
LOCAL_SHARED_LIBRARIES := \
libaudioflinger \
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index ed00b72..3124e4a 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -352,7 +352,8 @@ media_status_t AMediaCodec_releaseOutputBufferAtTime(
}
//EXPORT
-media_status_t AMediaCodec_setNotificationCallback(AMediaCodec *mData, OnCodecEvent callback, void *userdata) {
+media_status_t AMediaCodec_setNotificationCallback(AMediaCodec *mData, OnCodecEvent callback,
+ void *userdata) {
mData->mCallback = callback;
mData->mCallbackUserData = userdata;
return AMEDIA_OK;
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index db57d0b..0ecd64f 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -70,7 +70,8 @@ media_status_t AMediaExtractor_delete(AMediaExtractor *mData) {
}
EXPORT
-media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor *mData, int fd, off64_t offset, off64_t length) {
+media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor *mData, int fd, off64_t offset,
+ off64_t length) {
ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
return translate_error(mData->mImpl->setDataSource(fd, offset, length));
}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 993db73..9ad437a 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -185,7 +185,8 @@ AudioFlinger::AudioFlinger()
char value[PROPERTY_VALUE_MAX];
bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
if (doLog) {
- mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters", MemoryHeapBase::READ_ONLY);
+ mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
+ MemoryHeapBase::READ_ONLY);
}
#ifdef TEE_SINK
@@ -401,6 +402,9 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
String8 result(kClientLockedString);
write(fd, result.string(), result.size());
}
+
+ EffectDumpEffects(fd);
+
dumpClients(fd, args);
if (clientLocked) {
mClientLock.unlock();
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index fd28ea1..0d4b358 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -430,6 +430,10 @@ void AudioMixer::setLog(NBLog::Writer *log)
mState.mLog = log;
}
+static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
+ return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+}
+
int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
audio_format_t format, int sessionId)
{
@@ -492,24 +496,23 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
t->mInputBufferProvider = NULL;
t->mReformatBufferProvider = NULL;
t->downmixerBufferProvider = NULL;
+ t->mPostDownmixReformatBufferProvider = NULL;
t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
t->mFormat = format;
- t->mMixerInFormat = kUseFloat && kUseNewMixer
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+ t->mMixerInFormat = selectMixerInFormat(format);
+ t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
// Check the downmixing (or upmixing) requirements.
- status_t status = initTrackDownmix(t, n);
+ status_t status = t->prepareForDownmix();
if (status != OK) {
ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
return -1;
}
- // initTrackDownmix() may change the input format requirement.
- // If you desire floating point input to the mixer, it may change
- // to integer because the downmixer requires integer to process.
+ // prepareForDownmix() may change mDownmixRequiresFormat
ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
- prepareTrackForReformat(t, n);
+ t->prepareForReformat();
mTrackNames |= 1 << n;
return TRACK0 + n;
}
@@ -526,7 +529,7 @@ void AudioMixer::invalidateState(uint32_t mask)
}
// Called when channel masks have changed for a track name
-// TODO: Fix Downmixbufferprofider not to (possibly) change mixer input format,
+// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
// which will simplify this logic.
bool AudioMixer::setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
@@ -551,21 +554,18 @@ bool AudioMixer::setChannelMasks(int name,
// channel masks have changed, does this track need a downmixer?
// update to try using our desired format (if we aren't already using it)
- const audio_format_t prevMixerInFormat = track.mMixerInFormat;
- track.mMixerInFormat = kUseFloat && kUseNewMixer
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- const status_t status = initTrackDownmix(&mState.tracks[name], name);
+ const audio_format_t prevDownmixerFormat = track.mDownmixRequiresFormat;
+ const status_t status = mState.tracks[name].prepareForDownmix();
ALOGE_IF(status != OK,
- "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x",
+ "prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
status, track.channelMask, track.mMixerChannelMask);
- const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat;
- if (mixerInFormatChanged) {
- prepareTrackForReformat(&track, name); // because of downmixer, track format may change!
+ if (prevDownmixerFormat != track.mDownmixRequiresFormat) {
+ track.prepareForReformat(); // because of downmixer, track format may change!
}
- if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) {
- // resampler input format or channels may have changed.
+ if (track.resampler && mixerChannelCountChanged) {
+ // resampler channels may have changed.
const uint32_t resetToSampleRate = track.sampleRate;
delete track.resampler;
track.resampler = NULL;
@@ -576,99 +576,122 @@ bool AudioMixer::setChannelMasks(int name,
return true;
}
-status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName)
-{
- // Only remix (upmix or downmix) if the track and mixer/device channel masks
- // are not the same and not handled internally, as mono -> stereo currently is.
- if (pTrack->channelMask != pTrack->mMixerChannelMask
- && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO
- && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
- return prepareTrackForDownmix(pTrack, trackName);
- }
- // no remix necessary
- unprepareTrackForDownmix(pTrack, trackName);
- return NO_ERROR;
-}
-
-void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) {
- ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName);
+void AudioMixer::track_t::unprepareForDownmix() {
+ ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
- if (pTrack->downmixerBufferProvider != NULL) {
+ mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
+ if (downmixerBufferProvider != NULL) {
// this track had previously been configured with a downmixer, delete it
ALOGV(" deleting old downmixer");
- delete pTrack->downmixerBufferProvider;
- pTrack->downmixerBufferProvider = NULL;
- reconfigureBufferProviders(pTrack);
+ delete downmixerBufferProvider;
+ downmixerBufferProvider = NULL;
+ reconfigureBufferProviders();
} else {
ALOGV(" nothing to do, no downmixer to delete");
}
}
-status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName)
+status_t AudioMixer::track_t::prepareForDownmix()
{
- ALOGV("AudioMixer::prepareTrackForDownmix(%d) with mask 0x%x", trackName, pTrack->channelMask);
+ ALOGV("AudioMixer::prepareForDownmix(%p) with mask 0x%x",
+ this, channelMask);
// discard the previous downmixer if there was one
- unprepareTrackForDownmix(pTrack, trackName);
+ unprepareForDownmix();
+ // Only remix (upmix or downmix) if the track and mixer/device channel masks
+ // are not the same and not handled internally, as mono -> stereo currently is.
+ if (channelMask == mMixerChannelMask
+ || (channelMask == AUDIO_CHANNEL_OUT_MONO
+ && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
+ return NO_ERROR;
+ }
if (DownmixerBufferProvider::isMultichannelCapable()) {
- DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
- pTrack->mMixerChannelMask,
- AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */,
- pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
+ DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
+ mMixerChannelMask,
+ AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
+ sampleRate, sessionId, kCopyBufferFrameCount);
if (pDbp->isValid()) { // if constructor completed properly
- pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
- pTrack->downmixerBufferProvider = pDbp;
- reconfigureBufferProviders(pTrack);
+ mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+ downmixerBufferProvider = pDbp;
+ reconfigureBufferProviders();
return NO_ERROR;
}
delete pDbp;
}
// Effect downmixer does not accept the channel conversion. Let's use our remixer.
- RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask,
- pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount);
+ RemixBufferProvider* pRbp = new RemixBufferProvider(channelMask,
+ mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount);
// Remix always finds a conversion whereas Downmixer effect above may fail.
- pTrack->downmixerBufferProvider = pRbp;
- reconfigureBufferProviders(pTrack);
+ downmixerBufferProvider = pRbp;
+ reconfigureBufferProviders();
return NO_ERROR;
}
-void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) {
- ALOGV("AudioMixer::unprepareTrackForReformat(%d)", trackName);
- if (pTrack->mReformatBufferProvider != NULL) {
- delete pTrack->mReformatBufferProvider;
- pTrack->mReformatBufferProvider = NULL;
- reconfigureBufferProviders(pTrack);
+void AudioMixer::track_t::unprepareForReformat() {
+ ALOGV("AudioMixer::unprepareForReformat(%p)", this);
+ bool requiresReconfigure = false;
+ if (mReformatBufferProvider != NULL) {
+ delete mReformatBufferProvider;
+ mReformatBufferProvider = NULL;
+ requiresReconfigure = true;
+ }
+ if (mPostDownmixReformatBufferProvider != NULL) {
+ delete mPostDownmixReformatBufferProvider;
+ mPostDownmixReformatBufferProvider = NULL;
+ requiresReconfigure = true;
+ }
+ if (requiresReconfigure) {
+ reconfigureBufferProviders();
}
}
-status_t AudioMixer::prepareTrackForReformat(track_t* pTrack, int trackName)
+status_t AudioMixer::track_t::prepareForReformat()
{
- ALOGV("AudioMixer::prepareTrackForReformat(%d) with format %#x", trackName, pTrack->mFormat);
- // discard the previous reformatter if there was one
- unprepareTrackForReformat(pTrack, trackName);
- // only configure reformatter if needed
- if (pTrack->mFormat != pTrack->mMixerInFormat) {
- pTrack->mReformatBufferProvider = new ReformatBufferProvider(
- audio_channel_count_from_out_mask(pTrack->channelMask),
- pTrack->mFormat, pTrack->mMixerInFormat,
+ ALOGV("AudioMixer::prepareForReformat(%p) with format %#x", this, mFormat);
+ // discard previous reformatters
+ unprepareForReformat();
+ // only configure reformatters as needed
+ const audio_format_t targetFormat = mDownmixRequiresFormat != AUDIO_FORMAT_INVALID
+ ? mDownmixRequiresFormat : mMixerInFormat;
+ bool requiresReconfigure = false;
+ if (mFormat != targetFormat) {
+ mReformatBufferProvider = new ReformatBufferProvider(
+ audio_channel_count_from_out_mask(channelMask),
+ mFormat,
+ targetFormat,
kCopyBufferFrameCount);
- reconfigureBufferProviders(pTrack);
+ requiresReconfigure = true;
+ }
+ if (targetFormat != mMixerInFormat) {
+ mPostDownmixReformatBufferProvider = new ReformatBufferProvider(
+ audio_channel_count_from_out_mask(mMixerChannelMask),
+ targetFormat,
+ mMixerInFormat,
+ kCopyBufferFrameCount);
+ requiresReconfigure = true;
+ }
+ if (requiresReconfigure) {
+ reconfigureBufferProviders();
}
return NO_ERROR;
}
-void AudioMixer::reconfigureBufferProviders(track_t* pTrack)
+void AudioMixer::track_t::reconfigureBufferProviders()
{
- pTrack->bufferProvider = pTrack->mInputBufferProvider;
- if (pTrack->mReformatBufferProvider) {
- pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider);
- pTrack->bufferProvider = pTrack->mReformatBufferProvider;
+ bufferProvider = mInputBufferProvider;
+ if (mReformatBufferProvider) {
+ mReformatBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mReformatBufferProvider;
+ }
+ if (downmixerBufferProvider) {
+ downmixerBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = downmixerBufferProvider;
}
- if (pTrack->downmixerBufferProvider) {
- pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider);
- pTrack->bufferProvider = pTrack->downmixerBufferProvider;
+ if (mPostDownmixReformatBufferProvider) {
+ mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mPostDownmixReformatBufferProvider;
}
}
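The net effect of reconfigureBufferProviders() is a pull chain from the AudioTrack data to the mixer engine. An illustrative sketch, assuming a hypothetical float 5.1 track feeding a float stereo mixer whose downmix effect only accepts PCM 16 bit:

// Illustration only; the concrete providers depend on the track and mixer formats.
// mInputBufferProvider                          float, 5.1 (AudioTrack data)
//   -> mReformatBufferProvider                  float -> 16 bit (mDownmixRequiresFormat)
//     -> downmixerBufferProvider                5.1 -> stereo, 16 bit
//       -> mPostDownmixReformatBufferProvider   16 bit -> float (mMixerInFormat)
//         -> bufferProvider                     what the mixer engine reads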
@@ -687,9 +710,9 @@ void AudioMixer::deleteTrackName(int name)
delete track.resampler;
track.resampler = NULL;
// delete the downmixer
- unprepareTrackForDownmix(&mState.tracks[name], name);
+ mState.tracks[name].unprepareForDownmix();
// delete the reformatter
- unprepareTrackForReformat(&mState.tracks[name], name);
+ mState.tracks[name].unprepareForReformat();
mTrackNames &= ~(1<<name);
}
@@ -828,7 +851,7 @@ void AudioMixer::setParameter(int name, int target, int param, void *value)
ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
track.mFormat = format;
ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
- prepareTrackForReformat(&track, name);
+ track.prepareForReformat();
invalidateState(1 << name);
}
} break;
@@ -1032,10 +1055,13 @@ void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider
if (mState.tracks[name].mReformatBufferProvider != NULL) {
mState.tracks[name].mReformatBufferProvider->reset();
} else if (mState.tracks[name].downmixerBufferProvider != NULL) {
+ mState.tracks[name].downmixerBufferProvider->reset();
+ } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
+ mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
}
mState.tracks[name].mInputBufferProvider = bufferProvider;
- reconfigureBufferProviders(&mState.tracks[name]);
+ mState.tracks[name].reconfigureBufferProviders();
}
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index f4f142b..c5df08a 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -127,10 +127,16 @@ public:
size_t getUnreleasedFrames(int name) const;
static inline bool isValidPcmTrackFormat(audio_format_t format) {
- return format == AUDIO_FORMAT_PCM_16_BIT ||
- format == AUDIO_FORMAT_PCM_24_BIT_PACKED ||
- format == AUDIO_FORMAT_PCM_32_BIT ||
- format == AUDIO_FORMAT_PCM_FLOAT;
+ switch (format) {
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return true;
+ default:
+ return false;
+ }
}
private:
@@ -205,17 +211,34 @@ private:
int32_t* auxBuffer;
// 16-byte boundary
+
+ /* Buffer providers are constructed to translate the track input data as needed.
+ *
+ * 1) mInputBufferProvider: The AudioTrack buffer provider.
+ * 2) mReformatBufferProvider: If not NULL, reformats the track data to
+ * mDownmixRequiresFormat when the downmixer requires a specific format,
+ * otherwise to mMixerInFormat. For example, it may convert floating point
+ * input to PCM 16 bit if the downmixer requires it.
+ * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * the number of channels required by the mixer sink.
+ * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * the downmixer requirements to the mixer engine input requirements.
+ */
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
CopyBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting.
CopyBufferProvider* downmixerBufferProvider; // wrapper for channel conversion.
+ CopyBufferProvider* mPostDownmixReformatBufferProvider;
+ // 16-byte boundary
int32_t sessionId;
- // 16-byte boundary
audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
audio_format_t mFormat; // input track format
audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
// each track must be converted to this format.
+ audio_format_t mDownmixRequiresFormat; // required downmixer format
+ // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+ // AUDIO_FORMAT_INVALID if no required format
float mVolume[MAX_NUM_VOLUMES]; // floating point set volume
float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
@@ -225,7 +248,6 @@ private:
float mPrevAuxLevel; // floating point prev aux level
float mAuxInc; // floating point aux increment
- // 16-byte boundary
audio_channel_mask_t mMixerChannelMask;
uint32_t mMixerChannelCount;
@@ -236,6 +258,12 @@ private:
void adjustVolumeRamp(bool aux, bool useFloat = false);
size_t getUnreleasedFrames() const { return resampler != NULL ?
resampler->getUnreleasedFrames() : 0; };
+
+ status_t prepareForDownmix();
+ void unprepareForDownmix();
+ status_t prepareForReformat();
+ void unprepareForReformat();
+ void reconfigureBufferProviders();
};
typedef void (*process_hook_t)(state_t* state, int64_t pts);
@@ -382,14 +410,6 @@ private:
bool setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
- // TODO: remove unused trackName/trackNum from functions below.
- static status_t initTrackDownmix(track_t* pTrack, int trackName);
- static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
- static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
- static status_t prepareTrackForReformat(track_t* pTrack, int trackNum);
- static void unprepareTrackForReformat(track_t* pTrack, int trackName);
- static void reconfigureBufferProviders(track_t* pTrack);
-
static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
int32_t* aux);
static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h
index f3718b6..a9c84de 100644
--- a/services/audioflinger/AudioResamplerFirGen.h
+++ b/services/audioflinger/AudioResamplerFirGen.h
@@ -204,7 +204,8 @@ struct I0ATerm {
template <>
struct I0ATerm<0> { // 1/sqrt(2*PI);
- static const CONSTEXPR double value = 0.398942280401432677939946059934381868475858631164934657665925;
+ static const CONSTEXPR double value =
+ 0.398942280401432677939946059934381868475858631164934657665925;
};
#if USE_HORNERS_METHOD
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
index efc8055..1118bf8 100644
--- a/services/audioflinger/AudioResamplerFirProcess.h
+++ b/services/audioflinger/AudioResamplerFirProcess.h
@@ -174,7 +174,8 @@ struct InterpNull {
* Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase.
*/
-template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO, typename TINTERP>
+template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO,
+ typename TINTERP>
static inline
void ProcessBase(TO* const out,
size_t count,
@@ -268,7 +269,8 @@ void Process(TO* const out,
TINTERP lerpP,
const TO* const volumeLR)
{
- ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR);
+ ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP,
+ volumeLR);
}
/*
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 2678cbf..141a79e 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -629,7 +629,8 @@ void FastMixerDumpState::dump(int fd) const
left.sample(tail[i]);
right.sample(tail[n - (i + 1)]);
}
- dprintf(fd, " Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n"
+ dprintf(fd, " Distribution of mix cycle times in ms for the tails "
+ "(> ~3 stddev outliers):\n"
" left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
" right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ee48276..902d5e4 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -255,7 +255,7 @@ public:
class Buffer : public AudioBufferProvider::Buffer {
public:
- int16_t *mBuffer;
+ void *mBuffer;
};
OutputTrack(PlaybackThread *thread,
@@ -271,7 +271,7 @@ public:
AudioSystem::SYNC_EVENT_NONE,
int triggerSession = 0);
virtual void stop();
- bool write(int16_t* data, uint32_t frames);
+ bool write(void* data, uint32_t frames);
bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 51025fe..15dd408 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -172,6 +172,18 @@ static int sFastTrackMultiplier = kFastTrackMultiplier;
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
static const size_t kRecordThreadReadOnlyHeapSize = 0x2000;
+// Returns the source frames needed to resample to destination frames. This is not a precise
+// value and depends on the resampler (and possibly how it handles rounding internally).
+// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
+// may not be true if the resampler is asynchronous.
+static inline size_t sourceFramesNeeded(
+ uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
+ // +1 for rounding - always do this even if matched ratio
+ // +1 for additional sample needed for interpolation
+ return srcSampleRate == dstSampleRate ? dstFramesRequired :
+ size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
+}
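A worked example of the helper, using illustrative rates not taken from the patch: resampling to 960 output frames at 48 kHz from a 44.1 kHz source.

// sourceFramesNeeded(44100 /*src*/, 960 /*dstFrames*/, 48000 /*dst*/)
//   = 960 * 44100 / 48000 + 1 + 1
//   = 882 + 2
//   = 884 source frames
// With matched rates, e.g. sourceFramesNeeded(48000, 960, 48000), it returns 960.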
+
// ----------------------------------------------------------------------------
static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
@@ -314,6 +326,64 @@ void CpuStats::sample(const String8 &title
// ThreadBase
// ----------------------------------------------------------------------------
+// static
+const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBase::type_t type)
+{
+ switch (type) {
+ case MIXER:
+ return "MIXER";
+ case DIRECT:
+ return "DIRECT";
+ case DUPLICATING:
+ return "DUPLICATING";
+ case RECORD:
+ return "RECORD";
+ case OFFLOAD:
+ return "OFFLOAD";
+ default:
+ return "unknown";
+ }
+}
+
+static String8 outputFlagsToString(audio_output_flags_t flags)
+{
+ static const struct mapping {
+ audio_output_flags_t mFlag;
+ const char * mString;
+ } mappings[] = {
+ AUDIO_OUTPUT_FLAG_DIRECT, "DIRECT",
+ AUDIO_OUTPUT_FLAG_PRIMARY, "PRIMARY",
+ AUDIO_OUTPUT_FLAG_FAST, "FAST",
+ AUDIO_OUTPUT_FLAG_DEEP_BUFFER, "DEEP_BUFFER",
+ AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD, "COMPRESS_OFFLOAD",
+ AUDIO_OUTPUT_FLAG_NON_BLOCKING, "NON_BLOCKING",
+ AUDIO_OUTPUT_FLAG_HW_AV_SYNC, "HW_AV_SYNC",
+ AUDIO_OUTPUT_FLAG_NONE, "NONE", // must be last
+ };
+ String8 result;
+ audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
+ const mapping *entry;
+ for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
+ allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
+ if (flags & entry->mFlag) {
+ if (!result.isEmpty()) {
+ result.append("|");
+ }
+ result.append(entry->mString);
+ }
+ }
+ if (flags & ~allFlags) {
+ if (!result.isEmpty()) {
+ result.append("|");
+ }
+ result.appendFormat("0x%X", flags & ~allFlags);
+ }
+ if (result.isEmpty()) {
+ result.append(entry->mString);
+ }
+ return result;
+}
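Illustrative outputs of outputFlagsToString(), assuming typical flag combinations:

// outputFlagsToString(AUDIO_OUTPUT_FLAG_NONE)                   -> "NONE"
// outputFlagsToString((audio_output_flags_t)
//         (AUDIO_OUTPUT_FLAG_PRIMARY | AUDIO_OUTPUT_FLAG_FAST)) -> "PRIMARY|FAST"
// Unrecognized bits are appended in hex, e.g. "PRIMARY|0x80000000".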
+
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
: Thread(false /*canCallJava*/),
@@ -577,20 +647,21 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __u
bool locked = AudioFlinger::dumpTryLock(mLock);
if (!locked) {
- dprintf(fd, "thread %p maybe dead locked\n", this);
+ dprintf(fd, "thread %p may be deadlocked\n", this);
}
dprintf(fd, " I/O handle: %d\n", mId);
dprintf(fd, " TID: %d\n", getTid());
dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
- dprintf(fd, " Sample rate: %u\n", mSampleRate);
+ dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
+ dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
dprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize);
- dprintf(fd, " Channel Count: %u\n", mChannelCount);
- dprintf(fd, " Channel Mask: 0x%08x (%s)\n", mChannelMask,
+ dprintf(fd, " Channel count: %u\n", mChannelCount);
+ dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
channelMaskToString(mChannelMask, mType != RECORD).string());
- dprintf(fd, " Format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
- dprintf(fd, " Frame size: %zu\n", mFrameSize);
+ dprintf(fd, " Format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+ dprintf(fd, " Frame size: %zu bytes\n", mFrameSize);
dprintf(fd, " Pending config events:");
size_t numConfig = mConfigEvents.size();
if (numConfig) {
@@ -1315,7 +1386,7 @@ void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& ar
void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
- dprintf(fd, "\nOutput thread %p:\n", this);
+ dprintf(fd, "\nOutput thread %p type %d (%s):\n", this, type(), threadTypeToString(type()));
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
dprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
@@ -1326,6 +1397,10 @@ void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>&
dprintf(fd, " Mixer buffer: %p\n", mMixerBuffer);
dprintf(fd, " Effect buffer: %p\n", mEffectBuffer);
dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
+ AudioStreamOut *output = mOutput;
+ audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
+ String8 flagsAsString = outputFlagsToString(flags);
+ dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
dumpBase(fd, args);
}
@@ -1861,6 +1936,22 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l()
}
}
+ if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
+ // For best precision, we use float instead of the associated output
+ // device format (typically PCM 16 bit).
+
+ mFormat = AUDIO_FORMAT_PCM_FLOAT;
+ mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
+ mBufferSize = mFrameSize * mFrameCount;
+
+ // TODO: We currently use the associated output device channel mask and sample rate.
+ // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
+ // (if a valid mask) to avoid premature downmix.
+ // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
+ // instead of the output device sample rate to avoid loss of high frequency information.
+ // This may need to be updated as MixerThreads/OutputTracks are added, rather than set once here.
+ }
+
// Calculate size of normal sink buffer relative to the HAL output buffer size
double multiplier = 1.0;
if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
@@ -2137,6 +2228,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
} else {
bytesWritten = framesWritten;
}
+ mLatchDValid = false;
status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
if (status == NO_ERROR) {
size_t totalFramesWritten = mNormalSink->framesWritten();
@@ -2640,7 +2732,9 @@ bool AudioFlinger::PlaybackThread::threadLoop()
}
} else {
+ ATRACE_BEGIN("sleep");
usleep(sleepTime);
+ ATRACE_END();
}
}
@@ -2800,6 +2894,12 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
mNormalFrameCount);
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
+ if (type == DUPLICATING) {
+ // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
+ // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
+ // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
+ return;
+ }
// create an NBAIO sink for the HAL output stream, and negotiate
mOutputSink = new AudioStreamOutSink(output->stream);
size_t numCounterOffers = 0;
@@ -2841,6 +2941,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
NBAIO_Format format = mOutputSink->format();
NBAIO_Format origformat = format;
// adjust format to match that of the Fast Mixer
+ ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
@@ -3386,8 +3487,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
if (sr == mSampleRate) {
desiredFrames = mNormalFrameCount;
} else {
- // +1 for rounding and +1 for additional sample needed for interpolation
- desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
+ desiredFrames = sourceFramesNeeded(sr, mNormalFrameCount, mSampleRate);
// add frames already consumed but not yet released by the resampler
// because mAudioTrackServerProxy->framesReady() will include these frames
desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
@@ -3405,6 +3505,23 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
}
size_t framesReady = track->framesReady();
+ if (ATRACE_ENABLED()) {
+ // I wish we had formatted trace names
+ char traceName[16];
+ strcpy(traceName, "nRdy");
+ int name = track->name();
+ if (AudioMixer::TRACK0 <= name &&
+ name < (int) (AudioMixer::TRACK0 + AudioMixer::MAX_NUM_TRACKS)) {
+ name -= AudioMixer::TRACK0;
+ traceName[4] = (name / 10) + '0';
+ traceName[5] = (name % 10) + '0';
+ } else {
+ traceName[4] = '?';
+ traceName[5] = '?';
+ }
+ traceName[6] = '\0';
+ ATRACE_INT(traceName, framesReady);
+ }
if ((framesReady >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
{
@@ -4797,16 +4914,8 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
{
- // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT
- // for delivery downstream as needed. This in-place conversion is safe as
- // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format
- // (AUDIO_FORMAT_PCM_8_BIT is not allowed here).
- if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
- memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT,
- mSinkBuffer, mFormat, writeFrames * mChannelCount);
- }
for (size_t i = 0; i < outputTracks.size(); i++) {
- outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames);
+ outputTracks[i]->write(mSinkBuffer, writeFrames);
}
mStandby = false;
return (ssize_t)mSinkBufferSize;
@@ -4833,25 +4942,26 @@ void AudioFlinger::DuplicatingThread::clearOutputTracks()
void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
{
Mutex::Autolock _l(mLock);
- // FIXME explain this formula
- size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
- // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat
- // due to current usage case and restrictions on the AudioBufferProvider.
- // Actual buffer conversion is done in threadLoop_write().
- //
- // TODO: This may change in the future, depending on multichannel
- // (and non int16_t*) support on AF::PlaybackThread::OutputTrack
- OutputTrack *outputTrack = new OutputTrack(thread,
+ // The downstream MixerThread consumes thread->frameCount() amount of frames per mix pass.
+ // Adjust for thread->sampleRate() to determine minimum buffer frame count.
+ // Then triple buffer because Threads do not run synchronously and may not be clock locked.
+ const size_t frameCount =
+ 3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
+ // TODO: Consider asynchronous sample rate conversion to handle clock disparity
+ // from different OutputTracks and their associated MixerThreads (e.g. one may
+ // be nearly empty and the other may be dropping data).
+
+ sp<OutputTrack> outputTrack = new OutputTrack(thread,
this,
mSampleRate,
- AUDIO_FORMAT_PCM_16_BIT,
+ mFormat,
mChannelMask,
frameCount,
IPCThreadState::self()->getCallingUid());
if (outputTrack->cblk() != NULL) {
thread->setStreamVolume(AUDIO_STREAM_PATCH, 1.0f);
mOutputTracks.add(outputTrack);
- ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
+ ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
updateWaitTime_l();
}
}
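A worked example of the new frameCount computation, with illustrative numbers only: a duplicating thread at 48 kHz feeding a downstream MixerThread whose frameCount() is 960 at 44.1 kHz.

// frameCount = 3 * sourceFramesNeeded(48000, 960, 44100)
//            = 3 * (960 * 48000 / 44100 + 1 + 1)
//            = 3 * 1046
//            = 3138 frames of OutputTrack buffering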
@@ -5135,7 +5245,9 @@ reacquire_wakelock:
// sleep with mutex unlocked
if (sleepUs > 0) {
+ ATRACE_BEGIN("sleep");
usleep(sleepUs);
+ ATRACE_END();
sleepUs = 0;
}
@@ -5279,7 +5391,8 @@ reacquire_wakelock:
state->mCommand = FastCaptureState::READ_WRITE;
#if 0 // FIXME
mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
- FastCaptureDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
+ FastCaptureDumpState::kSamplingNforLowRamDevice :
+ FastMixerDumpState::kSamplingN);
#endif
didModify = true;
}
@@ -5427,8 +5540,8 @@ reacquire_wakelock:
upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t *)src,
part1);
} else {
- downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (const int16_t *)src,
- part1);
+ downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
+ (const int16_t *)src, part1);
}
dst += part1 * activeTrack->mFrameSize;
front += part1;
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1088843..a1ac42c 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,6 +32,8 @@ public:
OFFLOAD // Thread class is OffloadThread
};
+ static const char *threadTypeToString(type_t type);
+
ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
virtual ~ThreadBase();
@@ -406,6 +408,7 @@ protected:
audio_channel_mask_t mChannelMask;
uint32_t mChannelCount;
size_t mFrameSize;
+ // not the HAL frame size; this is for the output sink (the pipe to the fast mixer)
audio_format_t mFormat; // Source format for Recording and
// Sink format for Playback.
// Sink format may be different than
@@ -1167,7 +1170,8 @@ private:
const sp<MemoryDealer> mReadOnlyHeap;
// one-time initialization, no locks required
- sp<FastCapture> mFastCapture; // non-0 if there is also a fast capture
+ sp<FastCapture> mFastCapture; // non-0 if there is also
+ // a fast capture
// FIXME audio watchdog thread
// contents are not guaranteed to be consistent, no locks required
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e970036..7757ea2 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -859,6 +859,7 @@ void AudioFlinger::PlaybackThread::Track::reset()
if (mState == FLUSHED) {
mState = IDLE;
}
+ mPreviousValid = false;
}
}
@@ -1709,14 +1710,13 @@ void AudioFlinger::PlaybackThread::OutputTrack::stop()
mActive = false;
}
-bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
+bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
{
Buffer *pInBuffer;
Buffer inBuffer;
- uint32_t channelCount = mChannelCount;
bool outputBufferFull = false;
inBuffer.frameCount = frames;
- inBuffer.i16 = data;
+ inBuffer.raw = data;
uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
@@ -1726,13 +1726,17 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
if (thread != 0) {
MixerThread *mixerThread = (MixerThread *)thread.get();
if (mFrameCount > frames) {
+ // For the first write after being inactive, ensure that we have
+ // enough frames to fill mFrameCount (which should be a multiple of
+ // the minimum buffer requirement of the downstream MixerThread).
+ // This provides enough frames for the downstream mixer to begin
+ // (see AudioFlinger::PlaybackThread::Track::isReady()).
if (mBufferQueue.size() < kMaxOverFlowBuffers) {
uint32_t startFrames = (mFrameCount - frames);
pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
+ pInBuffer->mBuffer = calloc(1, startFrames * mFrameSize);
pInBuffer->frameCount = startFrames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
+ pInBuffer->raw = pInBuffer->mBuffer;
mBufferQueue.add(pInBuffer);
} else {
ALOGW("OutputTrack::write() %p no more buffers in queue", this);
@@ -1773,20 +1777,20 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
pInBuffer->frameCount;
- memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
+ memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
Proxy::Buffer buf;
buf.mFrameCount = outFrames;
buf.mRaw = NULL;
mClientProxy->releaseBuffer(&buf);
pInBuffer->frameCount -= outFrames;
- pInBuffer->i16 += outFrames * channelCount;
+ pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
mOutBuffer.frameCount -= outFrames;
- mOutBuffer.i16 += outFrames * channelCount;
+ mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
if (pInBuffer->frameCount == 0) {
if (mBufferQueue.size()) {
mBufferQueue.removeAt(0);
- delete [] pInBuffer->mBuffer;
+ free(pInBuffer->mBuffer);
delete pInBuffer;
ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
mThread.unsafe_get(), mBufferQueue.size());
@@ -1802,11 +1806,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
if (thread != 0 && !thread->standby()) {
if (mBufferQueue.size() < kMaxOverFlowBuffers) {
pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
+ pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
pInBuffer->frameCount = inBuffer.frameCount;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
- sizeof(int16_t));
+ pInBuffer->raw = pInBuffer->mBuffer;
+ memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
mBufferQueue.add(pInBuffer);
ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
mThread.unsafe_get(), mBufferQueue.size());
@@ -1817,23 +1820,10 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t fr
}
}
- // Calling write() with a 0 length buffer, means that no more data will be written:
- // If no more buffers are pending, fill output track buffer to make sure it is started
- // by output mixer.
- if (frames == 0 && mBufferQueue.size() == 0) {
- // FIXME borken, replace by getting framesReady() from proxy
- size_t user = 0; // was mCblk->user
- if (user < mFrameCount) {
- frames = mFrameCount - user;
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[frames * channelCount];
- pInBuffer->frameCount = frames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
- } else if (mActive) {
- stop();
- }
+ // Calling write() with a 0 length buffer means that no more data will be written:
+ // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
+ if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
+ stop();
}
return outputBufferFull;
@@ -1859,7 +1849,7 @@ void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
for (size_t i = 0; i < size; i++) {
Buffer *pBuffer = mBufferQueue.itemAt(i);
- delete [] pBuffer->mBuffer;
+ free(pBuffer->mBuffer);
delete pBuffer;
}
mBufferQueue.clear();
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
index 9b39e77..e60e6d5 100755
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ b/services/audioflinger/tests/mixer_to_wav_tests.sh
@@ -63,8 +63,18 @@ function createwav() {
# process__genericResampling
# track__Resample / track__genericResample
adb shell test-mixer $1 -s 48000 \
+ -o /sdcard/tm48000grif.wav \
+ sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+ sine:f,6,6000,19000 chirp:i,4,30000
+ adb pull /sdcard/tm48000grif.wav $2
+
+# Test:
+# process__genericResampling
+# track__Resample / track__genericResample
+ adb shell test-mixer $1 -s 48000 \
-o /sdcard/tm48000gr.wav \
- sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000
+ sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+ sine:6,6000,19000
adb pull /sdcard/tm48000gr.wav $2
# Test:
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 9a4fad6..8da6245 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -39,7 +39,7 @@ static void usage(const char* name) {
fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
" [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
" (<input-file> | <command>)+\n", name);
- fprintf(stderr, " -f enable floating point input track\n");
+ fprintf(stderr, " -f enable floating point input track by default\n");
fprintf(stderr, " -m enable floating point mixer output\n");
fprintf(stderr, " -c number of mixer output channels\n");
fprintf(stderr, " -s mixer sample-rate\n");
@@ -47,8 +47,8 @@ static void usage(const char* name) {
fprintf(stderr, " -a <aux-buffer-file>\n");
fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n");
fprintf(stderr, " <input-file> is a WAV file\n");
- fprintf(stderr, " <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n");
- fprintf(stderr, " 'chirp:<channels>,<samplerate>'\n");
+ fprintf(stderr, " <command> can be 'sine:[(i|f),]<channels>,<frequency>,<samplerate>'\n");
+ fprintf(stderr, " 'chirp:[(i|f),]<channels>,<samplerate>'\n");
}
static int writeFile(const char *filename, const void *buffer,
@@ -78,6 +78,18 @@ static int writeFile(const char *filename, const void *buffer,
return EXIT_SUCCESS;
}
+const char *parseFormat(const char *s, bool *useFloat) {
+ if (!strncmp(s, "f,", 2)) {
+ *useFloat = true;
+ return s + 2;
+ }
+ if (!strncmp(s, "i,", 2)) {
+ *useFloat = false;
+ return s + 2;
+ }
+ return s;
+}
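Illustrative usage of parseFormat(), matching the extended 'sine:'/'chirp:' syntax documented above (the argument values are hypothetical):

// bool useFloat = false;
// const char *rest = parseFormat("f,2,1000,44100", &useFloat);
// // rest == "2,1000,44100", useFloat == true
// rest = parseFormat("2,1000,44100", &useFloat);
// // no prefix: the string is returned unchanged and useFloat keeps its prior value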
+
int main(int argc, char* argv[]) {
const char* const progname = argv[0];
bool useInputFloat = false;
@@ -88,8 +100,9 @@ int main(int argc, char* argv[]) {
std::vector<int> Pvalues;
const char* outputFilename = NULL;
const char* auxFilename = NULL;
- std::vector<int32_t> Names;
- std::vector<SignalProvider> Providers;
+ std::vector<int32_t> names;
+ std::vector<SignalProvider> providers;
+ std::vector<audio_format_t> formats;
for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
switch (ch) {
@@ -138,54 +151,65 @@ int main(int argc, char* argv[]) {
size_t outputFrames = 0;
// create providers for each track
- Providers.resize(argc);
+ names.resize(argc);
+ providers.resize(argc);
+ formats.resize(argc);
for (int i = 0; i < argc; ++i) {
static const char chirp[] = "chirp:";
static const char sine[] = "sine:";
static const double kSeconds = 1;
+ bool useFloat = useInputFloat;
if (!strncmp(argv[i], chirp, strlen(chirp))) {
std::vector<int> v;
+ const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);
- parseCSV(argv[i] + strlen(chirp), v);
+ parseCSV(s, v);
if (v.size() == 2) {
printf("creating chirp(%d %d)\n", v[0], v[1]);
- if (useInputFloat) {
- Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+ if (useFloat) {
+ providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
} else {
- Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+ providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
}
- Providers[i].setIncr(Pvalues);
+ providers[i].setIncr(Pvalues);
} else {
fprintf(stderr, "malformed input '%s'\n", argv[i]);
}
} else if (!strncmp(argv[i], sine, strlen(sine))) {
std::vector<int> v;
+ const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);
- parseCSV(argv[i] + strlen(sine), v);
+ parseCSV(s, v);
if (v.size() == 3) {
printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
- if (useInputFloat) {
- Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+ if (useFloat) {
+ providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
} else {
- Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+ providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
}
- Providers[i].setIncr(Pvalues);
+ providers[i].setIncr(Pvalues);
} else {
fprintf(stderr, "malformed input '%s'\n", argv[i]);
}
} else {
printf("creating filename(%s)\n", argv[i]);
if (useInputFloat) {
- Providers[i].setFile<float>(argv[i]);
+ providers[i].setFile<float>(argv[i]);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
} else {
- Providers[i].setFile<short>(argv[i]);
+ providers[i].setFile<short>(argv[i]);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
}
- Providers[i].setIncr(Pvalues);
+ providers[i].setIncr(Pvalues);
}
// calculate the number of output frames
- size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate
- / Providers[i].getSampleRate();
+ size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
+ / providers[i].getSampleRate();
if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
outputFrames = nframes;
}
@@ -213,22 +237,20 @@ int main(int argc, char* argv[]) {
// create the mixer.
const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960
AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
- audio_format_t inputFormat = useInputFloat
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
audio_format_t mixerFormat = useMixerFloat
? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks
+ float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
static float f0; // zero
// set up the tracks.
- for (size_t i = 0; i < Providers.size(); ++i) {
- //printf("track %d out of %d\n", i, Providers.size());
- uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels());
+ for (size_t i = 0; i < providers.size(); ++i) {
+ //printf("track %d out of %d\n", i, providers.size());
+ uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
int32_t name = mixer->getTrackName(channelMask,
- inputFormat, AUDIO_SESSION_OUTPUT_MIX);
+ formats[i], AUDIO_SESSION_OUTPUT_MIX);
ALOG_ASSERT(name >= 0);
- Names.push_back(name);
- mixer->setBufferProvider(name, &Providers[i]);
+ names[i] = name;
+ mixer->setBufferProvider(name, &providers[i]);
mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
(void *)outputAddr);
mixer->setParameter(
@@ -240,7 +262,7 @@ int main(int argc, char* argv[]) {
name,
AudioMixer::TRACK,
AudioMixer::FORMAT,
- (void *)(uintptr_t)inputFormat);
+ (void *)(uintptr_t)formats[i]);
mixer->setParameter(
name,
AudioMixer::TRACK,
@@ -255,7 +277,7 @@ int main(int argc, char* argv[]) {
name,
AudioMixer::RESAMPLE,
AudioMixer::SAMPLE_RATE,
- (void *)(uintptr_t)Providers[i].getSampleRate());
+ (void *)(uintptr_t)providers[i].getSampleRate());
if (useRamp) {
mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
@@ -277,11 +299,11 @@ int main(int argc, char* argv[]) {
// pump the mixer to process data.
size_t i;
for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
- for (size_t j = 0; j < Names.size(); ++j) {
- mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+ for (size_t j = 0; j < names.size(); ++j) {
+ mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
(char *) outputAddr + i * outputFrameSize);
if (auxFilename) {
- mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+ mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
(char *) auxAddr + i * auxFrameSize);
}
}
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 95f2fef..03438bf 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -10,4 +10,6 @@ LOCAL_MODULE:= libmedialogservice
LOCAL_32_BIT_ONLY := true
+LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/tools/resampler_tools/Android.mk b/tools/resampler_tools/Android.mk
index e8cbe39..b58e4cd 100644
--- a/tools/resampler_tools/Android.mk
+++ b/tools/resampler_tools/Android.mk
@@ -1,6 +1,6 @@
# Copyright 2005 The Android Open Source Project
#
-# Android.mk for resampler_tools
+# Android.mk for resampler_tools
#